{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
-               return virt_to_bus(ptr);
+               return virt_to_phys(ptr);
#endif
        dma_cache_sync(dev, ptr, size, dir);
-       return virt_to_bus(ptr);
+       return virt_to_phys(ptr);
}
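On this platform the two helpers were already identical: the io.h hunk at the
end of this patch shows virt_to_bus defined as a plain alias for virt_to_phys,
so the rename inside dma_map_single() is behavior-preserving. A minimal
stand-alone sketch of that equivalence, using hypothetical stand-ins for the
kernel macros:

    #include <stdio.h>

    /* Hypothetical stand-ins modeling the io.h definitions below:
     * with an identity bus mapping, virt_to_bus is nothing more
     * than another name for virt_to_phys. */
    #define virt_to_phys(address) ((unsigned long)(address))
    #define virt_to_bus virt_to_phys

    int main(void)
    {
            int buf[4];

            /* Both macros expand to the same expression, so the
             * value returned by dma_map_single() cannot change. */
            printf("virt_to_bus:  %#lx\n", virt_to_bus(buf));
            printf("virt_to_phys: %#lx\n", virt_to_phys(buf));
            return 0;
    }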
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
-       dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
+       dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}
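dma_sync_single() must recover the CPU address behind a DMA handle before it
can flush the cache; for directly-mapped kernel memory on this platform the
handle is a physical address, so phys_to_virt() is the exact inverse of the
virt_to_phys() used at map time. A hedged sketch of a typical caller, where
dev, buf and len are illustrative driver state rather than anything taken
from this patch:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Sketch only: shows the map/sync pairing these helpers exist
     * to support; names are illustrative. */
    static void example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* Writes back the CPU cache (via dma_cache_sync) and
             * returns the device-visible address of buf. */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            /* If the CPU dirties buf again before a later transfer,
             * the cache must be flushed once more; internally this
             * recovers the CPU address with phys_to_virt(handle). */
            dma_sync_single(dev, handle, len, DMA_TO_DEVICE);

            /* A no-op on this platform, per the #define above. */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    }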
static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
-       dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
+       dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
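The only difference in the ranged variant is the offset arithmetic: the
sub-range's CPU address is phys_to_virt(dma_handle) + offset. Under the
identity mapping this round-trips exactly, as the small stand-alone check
below illustrates (both macro definitions are hypothetical stand-ins
mirroring the io.h hunk further down):

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: plain casts, as in the io.h hunk. */
    #define virt_to_phys(address) ((unsigned long)(address))
    #define phys_to_virt(address) ((void *)(address))

    int main(void)
    {
            char buf[64];
            unsigned long handle = virt_to_phys(buf);
            unsigned long offset = 16;

            /* dma_sync_single_range() recovers the sub-range's CPU
             * address as phys_to_virt(handle) + offset. */
            assert((char *)phys_to_virt(handle) + offset == buf + offset);
            printf("sub-range at %p\n",
                   (void *)((char *)phys_to_virt(handle) + offset));
            return 0;
    }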
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
{
        if(can_use_virtual_dma == 2) {
                if((unsigned int) addr >= (unsigned int) high_memory ||
-                  virt_to_bus(addr) >= 0x10000000)
+                  virt_to_phys(addr) >= 0x10000000)
                        use_virtual_dma = 1;
                else
                        use_virtual_dma = 0;
        doing_pdma = 0;
        clear_dma_ff(FLOPPY_DMA);
        set_dma_mode(FLOPPY_DMA,mode);
-       set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
+       set_dma_addr(FLOPPY_DMA,virt_to_phys(addr));
        set_dma_count(FLOPPY_DMA,size);
        enable_dma(FLOPPY_DMA);
        return 0;
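The floppy changes are the same substitution in two spots: the heuristic that
decides whether the buffer is reachable by the legacy DMA controller at all,
and the call that programs the controller's address register, which takes a
physical address. A stand-alone sketch of the reachability check follows;
needs_virtual_dma is a hypothetical helper, and the high_memory values are
illustrative, not from the patch:

    #include <stdio.h>

    /* Hypothetical stand-in mirroring the io.h definition. */
    #define virt_to_phys(address) ((unsigned long)(address))

    /* Mirrors the patched test: fall back to virtual (PIO-driven)
     * DMA when the buffer lies outside the controller's reach. */
    static int needs_virtual_dma(char *addr, unsigned long high_memory)
    {
            return (unsigned long)addr >= high_memory ||
                   virt_to_phys(addr) >= 0x10000000;
    }

    int main(void)
    {
            /* Reachable buffer: real DMA is fine. */
            printf("%d\n", needs_virtual_dma((char *)0x00800000, 0x20000000));
            /* Above the 0x10000000 cutoff: use virtual DMA. */
            printf("%d\n", needs_virtual_dma((char *)0x1fff0000, 0x20000000));
            return 0;
    }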
#define virt_to_phys(address) ((unsigned long)(address))
#endif
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
-
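With the users above converted, the compatibility aliases can go. New code
should not reach for virt_to_bus() at all; the portable way to obtain a
device-visible address is the DMA API. A hedged sketch of the replacement
pattern (get_device_address is an illustrative name, not part of this patch):

    #include <linux/dma-mapping.h>

    /* Sketch only: the DMA-API replacement for virt_to_bus(). */
    static dma_addr_t get_device_address(struct device *dev, void *buf,
                                         size_t len)
    {
            /* Replaces: bus = virt_to_bus(buf); and also performs
             * the cache maintenance the bare macro silently skipped. */
            return dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
    }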
/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed