*/
#include <linux/device.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
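Every hunk below applies the same substitution: an open-coded sum of a CPU
physical address and get_dma_offset(dev) becomes a call to the generic
phys_to_dma() helper declared in <linux/dma-direct.h>. A minimal sketch of
the equivalence the conversion relies on, with dma_off standing in for
whatever per-device offset get_dma_offset() used to return (this is not the
real helper's implementation), would be:

	static inline dma_addr_t sketch_phys_to_dma(phys_addr_t paddr, u64 dma_off)
	{
		/* bus address seen by the device = CPU physical address + offset */
		return (dma_addr_t)(paddr + dma_off);
	}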
int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
- u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
+ u64 limit = phys_to_dma(dev, (memblock_end_of_DRAM() - 1));
/* Limit fits in the mask, we are good */
if (mask >= limit)
return 1;
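/*
 * Worked example for the check above, assuming a zero per-device offset:
 * with RAM ending at 4 GiB, limit is 0xffffffff, so a 32-bit mask such as
 * DMA_BIT_MASK(32) satisfies mask >= limit and direct DMA is usable.
 */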
ret = page_address(page);
memset(ret, 0, size);
- *dma_handle = __pa(ret) + get_dma_offset(dev);
+ *dma_handle = phys_to_dma(dev, __pa(ret));
return ret;
}
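/*
 * ret above is the kernel virtual address handed back to the caller, while
 * *dma_handle is the bus address the device will use: the old code spelled
 * it __pa(ret) + get_dma_offset(dev), which phys_to_dma(dev, __pa(ret)) now
 * expresses through the generic helper.
 */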
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
+ sg->dma_address = phys_to_dma(dev, sg_phys(sg));
sg->dma_length = sg->length;
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
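/*
 * The two DMA_ATTR_SKIP_CPU_SYNC tests are not contradictory: lines are
 * elided here. The positive test above is the scatterlist path's skip
 * check (its body is not shown), while the negated test below opens the
 * cache sync in dma_nommu_map_page.
 */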
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync_page(page, offset, size, dir);
- return page_to_phys(page) + offset + get_dma_offset(dev);
+ return phys_to_dma(dev, page_to_phys(page)) + offset;
}
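/*
 * phys_to_dma() is applied to the page's base physical address and the
 * intra-page offset is added afterwards; assuming the translation is the
 * same flat per-device offset the old helper applied, this matches the
 * previous page_to_phys(page) + offset + get_dma_offset(dev).
 */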
static inline void dma_nommu_unmap_page(struct device *dev,