}
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
if ((dir == DMA_TO_DEVICE) ||
(dir == DMA_BIDIRECTIONAL)) {
BUG_ON(buf->size != size);
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
DO_STATS ( device_info->bounce_count++ );
*/
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
DO_STATS ( device_info->bounce_count++ );
#ifdef __KERNEL__
-#include <linux/mm.h> /* need struct page */
-
+#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return __arch_virt_to_dma(dev, addr);
+}
+#endif
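
For context (not part of the patch): these helpers remain internal to the mapping layer, and a driver is expected to obtain bus addresses only through the public DMA API. A minimal sketch of the intended driver-side usage, where example_send() and its arguments are hypothetical, might look like:

/* Illustrative sketch only: example_send() and its device pointer are
 * hypothetical.  Drivers go through dma_map_single()/dma_unmap_single();
 * they never call page_to_dma()/dma_to_virt()/virt_to_dma() themselves. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
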
/*
* DMA-consistent mapping functions. These allocate/free a region of
if (!arch_is_coherent())
dma_cache_maint(cpu_addr, size, dir);
- return virt_to_dma(dev, (unsigned long)cpu_addr);
+ return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
- return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+ return dma_map_single(dev, page_address(page) + offset, size, dir);
}
/**
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, (int)dir);
+ dma_unmap_single(dev, handle, size, dir);
}
/**
enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle), size, dir);
}
static inline void
enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
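
As a usage note (again, not part of the patch), the sync calls above implement the CPU/device ownership hand-over for a streaming mapping. A rough driver-side sequence, where dev, handle, buf and len are assumed to come from an earlier dma_map_single(dev, buf, len, DMA_FROM_DEVICE), is:

/* Illustrative sketch only: 'dev', 'handle', 'buf' and 'len' are assumed. */
dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* the CPU may now read the data the device wrote into 'buf' */
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
/* ownership is handed back to the device for the next transfer */
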
*/
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-/*
- * Optional device DMA address remapping. Do _not_ use directly!
- * We should really eliminate virt_to_bus() here - it's deprecated.
- */
-#ifndef __arch_page_to_dma
-#define page_to_dma(dev, page) ((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
-#define dma_to_virt(dev, addr) ((void *)__bus_to_virt(addr))
-#define virt_to_dma(dev, addr) ((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
-#else
-#define page_to_dma(dev, page) (__arch_page_to_dma(dev, page))
-#define dma_to_virt(dev, addr) (__arch_dma_to_virt(dev, addr))
-#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr))
-#endif
-
/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
(dma_addr_t)virt_to_lbus(page_address(page)) : \
(dma_addr_t)__virt_to_bus(page_address(page));})
-#define __arch_dma_to_virt(dev, addr) ({is_lbus_device(dev) ? \
- lbus_to_virt(addr) : \
- __bus_to_virt(addr);})
-
-#define __arch_virt_to_dma(dev, addr) ({is_lbus_device(dev) ? \
- virt_to_lbus(addr) : \
- __virt_to_bus(addr);})
+#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
+ lbus_to_virt(addr) : \
+ __bus_to_virt(addr)); })
+
+#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \
+ (dma_addr_t) (is_lbus_device(dev) ? \
+ virt_to_lbus(__addr) : \
+ __virt_to_bus(__addr)); })
#endif /* CONFIG_ARCH_OMAP15XX */
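
For comparison with the OMAP Innovator override above: the generic __virt_to_bus()-based fallbacks in asm/dma-mapping.h are compiled in only when a platform's mach/memory.h does not define __arch_page_to_dma. A made-up override with a fixed bus offset, where EXAMPLE_BUS_OFFSET and the whole fragment are invented for illustration and taken from no real board file, would look roughly like:

/* Hypothetical mach/memory.h fragment; EXAMPLE_BUS_OFFSET is invented. */
#define EXAMPLE_BUS_OFFSET	0x80000000UL

#define __arch_page_to_dma(dev, page) \
	((dma_addr_t)(__virt_to_bus((unsigned long)page_address(page)) + EXAMPLE_BUS_OFFSET))
#define __arch_dma_to_virt(dev, addr) \
	((void *)__bus_to_virt((addr) - EXAMPLE_BUS_OFFSET))
#define __arch_virt_to_dma(dev, addr) \
	((dma_addr_t)(__virt_to_bus((unsigned long)(addr)) + EXAMPLE_BUS_OFFSET))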