From 125ab12acf64ff86b55d20e14db20becd917b7c4 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 25 Sep 2008 22:16:22 +0100
Subject: [PATCH] [ARM] dma: fix dmabounce dma_sync_xxx() implementations

The dmabounce dma_sync_xxx() implementations have been broken for quite
some time; they all copy data between the DMA buffer and the CPU visible
buffer irrespective of the direction of the change of ownership.  (IOW,
a DMA_FROM_DEVICE mapping copies data from the DMA buffer to the CPU
buffer during a call to dma_sync_single_for_device().)

Fix it by getting rid of sync_single(), moving the contents into the
recently created dmabounce_sync_for_xxx() functions and adjusting
appropriately.

This also makes it possible to properly support the DMA range sync
functions.

Signed-off-by: Russell King
---
 arch/arm/common/dmabounce.c        | 144 ++++++++++++-----------------
 arch/arm/include/asm/dma-mapping.h |  26 +++---
 2 files changed, 69 insertions(+), 101 deletions(-)

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1cb880b734df..d4b0c608fdee 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -205,6 +205,21 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+		dma_addr_t dma_addr, const char *where)
+{
+	if (!dev || !dev->archdata.dmabounce)
+		return NULL;
+	if (dma_mapping_error(dev, dma_addr)) {
+		if (dev)
+			dev_err(dev, "Trying to %s invalid mapping\n", where);
+		else
+			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		return NULL;
+	}
+	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
@@ -274,19 +289,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	/*
-	 * Trying to unmap an invalid mapping
-	 */
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Trying to unmap invalid mapping\n");
-		return;
-	}
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
+	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
 
 	if (buf) {
 		BUG_ON(buf->size != size);
@@ -296,7 +299,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 			buf->safe, buf->safe_dma_addr);
 
-		DO_STATS ( device_info->bounce_count++ );
+		DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
 			void *ptr = buf->ptr;
@@ -317,74 +320,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			dmac_clean_range(ptr, ptr + size);
 			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
-		free_safe_buffer(device_info, buf);
-	}
-}
-
-static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf = NULL;
-
-	if (device_info)
-		buf = find_safe_buffer(device_info, dma_addr);
-
-	if (buf) {
-		/*
-		 * Both of these checks from original code need to be
-		 * commented out b/c some drivers rely on the following:
-		 *
-		 * 1) Drivers may map a large chunk of memory into DMA space
-		 *    but only sync a small portion of it. Good example is
-		 *    allocating a large buffer, mapping it, and then
-		 *    breaking it up into small descriptors. No point
-		 *    in syncing the whole buffer if you only have to
-		 *    touch one descriptor.
-		 *
-		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
-		 *    usually only synced in one dir at a time.
-		 *
-		 * See drivers/net/eepro100.c for examples of both cases.
-		 *
-		 * -ds
-		 *
-		 * BUG_ON(buf->size != size);
-		 * BUG_ON(buf->direction != dir);
-		 */
-
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
-
-		DO_STATS ( device_info->bounce_count++ );
-
-		switch (dir) {
-		case DMA_FROM_DEVICE:
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
-			break;
-		case DMA_TO_DEVICE:
-			dev_dbg(dev,
-				"%s: copy out unsafe %p to safe %p, size %d\n",
-				__func__,buf->ptr, buf->safe, size);
-			memcpy(buf->safe, buf->ptr, size);
-			break;
-		case DMA_BIDIRECTIONAL:
-			BUG();	/* is this allowed?  what does it mean? */
-		default:
-			BUG();
-		}
-		/*
-		 * No need to sync the safe buffer - it was allocated
-		 * via the coherent allocators.
-		 */
-		return 0;
-	} else {
-		return 1;
+		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
 }
 
@@ -447,18 +383,54 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe + off, buf->ptr + off, sz);
+		memcpy(buf->ptr + off, buf->safe + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
-	return sync_single(dev, addr, off + sz, dir);
+
+	buf = find_safe_buffer_dev(dev, addr, __func__);
+	if (!buf)
+		return 1;
+
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__,buf->ptr + off, buf->safe + off, sz);
+		memcpy(buf->safe + off, buf->ptr + off, sz);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(dmabounce_sync_for_device);
 
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c003ad390def..1204dc958c43 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -241,6 +241,15 @@ extern void dmabounce_unregister_dev(struct device *);
  */
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 
+/*
+ * The DMA API, implemented by dmabounce.c. See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+
 /*
  * Private functions
  */
@@ -251,7 +260,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 #else
 #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
 #define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
-#endif /* CONFIG_DMABOUNCE */
 
 
 /**
@@ -268,7 +276,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
  * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction dir)
@@ -278,9 +285,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
+
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
@@ -297,7 +302,6 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
  * can regain ownership by calling dma_unmap_page() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
@@ -308,11 +312,6 @@ dma_map_page(struct device *dev, struct page *page,
 
 	return page_to_dma(dev, page) + offset;
 }
-#else
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir);
-#endif
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
@@ -328,16 +327,13 @@ extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
-- 
2.30.2
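
As an illustration (not part of the patch itself), a hypothetical driver fragment along the following lines is the kind of partial-sync usage the corrected semantics support: one large descriptor ring is mapped once, and individual descriptors are handed back and forth with the range variants of the sync calls. With the fix, dmabounce_sync_for_cpu() copies bounce buffer -> CPU buffer only for DMA_FROM_DEVICE/DMA_BIDIRECTIONAL mappings, and dmabounce_sync_for_device() copies CPU buffer -> bounce buffer only for DMA_TO_DEVICE/DMA_BIDIRECTIONAL, rather than copying unconditionally as sync_single() did. All driver-side names here (struct my_desc, NUM_DESC, DESC_DONE, my_rx_poll) are made up for the example.

/* Hypothetical usage sketch - illustrative only, not from the kernel tree. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

#define NUM_DESC	16
#define DESC_DONE	0x1		/* hypothetical "device finished" bit */

struct my_desc {
	u32	status;
	u32	len;
};

static void my_rx_poll(struct device *dev, struct my_desc *ring,
		dma_addr_t ring_dma)
{
	int i;

	for (i = 0; i < NUM_DESC; i++) {
		unsigned long off = i * sizeof(struct my_desc);

		/*
		 * Take CPU ownership of descriptor i only.  For this
		 * DMA_FROM_DEVICE mapping, dmabounce copies the bounce
		 * buffer back to the CPU buffer - and only this range,
		 * not the whole ring.
		 */
		dma_sync_single_range_for_cpu(dev, ring_dma, off,
				sizeof(struct my_desc), DMA_FROM_DEVICE);

		if (!(ring[i].status & DESC_DONE))
			continue;

		/* ... process the completed descriptor ... */

		/*
		 * Hand the descriptor back to the device.  For a
		 * DMA_FROM_DEVICE mapping there is nothing to copy on
		 * this call; the old sync_single() wrongly copied
		 * bounce buffer -> CPU buffer here as well.
		 */
		dma_sync_single_range_for_device(dev, ring_dma, off,
				sizeof(struct my_desc), DMA_FROM_DEVICE);
	}
}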