[ARM] dma: don't touch cache on dma_*_for_cpu()
authorRussell King <rmk@dyn-67.arm.linux.org.uk>
Mon, 29 Sep 2008 18:50:59 +0000 (19:50 +0100)
committerRussell King <rmk+kernel@arm.linux.org.uk>
Tue, 30 Sep 2008 10:01:36 +0000 (11:01 +0100)
As per the dma_unmap_* calls, we don't touch the cache when a DMA
buffer transitions from device to CPU ownership.  Presently, no
problems have been identified with speculative cache prefetching,
which is itself a new feature in later architectures.  We may
have to revisit the DMA API later for those architectures anyway.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/dma-mapping.h
arch/arm/mm/dma-mapping.c

index 2544a087c213a72f835b2589f5bba6bc09413dd5..ad62020763f1c3cd84aa0e88efb712ec28fad565 100644 (file)
@@ -376,11 +376,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
        BUG_ON(!valid_dma_direction(dir));
 
-       if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-               return;
-
-       if (!arch_is_coherent())
-               dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+       dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
index 0e28cf33f7dd96bc2f28d085e0c9494aa4afaae1..67960017dc8f1f79c40bd083205f3409cdf0fbd1 100644 (file)
@@ -585,12 +585,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-                                       sg_dma_len(s), dir))
-                       continue;
-
-               if (!arch_is_coherent())
-                       dma_cache_maint(sg_virt(s), s->length, dir);
+               dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+                                       sg_dma_len(s), dir);
        }
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);