As with the dma_unmap_* calls, we don't touch the cache when a DMA
buffer transitions from device to CPU ownership. So far, no problems
have been identified with speculative cache prefetching, which is in
any case a new feature found only in later architectures. We may have
to revisit the DMA API for those architectures anyway.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
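---
For context, here is an illustrative sketch (not part of this patch) of
where the device-to-CPU hand-off sits in a typical streaming-DMA receive
path. The device pointer, RX_BUF_SIZE and error handling are hypothetical
placeholders; the dma_* calls are the real DMA API. Because the buffer's
cache lines were dealt with when it was mapped and the CPU has not touched
it since, there is nothing for dma_sync_single_for_cpu() to do to the
cache on ARM, absent speculative prefetching.

/*
 * Illustrative only, not part of this patch: a hypothetical streaming-DMA
 * receive path showing where the device-to-CPU hand-off happens.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define RX_BUF_SIZE	4096	/* placeholder buffer size */

static int rx_one_buffer(struct device *dev)
{
	void *buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* Map the buffer: cache maintenance happens here, and ownership
	 * of the buffer passes to the device. */
	handle = dma_map_single(dev, buf, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... device DMAs received data into the buffer ... */

	/* Device-to-CPU hand-off.  After this patch, ARM does no cache
	 * maintenance here: the CPU hasn't touched the buffer since the
	 * map, so without speculative prefetch there can be no stale
	 * cache lines.  Only the dmabounce copy-back remains. */
	dma_sync_single_for_cpu(dev, handle, RX_BUF_SIZE, DMA_FROM_DEVICE);

	/* CPU may now read the received data in buf. */

	/* Hand the buffer back to the device before the next transfer. */
	dma_sync_single_for_device(dev, handle, RX_BUF_SIZE,
				   DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, RX_BUF_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}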
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					    sg_dma_len(s), dir))
-			continue;
-
-		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+				       sg_dma_len(s), dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
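The scatter-gather path follows the same pattern. Below is a hypothetical
counterpart (again not part of this patch; the function name and callers
are assumed). Note that, per the DMA API, the nents passed to the sync
calls must be the value originally given to dma_map_sg(), not its return
value.

/*
 * Illustrative only: scatter-gather device-to-CPU hand-off.  sg and
 * nents are assumed to come from an earlier dma_map_sg() call.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void rx_sg_complete(struct device *dev, struct scatterlist *sg,
			   int nents)
{
	/* Device-to-CPU hand-off for every segment; on ARM this now
	 * only does the dmabounce copy-back, no cache maintenance. */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

	/* ... CPU reads the received data ... */

	/* Return ownership of all segments to the device. */
	dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
}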