From 8c172467be36f7c9591e59b647e4cd342ce2ef41 Mon Sep 17 00:00:00 2001
From: Alex Smith
Date: Thu, 30 Jul 2015 12:03:42 +0100
Subject: [PATCH] MIPS: Add implementation of dma_map_ops.mmap()

The generic implementation of dma_map_ops.mmap(), dma_common_mmap(),
is not correct for non-coherent devices. It expects to be passed a
virtual address previously returned by dma_alloc_coherent(), which for
a non-coherent device will return a KSEG1 address. It then attempts to
convert that virtual address to a physical address using virt_to_page()
which will yield an incorrect address.

Also, dma_common_mmap() does not handle the DMA_ATTR_WRITE_COMBINE
attribute, and therefore dma_mmap_writecombine() will not actually set
the appropriate pgprot_t flags for write combining.

This patch adds an implementation of dma_map_ops.mmap() that correctly
handles KSEG1 addresses, and enables write combining when requested.

Signed-off-by: Alex Smith
Cc: Sadegh Abbasi
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10808/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/dma-default.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf0245c3b1..8f23cf08f4ba 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -194,6 +194,40 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -380,6 +414,7 @@ EXPORT_SYMBOL(dma_cache_sync);
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
-- 
2.30.2
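
For reference, the new op is not called directly by drivers; it is reached
through the generic dma_mmap_coherent() and dma_mmap_writecombine() helpers.
A minimal sketch of a driver mmap handler that would exercise this path is
below; the foo_dev structure and its dev/buf/buf_dma/buf_size fields are
illustrative only and not part of this patch:

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_dev *foo = file->private_data;

		/*
		 * foo->buf is assumed to come from an earlier
		 * dma_alloc_coherent(); on a non-coherent MIPS device it
		 * is a KSEG1 address, which mips_dma_mmap() now converts
		 * back before remapping it into the caller's address space.
		 */
		return dma_mmap_coherent(foo->dev, vma, foo->buf,
					 foo->buf_dma, foo->buf_size);
	}

Calling dma_mmap_writecombine() instead sets DMA_ATTR_WRITE_COMBINE, which
the new implementation honours by applying pgprot_writecombine() rather than
pgprot_noncached() to the mapping.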