powerpc/dma: remove dma_nommu_mmap_coherent
authorChristoph Hellwig <hch@lst.de>
Wed, 13 Feb 2019 07:01:24 +0000 (08:01 +0100)
committerMichael Ellerman <mpe@ellerman.id.au>
Mon, 18 Feb 2019 11:41:03 +0000 (22:41 +1100)
The coherent cache version of this function already is functionally
identical to the default version, and by defining the
arch_dma_coherent_to_pfn hook the same is true for the noncoherent
version as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/mm/dma-noncoherent.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/pseries/vio.c

index 16d45518d9bbd3ee2d94d04236233aa232ec7168..f19c486e7b3f5c477ebc31ea027a75dee91e0dd5 100644 (file)
@@ -25,10 +25,6 @@ extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle,
                                       unsigned long attrs);
-extern int dma_nommu_mmap_coherent(struct device *dev,
-                                   struct vm_area_struct *vma,
-                                   void *cpu_addr, dma_addr_t handle,
-                                   size_t size, unsigned long attrs);
 int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction direction,
                unsigned long attrs);
index 5a0b5e863b08f249d4e2d552dd7acdaf74c9fdb7..ed8b60829a9003fa72643180afb1dd46b5ef8d45 100644 (file)
@@ -167,7 +167,6 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
-       .mmap                   = dma_nommu_mmap_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
index 0e21c318300e3b8411da2f548e4dc605d3509710..d5950a0cb7585b882e9e803d95e67bb5f7b7a5f3 100644 (file)
@@ -34,7 +34,6 @@ unsigned int ppc_swiotlb_enable;
 const struct dma_map_ops powerpc_swiotlb_dma_ops = {
        .alloc = __dma_nommu_alloc_coherent,
        .free = __dma_nommu_free_coherent,
-       .mmap = dma_nommu_mmap_coherent,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = swiotlb_dma_supported,
index 10fa4e18b4e9999e66cb9049c8880eee31274ebb..841c43355a7e97dc6233cc8201d4c66aaf3f1cf8 100644 (file)
@@ -114,24 +114,6 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size,
 }
 #endif /* !CONFIG_NOT_COHERENT_CACHE */
 
-int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                            void *cpu_addr, dma_addr_t handle, size_t size,
-                            unsigned long attrs)
-{
-       unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
-       pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
-       return remap_pfn_range(vma, vma->vm_start,
-                              pfn + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-}
-
 int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
@@ -218,7 +200,6 @@ static inline void dma_nommu_sync_single(struct device *dev,
 const struct dma_map_ops dma_nommu_ops = {
        .alloc                          = __dma_nommu_alloc_coherent,
        .free                           = __dma_nommu_free_coherent,
-       .mmap                           = dma_nommu_mmap_coherent,
        .map_sg                         = dma_nommu_map_sg,
        .unmap_sg                       = dma_nommu_unmap_sg,
        .dma_supported                  = dma_nommu_dma_supported,
index e955539686a41e0fa5c89cc79671c170c35f6d66..ee95da19c82dbedbd5e8885f69d74de487ac21c5 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/highmem.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 
 #include <asm/tlbflush.h>
@@ -400,14 +401,16 @@ EXPORT_SYMBOL(__dma_sync_page);
 
 /*
  * Return the PFN for a given cpu virtual address returned by
- * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
+ * __dma_nommu_alloc_coherent.
  */
-unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
+               dma_addr_t dma_addr)
 {
        /* This should always be populated, so we don't test every
         * level. If that fails, we'll have a nice crash which
         * will be as good as a BUG_ON()
         */
+       unsigned long cpu_addr = (unsigned long)vaddr;
        pgd_t *pgd = pgd_offset_k(cpu_addr);
        pud_t *pud = pud_offset(pgd, cpu_addr);
        pmd_t *pmd = pmd_offset(pud, cpu_addr);
index 8c7464c3f27fee422df6790b4c7cff2504361333..48cd5aa90ad25cc8be69fa8a65241da036fb8999 100644 (file)
@@ -402,6 +402,7 @@ config NOT_COHERENT_CACHE
        bool
        depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
                GAMECUBE_COMMON || AMIGAONE
+       select ARCH_HAS_DMA_COHERENT_TO_PFN
        default n if PPC_47x
        default y
 
index 7870bf99168c603018f6ace19121b0f7b427e47d..b7dc8bd41fd0071ebbdc37a785b63cd06ae12e0e 100644 (file)
@@ -603,7 +603,6 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 static const struct dma_map_ops vio_dma_mapping_ops = {
        .alloc             = vio_dma_iommu_alloc_coherent,
        .free              = vio_dma_iommu_free_coherent,
-       .mmap              = dma_nommu_mmap_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,