swiotlb: add common swiotlb_map_ops
author: Christoph Hellwig <hch@lst.de>
Tue, 9 Jan 2018 15:44:16 +0000 (16:44 +0100)
committer: Christoph Hellwig <hch@lst.de>
Mon, 15 Jan 2018 08:35:45 +0000 (09:35 +0100)
Currently all architectures that want to use swiotlb have to implement
their own dma_map_ops instances.  Provide a generic one based on the
x86 implementation which first calls into dma_direct to try a full blown
direct mapping implementation (including e.g. CMA) before falling back
to allocating from the swiotlb buffer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
include/linux/swiotlb.h
lib/swiotlb.c

index 606375e35d87ff21930a78363ea2b3600e2f1376..5b1f2a00491c15c9be4800ceda4662bc72ac0e54 100644 (file)
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
                                    enum dma_sync_target target);
 
 /* Accessory functions. */
+
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+               gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_addr, unsigned long attrs);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags);
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
 extern void swiotlb_set_max_segment(unsigned int);
 
+extern const struct dma_map_ops swiotlb_dma_ops;
+
 #endif /* __LINUX_SWIOTLB_H */
index cf5311908fa9481b62e400445ba0dbc4e67c8484..0fae2f45c3c01170684bf25d5ea9609a0f05dc5e 100644 (file)
@@ -1087,3 +1087,46 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
        return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+/*
+ * Allocate coherent DMA memory for @dev: try dma_direct_alloc() first
+ * (the full direct-mapping path), and only fall back to carving the
+ * buffer out of the swiotlb pool when that fails.
+ */
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       void *vaddr;
+
+       /*
+        * Don't print a warning when the first allocation attempt fails.
+        * swiotlb_alloc_coherent() will print a warning when the DMA memory
+        * allocation ultimately failed.
+        */
+       gfp |= __GFP_NOWARN;
+
+       vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       if (!vaddr)
+               vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+       return vaddr;
+}
+
+/*
+ * Free memory obtained from swiotlb_alloc(): route the buffer back to
+ * whichever allocator it came from, matching the two paths taken in
+ * swiotlb_alloc().
+ */
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       /* swiotlb pool membership is keyed on the physical address */
+       if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
+               swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+       else
+               dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+/*
+ * Common dma_map_ops usable by any architecture that selects swiotlb:
+ * alloc/free go through the direct-mapping-first helpers above, while
+ * the map/unmap/sync callbacks use the existing swiotlb entry points.
+ *
+ * NOTE(review): swiotlb_dma_supported() exists (see above) but is not
+ * wired into this table as .dma_supported — confirm that is intended.
+ */
+const struct dma_map_ops swiotlb_dma_ops = {
+       .mapping_error          = swiotlb_dma_mapping_error,
+       .alloc                  = swiotlb_alloc,
+       .free                   = swiotlb_free,
+       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
+       .map_sg                 = swiotlb_map_sg_attrs,
+       .unmap_sg               = swiotlb_unmap_sg_attrs,
+       .map_page               = swiotlb_map_page,
+       .unmap_page             = swiotlb_unmap_page,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */