dma-buf: start caching of sg_table objects v2
author Christian König <christian.koenig@amd.com>
Tue, 3 Jul 2018 14:42:26 +0000 (16:42 +0200)
committer Christian König <christian.koenig@amd.com>
Wed, 22 May 2019 13:34:55 +0000 (15:34 +0200)
To allow a smooth transition from pinning buffer objects to dynamic
invalidation we first start to cache the sg_table for an attachment.

v2: keep closer to the DRM implementation

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.kernel.org/patch/10943053/
drivers/dma-buf/dma-buf.c
include/linux/dma-buf.h

index 3ae6c0c2cc02b2099c766f23a3d286667c881b67..f4104a21b06907ebdb13a3b768e640f0f5ee0b79 100644 (file)
@@ -576,6 +576,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        list_add(&attach->node, &dmabuf->attachments);
 
        mutex_unlock(&dmabuf->lock);
+
        return attach;
 
 err_attach:
@@ -598,6 +599,9 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
        if (WARN_ON(!dmabuf || !attach))
                return;
 
+       if (attach->sgt)
+               dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
@@ -633,10 +637,27 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);
 
+       if (attach->sgt) {
+               /*
+                * Two mappings with different directions for the same
+                * attachment are not allowed.
+                */
+               if (attach->dir != direction &&
+                   attach->dir != DMA_BIDIRECTIONAL)
+                       return ERR_PTR(-EBUSY);
+
+               return attach->sgt;
+       }
+
        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);
 
+       if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+               attach->sgt = sg_table;
+               attach->dir = direction;
+       }
+
        return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -660,8 +681,10 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;
 
-       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-                                               direction);
+       if (attach->sgt == sg_table)
+               return;
+
+       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
index a0bd071466fc50dccf5988a0f144eeb2764e504a..8a327566d7f4e0d21fb6be1f903d237b07676786 100644 (file)
@@ -44,6 +44,15 @@ struct dma_buf_attachment;
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
+       /**
+        * @cache_sgt_mapping:
+        *
+        * If true the framework will cache the first mapping made for each
+        * attachment. This avoids creating mappings for attachments multiple
+        * times.
+        */
+       bool cache_sgt_mapping;
+
        /**
         * @attach:
         *
@@ -323,6 +332,8 @@ struct dma_buf {
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
  * @priv: exporter specific attachment data.
  *
  * This structure holds the attachment information between the dma_buf buffer
@@ -338,6 +349,8 @@ struct dma_buf_attachment {
        struct dma_buf *dmabuf;
        struct device *dev;
        struct list_head node;
+       struct sg_table *sgt;
+       enum dma_data_direction dir;
        void *priv;
 };