         list_add(&attach->node, &dmabuf->attachments);
         mutex_unlock(&dmabuf->lock);
+
         return attach;
 err_attach:
@@ ... @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
         if (WARN_ON(!dmabuf || !attach))
                 return;

+        if (attach->sgt)
+                dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
         mutex_lock(&dmabuf->lock);
         list_del(&attach->node);
         if (dmabuf->ops->detach)
@@ ... @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
         if (WARN_ON(!attach || !attach->dmabuf))
                 return ERR_PTR(-EINVAL);

+        if (attach->sgt) {
+                /*
+                 * Two mappings with different directions for the same
+                 * attachment are not allowed.
+                 */
+                if (attach->dir != direction &&
+                    attach->dir != DMA_BIDIRECTIONAL)
+                        return ERR_PTR(-EBUSY);
+
+                return attach->sgt;
+        }
+
         sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
         if (!sg_table)
                 sg_table = ERR_PTR(-ENOMEM);

+        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+                attach->sgt = sg_table;
+                attach->dir = direction;
+        }
+
         return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
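
For illustration only (not part of the patch): a minimal importer-side sketch of what the cached path above means, assuming "attach" came from dma_buf_attach() against an exporter that sets .cache_sgt_mapping; the helper name example_map_twice is made up.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Hypothetical importer helper: maps the same attachment twice. */
static struct sg_table *example_map_twice(struct dma_buf_attachment *attach)
{
        struct sg_table *first, *second;

        /* First call: the exporter's ->map_dma_buf() runs and, because
         * cache_sgt_mapping is set, the result is stored in attach->sgt
         * with attach->dir = DMA_TO_DEVICE. */
        first = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(first))
                return first;

        /* Second call with the same direction returns the cached table
         * without calling the exporter again.  Asking for DMA_FROM_DEVICE
         * here would return ERR_PTR(-EBUSY) instead, because the cached
         * mapping is neither DMA_FROM_DEVICE nor DMA_BIDIRECTIONAL. */
        second = dma_buf_map_attachment(attach, DMA_TO_DEVICE);

        return second;  /* same pointer as "first" on the cached path */
}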
@@ ... @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
         if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                 return;

-        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-                                           direction);
+        if (attach->sgt == sg_table)
+                return;
+
+        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
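
Continuing the importer-side sketch (again illustrative, with a made-up helper name): with caching in effect, the unmap above becomes a no-op and the real unmap is deferred to detach.

#include <linux/dma-buf.h>

/* Hypothetical importer helper: teardown when the mapping is cached. */
static void example_teardown(struct dma_buf *dmabuf,
                             struct dma_buf_attachment *attach,
                             struct sg_table *sgt)
{
        /* sgt is the cached attach->sgt, so this hits the early return
         * above; the exporter's ->unmap_dma_buf() is not called yet. */
        dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);

        /* dma_buf_detach() now performs the real unmap, using the cached
         * attach->sgt and attach->dir, before dropping the attachment. */
        dma_buf_detach(dmabuf, attach);
}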
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ ... @@
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
+        /**
+         * @cache_sgt_mapping:
+         *
+         * If true the framework will cache the first mapping made for each
+         * attachment. This avoids creating mappings for attachments multiple
+         * times.
+         */
+        bool cache_sgt_mapping;
+
         /**
          * @attach:
          *
@@ ... @@
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
  * @priv: exporter specific attachment data.
  *
  * This structure holds the attachment information between the dma_buf buffer
@@ ... @@ struct dma_buf_attachment {
         struct dma_buf *dmabuf;
         struct device *dev;
         struct list_head node;
+        struct sg_table *sgt;
+        enum dma_data_direction dir;
         void *priv;
 };
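
For completeness, a sketch of how an exporter would opt in to the new @cache_sgt_mapping flag; the callback and variable names (my_map_dma_buf and friends) are placeholders, the bodies are stubs, and only the callbacks relevant to caching are shown.

#include <linux/dma-buf.h>

/* Placeholder: a real exporter builds and DMA-maps an sg_table for
 * attach->dev here. */
static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
                                       enum dma_data_direction dir)
{
        /* Returning NULL is turned into ERR_PTR(-ENOMEM) by the core. */
        return NULL;
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
                             struct sg_table *sgt,
                             enum dma_data_direction dir)
{
        /* With .cache_sgt_mapping set, this runs from dma_buf_detach()
         * for the cached table rather than on every
         * dma_buf_unmap_attachment() call. */
}

static const struct dma_buf_ops my_dmabuf_ops = {
        .cache_sgt_mapping = true,      /* opt in to the caching above */
        .map_dma_buf       = my_map_dma_buf,
        .unmap_dma_buf     = my_unmap_dma_buf,
        /* .release and the other mandatory callbacks omitted here */
};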