1 From 904c0d6a47b181b134a3626bfd93b456ec6b411d Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Fri, 21 Dec 2018 16:50:53 +0000
4 Subject: [PATCH 630/703] staging: vc-sm-cma: Add in allocation for VPU requests.
7 Module has to change from tristate to bool as all CMA functions
used here are not exported to loadable modules.
10 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
12 .../staging/vc04_services/vc-sm-cma/Kconfig | 4 +-
13 .../staging/vc04_services/vc-sm-cma/Makefile | 2 +-
14 .../staging/vc04_services/vc-sm-cma/vc_sm.c | 642 +++++++++++++++---
15 .../staging/vc04_services/vc-sm-cma/vc_sm.h | 30 +-
16 .../vc04_services/vc-sm-cma/vc_sm_cma.c | 99 +++
17 .../vc04_services/vc-sm-cma/vc_sm_cma.h | 39 ++
18 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.c | 10 +
19 .../vc04_services/vc-sm-cma/vc_sm_cma_vchi.h | 4 +
20 .../vc04_services/vc-sm-cma/vc_sm_defs.h | 2 +
21 9 files changed, 723 insertions(+), 109 deletions(-)
22 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
23 create mode 100644 drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h
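
Note for reviewers (not part of the diff): the in-kernel API exported by this
driver keeps its shape across this patch. A minimal client sketch follows; it
assumes vc_sm_cma_free(void *handle) from vc_sm_knl.h keeps its current
prototype, and share_with_vpu() is an invented example name.

    #include <linux/dma-buf.h>
    #include "vc_sm_knl.h"

    /* Sketch: wrap an existing dmabuf so the VPU can use it. */
    static int share_with_vpu(struct dma_buf *dmabuf, int *vc_handle)
    {
    	void *handle;
    	int ret;

    	ret = vc_sm_cma_import_dmabuf(dmabuf, &handle);
    	if (ret)
    		return ret;

    	/* Opaque VideoCore handle, suitable for passing to the firmware. */
    	*vc_handle = vc_sm_cma_int_handle(handle);

    	/* When finished, vc_sm_cma_free(handle) drops the import; the VPU
    	 * mapping is torn down once the firmware confirms the release. */
    	return 0;
    }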
25 --- a/drivers/staging/vc04_services/vc-sm-cma/Kconfig
26 +++ b/drivers/staging/vc04_services/vc-sm-cma/Kconfig
29 - tristate "VideoCore Shared Memory (CMA) driver"
30 - depends on BCM2835_VCHIQ
31 + bool "VideoCore Shared Memory (CMA) driver"
32 + depends on BCM2835_VCHIQ && DMA_CMA
34 select DMA_SHARED_BUFFER
36 --- a/drivers/staging/vc04_services/vc-sm-cma/Makefile
37 +++ b/drivers/staging/vc04_services/vc-sm-cma/Makefile
38 @@ -3,6 +3,6 @@ ccflags-y += -Idrivers/staging/vc04_serv
39 ccflags-y += -D__VCCOREVER__=0
41 vc-sm-cma-$(CONFIG_BCM_VC_SM_CMA) := \
42 - vc_sm.o vc_sm_cma_vchi.o
43 + vc_sm.o vc_sm_cma_vchi.o vc_sm_cma.o
45 obj-$(CONFIG_BCM_VC_SM_CMA) += vc-sm-cma.o
46 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
47 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.c
49 * and taking some code for CMA/dmabuf handling from the Android Ion
50 * driver (Google/Linaro).
52 - * This is cut down version to only support import of dma_bufs from
53 - * other kernel drivers. A more complete implementation of the old
54 - * vmcs_sm functionality can follow later.
56 + * This driver has 3 main uses:
57 + * 1) Allocating buffers for the kernel or userspace that can be shared with the
+ * VPU.
59 + * 2) Importing dmabufs from elsewhere for sharing with the VPU.
60 + * 3) Allocating buffers for use by the VPU.
62 + * In the first and second cases the native handle is a dmabuf. Releasing the
63 + * resource inherently comes from releasing the dmabuf, and this will trigger
64 + * unmapping on the VPU. The underlying allocation and our buffer structure are
65 + * retained until the VPU has confirmed that it has finished with it.
67 + * For the VPU allocations the VPU is responsible for triggering the release,
68 + * and therefore the released message decrements the dma_buf refcount (with the
69 + * VPU mapping having already been marked as released).
70 + */
72 /* ---- Include Files ----------------------------------------------------- */
74 #include "vc_sm_cma_vchi.h"
77 +#include "vc_sm_cma.h"
78 #include "vc_sm_knl.h"
80 /* ---- Private Constants and Types --------------------------------------- */
81 @@ -72,6 +84,7 @@ struct sm_state_t {
82 struct platform_device *pdev;
84 struct sm_instance *sm_handle; /* Handle for videocore service. */
85 + struct cma *cma_heap;
87 spinlock_t kernelid_map_lock; /* Spinlock protecting kernelid_map */
88 struct idr kernelid_map;
89 @@ -80,6 +93,7 @@ struct sm_state_t {
90 struct list_head buffer_list; /* List of buffer. */
92 struct vc_sm_privdata_t *data_knl; /* Kernel internal data tracking. */
93 + struct vc_sm_privdata_t *vpu_allocs; /* All allocations from the VPU */
94 struct dentry *dir_root; /* Debug fs entries root. */
95 struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
97 @@ -89,6 +103,12 @@ struct sm_state_t {
98 u32 int_trans_id; /* Interrupted transaction. */
101 +struct vc_sm_dma_buf_attachment {
102 + struct device *dev;
103 + struct sg_table *table;
104 + struct list_head list;
107 /* ---- Private Variables ----------------------------------------------- */
109 static struct sm_state_t *sm_state;
110 @@ -172,12 +192,14 @@ static int vc_sm_cma_global_state_show(s
112 seq_printf(s, " DMABUF %p\n",
114 - seq_printf(s, " ATTACH %p\n",
116 + if (resource->imported) {
117 + seq_printf(s, " ATTACH %p\n",
118 + resource->import.attach);
119 + seq_printf(s, " SGT %p\n",
120 + resource->import.sgt);
122 seq_printf(s, " SG_TABLE %p\n",
124 - seq_printf(s, " SGT %p\n",
126 seq_printf(s, " DMA_ADDR %pad\n",
127 &resource->dma_addr);
128 seq_printf(s, " VC_HANDLE %08x\n",
129 @@ -209,17 +231,33 @@ static void vc_sm_add_resource(struct vc
133 - * Release an allocation.
134 - * All refcounting is done via the dma buf object.
135 + * Cleans up imported dmabuf.
137 -static void vc_sm_release_resource(struct vc_sm_buffer *buffer, int force)
138 +static void vc_sm_clean_up_dmabuf(struct vc_sm_buffer *buffer)
140 - mutex_lock(&sm_state->map_lock);
141 - mutex_lock(&buffer->lock);
142 + if (!buffer->imported)
143 + return;
145 - pr_debug("[%s]: buffer %p (name %s, size %zu)\n",
146 - __func__, buffer, buffer->name, buffer->size);
147 + /* Handle cleaning up imported dmabufs */
148 + mutex_lock(&buffer->lock);
149 + if (buffer->import.sgt) {
150 + dma_buf_unmap_attachment(buffer->import.attach,
151 + buffer->import.sgt,
152 + DMA_BIDIRECTIONAL);
153 + buffer->import.sgt = NULL;
155 + if (buffer->import.attach) {
156 + dma_buf_detach(buffer->dma_buf, buffer->import.attach);
157 + buffer->import.attach = NULL;
159 + mutex_unlock(&buffer->lock);
162 +/*
163 + * Instructs VPU to decrement the refcount on a buffer.
164 + */
165 +static void vc_sm_vpu_free(struct vc_sm_buffer *buffer)
167 if (buffer->vc_handle && buffer->vpu_state == VPU_MAPPED) {
168 struct vc_sm_free_t free = { buffer->vc_handle, 0 };
169 int status = vc_sm_cma_vchi_free(sm_state->sm_handle, &free,
170 @@ -230,17 +268,32 @@ static void vc_sm_release_resource(struc
173 if (sm_state->require_released_callback) {
174 - /* Need to wait for the VPU to confirm the free */
175 + /* Need to wait for the VPU to confirm the free. */
177 + /* Retain a reference on this until the VPU has
178 + * confirmed that it has finished with it.
179 + */
180 buffer->vpu_state = VPU_UNMAPPING;
183 + buffer->vpu_state = VPU_NOT_MAPPED;
184 + buffer->vc_handle = 0;
186 - buffer->vpu_state = VPU_NOT_MAPPED;
187 - buffer->vc_handle = 0;
191 +/*
192 + * Release an allocation.
193 + * All refcounting is done via the dma buf object.
194 + *
195 + * Must be called with the mutex held. The function will either release the
196 + * mutex (if deferring the release) or destroy it. The caller must therefore not
197 + * reuse the buffer on return.
198 + */
199 +static void vc_sm_release_resource(struct vc_sm_buffer *buffer)
201 + pr_debug("[%s]: buffer %p (name %s, size %zu)\n",
202 + __func__, buffer, buffer->name, buffer->size);
204 if (buffer->vc_handle) {
205 /* We've sent the unmap request but not had the response. */
206 pr_err("[%s]: Waiting for VPU unmap response on %p\n",
207 @@ -248,45 +301,43 @@ static void vc_sm_release_resource(struc
210 if (buffer->in_use) {
211 - /* Don't release dmabuf here - we await the release */
212 + /* dmabuf still in use - we await the release */
213 pr_err("[%s]: buffer %p is still in use\n",
218 - /* Handle cleaning up imported dmabufs */
220 - dma_buf_unmap_attachment(buffer->attach, buffer->sgt,
221 - DMA_BIDIRECTIONAL);
222 - buffer->sgt = NULL;
224 - if (buffer->attach) {
225 - dma_buf_detach(buffer->dma_buf, buffer->attach);
226 - buffer->attach = NULL;
229 - /* Release the dma_buf (whether ours or imported) */
230 - if (buffer->import_dma_buf) {
231 - dma_buf_put(buffer->import_dma_buf);
232 - buffer->import_dma_buf = NULL;
233 - buffer->dma_buf = NULL;
234 - } else if (buffer->dma_buf) {
235 - dma_buf_put(buffer->dma_buf);
236 - buffer->dma_buf = NULL;
237 + /* Release the allocation (whether imported dmabuf or CMA allocation) */
238 + if (buffer->imported) {
239 + pr_debug("%s: Release imported dmabuf %p\n", __func__,
240 + buffer->import.dma_buf);
241 + if (buffer->import.dma_buf)
242 + dma_buf_put(buffer->import.dma_buf);
243 + else
244 + pr_err("%s: Imported dmabuf has already been put for buf %p\n",
245 + __func__, buffer);
246 + buffer->import.dma_buf = NULL;
248 + if (buffer->sg_table) {
249 + /* Our own allocation that we need to dma_unmap_sg */
250 + dma_unmap_sg(&sm_state->pdev->dev,
251 + buffer->sg_table->sgl,
252 + buffer->sg_table->nents,
253 + DMA_BIDIRECTIONAL);
255 + pr_debug("%s: Release our allocation\n", __func__);
256 + vc_sm_cma_buffer_free(&buffer->alloc);
257 + pr_debug("%s: Release our allocation - done\n", __func__);
260 - if (buffer->sg_table && !buffer->import_dma_buf) {
261 - /* Our own allocation that we need to dma_unmap_sg */
262 - dma_unmap_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
263 - buffer->sg_table->nents, DMA_BIDIRECTIONAL);
266 - /* Free the local resource. Start by removing it from the list */
267 - buffer->private = NULL;
268 + /* Free our buffer. Start by removing it from the list */
269 + mutex_lock(&sm_state->map_lock);
270 list_del(&buffer->global_buffer_list);
271 + mutex_unlock(&sm_state->map_lock);
273 + pr_debug("%s: Release our allocation - done\n", __func__);
274 mutex_unlock(&buffer->lock);
275 - mutex_unlock(&sm_state->map_lock);
277 mutex_destroy(&buffer->lock);
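
Editorial sketch (not patch content): the new locking contract, as it is used
by vc_sm_dma_buf_release() and vc_sm_import_dma_buf_release() later in this
file; release_pattern() is an invented name.

    static void release_pattern(struct vc_sm_buffer *buffer)
    {
    	mutex_lock(&buffer->lock);
    	vc_sm_vpu_free(buffer);		/* ask the VPU to drop its mapping */
    	vc_sm_release_resource(buffer);	/* unlocks, or destroys the lock */
    	/* buffer must not be touched from here on */
    }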
279 @@ -295,7 +346,7 @@ static void vc_sm_release_resource(struc
282 mutex_unlock(&buffer->lock);
283 - mutex_unlock(&sm_state->map_lock);
287 /* Create support for private data tracking. */
288 @@ -317,16 +368,267 @@ static struct vc_sm_privdata_t *vc_sm_cm
292 +static struct sg_table *dup_sg_table(struct sg_table *table)
294 + struct sg_table *new_table;
296 + struct scatterlist *sg, *new_sg;
298 + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
299 + if (!new_table)
300 + return ERR_PTR(-ENOMEM);
302 + ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
303 + if (ret) {
304 + kfree(new_table);
305 + return ERR_PTR(-ENOMEM);
306 + }
308 + new_sg = new_table->sgl;
309 + for_each_sg(table->sgl, sg, table->nents, i) {
310 + memcpy(new_sg, sg, sizeof(*sg));
311 + new_sg->dma_address = 0;
312 + new_sg = sg_next(new_sg);
318 +static void free_duped_table(struct sg_table *table)
320 + sg_free_table(table);
324 +/* Dma buf operations for use with our own allocations */
326 +static int vc_sm_dma_buf_attach(struct dma_buf *dmabuf,
327 + struct dma_buf_attachment *attachment)
330 + struct vc_sm_dma_buf_attachment *a;
331 + struct sg_table *table;
332 + struct vc_sm_buffer *buf = dmabuf->priv;
334 + a = kzalloc(sizeof(*a), GFP_KERNEL);
338 + table = dup_sg_table(buf->sg_table);
339 + if (IS_ERR(table)) {
345 + INIT_LIST_HEAD(&a->list);
347 + attachment->priv = a;
349 + mutex_lock(&buf->lock);
350 + list_add(&a->list, &buf->attachments);
351 + mutex_unlock(&buf->lock);
352 + pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
357 +static void vc_sm_dma_buf_detatch(struct dma_buf *dmabuf,
358 + struct dma_buf_attachment *attachment)
360 + struct vc_sm_dma_buf_attachment *a = attachment->priv;
361 + struct vc_sm_buffer *buf = dmabuf->priv;
363 + pr_debug("%s dmabuf %p attachment %p\n", __func__, dmabuf, attachment);
364 + free_duped_table(a->table);
365 + mutex_lock(&buf->lock);
366 + list_del(&a->list);
367 + mutex_unlock(&buf->lock);
372 +static struct sg_table *vc_sm_map_dma_buf(struct dma_buf_attachment *attachment,
373 + enum dma_data_direction direction)
375 + struct vc_sm_dma_buf_attachment *a = attachment->priv;
376 + struct sg_table *table;
380 + if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
382 + return ERR_PTR(-ENOMEM);
384 + pr_debug("%s attachment %p\n", __func__, attachment);
388 +static void vc_sm_unmap_dma_buf(struct dma_buf_attachment *attachment,
389 + struct sg_table *table,
390 + enum dma_data_direction direction)
392 + pr_debug("%s attachment %p\n", __func__, attachment);
393 + dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
396 +static int vc_sm_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
398 + struct vc_sm_buffer *buf = dmabuf->priv;
399 + struct sg_table *table = buf->sg_table;
400 + unsigned long addr = vma->vm_start;
401 + unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
402 + struct scatterlist *sg;
406 + pr_debug("%s dmabuf %p, buf %p, vm_start %08lX\n", __func__, dmabuf,
409 + mutex_lock(&buf->lock);
411 + /* now map it to userspace */
412 + for_each_sg(table->sgl, sg, table->nents, i) {
413 + struct page *page = sg_page(sg);
414 + unsigned long remainder = vma->vm_end - addr;
415 + unsigned long len = sg->length;
417 + if (offset >= sg->length) {
418 + offset -= sg->length;
419 + continue;
420 + } else if (offset) {
421 + page += offset / PAGE_SIZE;
422 + len = sg->length - offset;
423 + offset = 0;
424 + }
425 + len = min(len, remainder);
426 + ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
427 + vma->vm_page_prot);
431 + if (addr >= vma->vm_end)
434 + mutex_unlock(&buf->lock);
437 + pr_err("%s: failure mapping buffer to userspace\n",
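
Editorial aside (not part of the patch): a buffer exported with these ops
reaches userspace as an ordinary dmabuf fd, so mapping it is a plain mmap()
that lands in vc_sm_dmabuf_mmap() above. A hypothetical userspace sketch
(map_vcsm_buffer is an invented name):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Map 'size' bytes of a dmabuf fd exported by this driver. */
    static void *map_vcsm_buffer(int fd, size_t size)
    {
    	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
    		       fd, 0);

    	return p == MAP_FAILED ? NULL : p;
    }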
443 +static void vc_sm_dma_buf_release(struct dma_buf *dmabuf)
445 + struct vc_sm_buffer *buffer;
450 + buffer = (struct vc_sm_buffer *)dmabuf->priv;
452 + mutex_lock(&buffer->lock);
454 + pr_debug("%s dmabuf %p, buffer %p\n", __func__, dmabuf, buffer);
456 + buffer->in_use = 0;
458 + /* Unmap on the VPU */
459 + vc_sm_vpu_free(buffer);
460 + pr_debug("%s vpu_free done\n", __func__);
462 + /* Unmap our dma_buf object (the vc_sm_buffer remains until released
463 + * on the VPU).
464 + */
465 + vc_sm_clean_up_dmabuf(buffer);
466 + pr_debug("%s clean_up dmabuf done\n", __func__);
468 + vc_sm_release_resource(buffer);
469 + pr_debug("%s done\n", __func__);
472 +static int vc_sm_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
473 + enum dma_data_direction direction)
475 + struct vc_sm_buffer *buf;
476 + struct vc_sm_dma_buf_attachment *a;
481 + buf = dmabuf->priv;
485 + mutex_lock(&buf->lock);
487 + list_for_each_entry(a, &buf->attachments, list) {
488 + dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
491 + mutex_unlock(&buf->lock);
496 +static int vc_sm_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
497 + enum dma_data_direction direction)
499 + struct vc_sm_buffer *buf;
500 + struct vc_sm_dma_buf_attachment *a;
504 + buf = dmabuf->priv;
508 + mutex_lock(&buf->lock);
510 + list_for_each_entry(a, &buf->attachments, list) {
511 + dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
514 + mutex_unlock(&buf->lock);
519 +static void *vc_sm_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
525 +static void vc_sm_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
531 +static const struct dma_buf_ops dma_buf_ops = {
532 + .map_dma_buf = vc_sm_map_dma_buf,
533 + .unmap_dma_buf = vc_sm_unmap_dma_buf,
534 + .mmap = vc_sm_dmabuf_mmap,
535 + .release = vc_sm_dma_buf_release,
536 + .attach = vc_sm_dma_buf_attach,
537 + .detach = vc_sm_dma_buf_detatch,
538 + .begin_cpu_access = vc_sm_dma_buf_begin_cpu_access,
539 + .end_cpu_access = vc_sm_dma_buf_end_cpu_access,
540 + .map = vc_sm_dma_buf_kmap,
541 + .unmap = vc_sm_dma_buf_kunmap,
543 /* Dma_buf operations for chaining through to an imported dma_buf */
545 int vc_sm_import_dma_buf_attach(struct dma_buf *dmabuf,
546 struct dma_buf_attachment *attachment)
548 - struct vc_sm_buffer *res = dmabuf->priv;
549 + struct vc_sm_buffer *buf = dmabuf->priv;
551 - if (!res->import_dma_buf)
552 + if (!buf->imported)
554 - return res->import_dma_buf->ops->attach(res->import_dma_buf,
555 + return buf->import.dma_buf->ops->attach(buf->import.dma_buf,
559 @@ -334,22 +636,23 @@ static
560 void vc_sm_import_dma_buf_detatch(struct dma_buf *dmabuf,
561 struct dma_buf_attachment *attachment)
563 - struct vc_sm_buffer *res = dmabuf->priv;
564 + struct vc_sm_buffer *buf = dmabuf->priv;
566 - if (!res->import_dma_buf)
567 + if (!buf->imported)
569 - res->import_dma_buf->ops->detach(res->import_dma_buf, attachment);
570 + buf->import.dma_buf->ops->detach(buf->import.dma_buf, attachment);
574 struct sg_table *vc_sm_import_map_dma_buf(struct dma_buf_attachment *attachment,
575 enum dma_data_direction direction)
577 - struct vc_sm_buffer *res = attachment->dmabuf->priv;
578 + struct vc_sm_buffer *buf = attachment->dmabuf->priv;
580 - if (!res->import_dma_buf)
581 + if (!buf->imported)
583 - return res->import_dma_buf->ops->map_dma_buf(attachment, direction);
584 + return buf->import.dma_buf->ops->map_dma_buf(attachment,
589 @@ -357,87 +660,88 @@ void vc_sm_import_unmap_dma_buf(struct d
590 struct sg_table *table,
591 enum dma_data_direction direction)
593 - struct vc_sm_buffer *res = attachment->dmabuf->priv;
594 + struct vc_sm_buffer *buf = attachment->dmabuf->priv;
596 - if (!res->import_dma_buf)
597 + if (!buf->imported)
599 - res->import_dma_buf->ops->unmap_dma_buf(attachment, table, direction);
600 + buf->import.dma_buf->ops->unmap_dma_buf(attachment, table, direction);
604 int vc_sm_import_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
606 - struct vc_sm_buffer *res = dmabuf->priv;
607 + struct vc_sm_buffer *buf = dmabuf->priv;
609 - pr_debug("%s: mmap dma_buf %p, res %p, imported db %p\n", __func__,
610 - dmabuf, res, res->import_dma_buf);
611 - if (!res->import_dma_buf) {
612 + pr_debug("%s: mmap dma_buf %p, buf %p, imported db %p\n", __func__,
613 + dmabuf, buf, buf->import.dma_buf);
614 + if (!buf->imported) {
615 pr_err("%s: mmap dma_buf %p - not an imported buffer\n",
619 - return res->import_dma_buf->ops->mmap(res->import_dma_buf, vma);
620 + return buf->import.dma_buf->ops->mmap(buf->import.dma_buf, vma);
624 void vc_sm_import_dma_buf_release(struct dma_buf *dmabuf)
626 - struct vc_sm_buffer *res = dmabuf->priv;
627 + struct vc_sm_buffer *buf = dmabuf->priv;
629 pr_debug("%s: Releasing dma_buf %p\n", __func__, dmabuf);
630 - if (!res->import_dma_buf)
631 + mutex_lock(&buf->lock);
632 + if (!buf->imported)
638 - vc_sm_release_resource(res, 0);
639 + vc_sm_vpu_free(buf);
641 + vc_sm_release_resource(buf);
645 void *vc_sm_import_dma_buf_kmap(struct dma_buf *dmabuf,
646 unsigned long offset)
648 - struct vc_sm_buffer *res = dmabuf->priv;
649 + struct vc_sm_buffer *buf = dmabuf->priv;
651 - if (!res->import_dma_buf)
652 + if (!buf->imported)
654 - return res->import_dma_buf->ops->map(res->import_dma_buf,
656 + return buf->import.dma_buf->ops->map(buf->import.dma_buf, offset);
660 void vc_sm_import_dma_buf_kunmap(struct dma_buf *dmabuf,
661 unsigned long offset, void *ptr)
663 - struct vc_sm_buffer *res = dmabuf->priv;
664 + struct vc_sm_buffer *buf = dmabuf->priv;
666 - if (!res->import_dma_buf)
667 + if (!buf->imported)
669 - res->import_dma_buf->ops->unmap(res->import_dma_buf,
671 + buf->import.dma_buf->ops->unmap(buf->import.dma_buf, offset, ptr);
675 int vc_sm_import_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
676 enum dma_data_direction direction)
678 - struct vc_sm_buffer *res = dmabuf->priv;
679 + struct vc_sm_buffer *buf = dmabuf->priv;
681 - if (!res->import_dma_buf)
682 + if (!buf->imported)
684 - return res->import_dma_buf->ops->begin_cpu_access(res->import_dma_buf,
686 + return buf->import.dma_buf->ops->begin_cpu_access(buf->import.dma_buf,
691 int vc_sm_import_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
692 enum dma_data_direction direction)
694 - struct vc_sm_buffer *res = dmabuf->priv;
695 + struct vc_sm_buffer *buf = dmabuf->priv;
697 - if (!res->import_dma_buf)
698 + if (!buf->imported)
700 - return res->import_dma_buf->ops->end_cpu_access(res->import_dma_buf,
701 + return buf->import.dma_buf->ops->end_cpu_access(buf->import.dma_buf,
705 @@ -516,9 +820,8 @@ vc_sm_cma_import_dmabuf_internal(struct
706 memcpy(import.name, VC_SM_RESOURCE_NAME_DEFAULT,
707 sizeof(VC_SM_RESOURCE_NAME_DEFAULT));
709 - pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u\n",
710 - __func__, import.name, import.type, &dma_addr,
712 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %pad, size %u.\n",
713 + __func__, import.name, import.type, &dma_addr, import.size);
715 /* Allocate the videocore buffer. */
716 status = vc_sm_cma_vchi_import(sm_state->sm_handle, &import, &result,
717 @@ -548,12 +851,14 @@ vc_sm_cma_import_dmabuf_internal(struct
718 buffer->size = import.size;
719 buffer->vpu_state = VPU_MAPPED;
721 - buffer->import_dma_buf = dma_buf;
722 + buffer->imported = 1;
723 + buffer->import.dma_buf = dma_buf;
725 - buffer->attach = attach;
727 + buffer->import.attach = attach;
728 + buffer->import.sgt = sgt;
729 buffer->dma_addr = dma_addr;
731 + buffer->kernel_id = import.kernel_id;
734 * We're done - we need to export a new dmabuf chaining through most
735 @@ -594,6 +899,91 @@ error:
739 +static int vc_sm_cma_vpu_alloc(u32 size, uint32_t align, const char *name,
740 + u32 mem_handle, struct vc_sm_buffer **ret_buffer)
742 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
743 + struct vc_sm_buffer *buffer = NULL;
747 + /* Align to the user requested align */
748 + aligned_size = ALIGN(size, align);
749 + /* and then to a page boundary */
750 + aligned_size = PAGE_ALIGN(aligned_size);
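+ /* (editor's example: size = 100000 with align = 4096: ALIGN() rounds
+ * up to 102400, which is already page aligned, so PAGE_ALIGN() leaves
+ * it unchanged at 25 pages) */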
755 + /* Allocate local buffer to track this allocation. */
756 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
757 + if (!buffer)
758 + return -ENOMEM;
760 + mutex_init(&buffer->lock);
762 + if (vc_sm_cma_buffer_allocate(sm_state->cma_heap, &buffer->alloc,
764 + pr_err("[%s]: cma alloc of %d bytes failed\n",
765 + __func__, aligned_size);
769 + buffer->sg_table = buffer->alloc.sg_table;
771 + pr_debug("[%s]: cma alloc of %d bytes success\n",
772 + __func__, aligned_size);
774 + if (dma_map_sg(&sm_state->pdev->dev, buffer->sg_table->sgl,
775 + buffer->sg_table->nents, DMA_BIDIRECTIONAL) <= 0) {
776 + pr_err("[%s]: dma_map_sg failed\n", __func__);
780 + INIT_LIST_HEAD(&buffer->attachments);
782 + memcpy(buffer->name, name,
783 + min(sizeof(buffer->name), strlen(name)));
785 + exp_info.ops = &dma_buf_ops;
786 + exp_info.size = aligned_size;
787 + exp_info.flags = O_RDWR;
788 + exp_info.priv = buffer;
790 + buffer->dma_buf = dma_buf_export(&exp_info);
791 + if (IS_ERR(buffer->dma_buf)) {
792 + ret = PTR_ERR(buffer->dma_buf);
795 + buffer->dma_addr = (uint32_t)sg_dma_address(buffer->sg_table->sgl);
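+ /* (editor's note: on BCM283x the 0xC0000000 bus alias is the uncached
+ * view of SDRAM, which is the alias the VPU expects for these buffers;
+ * the 0x0, 0x4 and 0x8 aliases are the cached views) */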
796 + if ((buffer->dma_addr & 0xC0000000) != 0xC0000000) {
797 + pr_err("%s: Expecting an uncached alias for dma_addr %pad\n",
798 + __func__, &buffer->dma_addr);
799 + buffer->dma_addr |= 0xC0000000;
801 + buffer->private = sm_state->vpu_allocs;
803 + buffer->vc_handle = mem_handle;
804 + buffer->vpu_state = VPU_MAPPED;
805 + buffer->vpu_allocated = 1;
806 + buffer->size = size;
807 + /*
808 + * Create an ID that will be passed along with our message so
809 + * that when we service the release reply, we can look up which
810 + * resource is being released.
811 + */
812 + buffer->kernel_id = get_kernel_id(buffer);
814 + vc_sm_add_resource(sm_state->vpu_allocs, buffer);
816 + *ret_buffer = buffer;
820 + vc_sm_release_resource(buffer);
825 vc_sm_vpu_event(struct sm_instance *instance, struct vc_sm_result_t *reply,
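
For reference (not part of the patch): get_kernel_id()/lookup_kernel_id()
used above wrap the kernelid_map idr declared in sm_state_t. A minimal
sketch of what they might look like, with GFP_ATOMIC assumed because the
allocation happens under the spinlock:

    /* Hypothetical minimal form of the driver's kernel_id helpers. */
    static int get_kernel_id(struct vc_sm_buffer *buffer)
    {
    	int id;

    	spin_lock(&sm_state->kernelid_map_lock);
    	/* GFP_ATOMIC because we allocate while holding a spinlock. */
    	id = idr_alloc(&sm_state->kernelid_map, buffer, 0, 0, GFP_ATOMIC);
    	spin_unlock(&sm_state->kernelid_map_lock);
    	return id;
    }

    static struct vc_sm_buffer *lookup_kernel_id(int id)
    {
    	struct vc_sm_buffer *buffer;

    	spin_lock(&sm_state->kernelid_map_lock);
    	buffer = idr_find(&sm_state->kernelid_map, id);
    	spin_unlock(&sm_state->kernelid_map_lock);
    	return buffer;
    }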
827 @@ -612,21 +1002,61 @@ vc_sm_vpu_event(struct sm_instance *inst
828 struct vc_sm_released *release = (struct vc_sm_released *)reply;
829 struct vc_sm_buffer *buffer =
830 lookup_kernel_id(release->kernel_id);
831 + if (!buffer) {
832 + pr_err("%s: VC released a buffer that is already released, kernel_id %d\n",
833 + __func__, release->kernel_id);
834 + break;
835 + }
836 + mutex_lock(&buffer->lock);
839 - * FIXME: Need to check buffer is still valid and allocated
840 - * before continuing
842 pr_debug("%s: Released addr %08x, size %u, id %08x, mem_handle %08x\n",
843 __func__, release->addr, release->size,
844 release->kernel_id, release->vc_handle);
845 - mutex_lock(&buffer->lock);
847 buffer->vc_handle = 0;
848 buffer->vpu_state = VPU_NOT_MAPPED;
849 - mutex_unlock(&buffer->lock);
850 free_kernel_id(release->kernel_id);
852 - vc_sm_release_resource(buffer, 0);
853 + if (buffer->vpu_allocated) {
854 + /* VPU allocation, so release the dmabuf which will
855 + * trigger the clean up.
857 + mutex_unlock(&buffer->lock);
858 + dma_buf_put(buffer->dma_buf);
859 + } else {
860 + vc_sm_release_resource(buffer);
864 + case VC_SM_MSG_TYPE_VC_MEM_REQUEST:
866 + struct vc_sm_buffer *buffer = NULL;
867 + struct vc_sm_vc_mem_request *req =
868 + (struct vc_sm_vc_mem_request *)reply;
869 + struct vc_sm_vc_mem_request_result reply;
872 + pr_debug("%s: Request %u bytes of memory, align %d name %s, trans_id %08x\n",
873 + __func__, req->size, req->align, req->name,
875 + ret = vc_sm_cma_vpu_alloc(req->size, req->align, req->name,
876 + req->vc_handle, &buffer);
878 + reply.trans_id = req->trans_id;
879 + if (!ret) {
880 + reply.addr = buffer->dma_addr;
881 + reply.kernel_id = buffer->kernel_id;
882 + pr_debug("%s: Allocated resource buffer %p, addr %pad\n",
883 + __func__, buffer, &buffer->dma_addr);
884 + } else {
885 + pr_err("%s: Allocation failed size %u, name %s, vc_handle %u\n",
886 + __func__, req->size, req->name, req->vc_handle);
888 + reply.kernel_id = 0;
889 + }
890 + vc_sm_vchi_client_vc_mem_req_reply(sm_state->sm_handle, &reply,
891 + &sm_state->int_trans_id);
896 @@ -645,6 +1075,14 @@ static void vc_sm_connected_init(void)
898 pr_info("[%s]: start\n", __func__);
900 + if (vc_sm_cma_add_heaps(&sm_state->cma_heap) ||
901 + !sm_state->cma_heap) {
902 + pr_err("[%s]: failed to initialise CMA heaps\n",
909 * Initialize and create a VCHI connection for the shared memory service
910 * running on videocore.
911 @@ -696,7 +1134,7 @@ static void vc_sm_connected_init(void)
912 goto err_remove_shared_memory;
915 - version.version = 1;
916 + version.version = 2;
917 ret = vc_sm_cma_vchi_client_version(sm_state->sm_handle, &version,
919 &sm_state->int_trans_id);
920 @@ -768,7 +1206,7 @@ static int bcm2835_vc_sm_cma_remove(stru
921 int vc_sm_cma_int_handle(void *handle)
923 struct dma_buf *dma_buf = (struct dma_buf *)handle;
924 - struct vc_sm_buffer *res;
925 + struct vc_sm_buffer *buf;
927 /* Validate we can work with this device. */
928 if (!sm_state || !handle) {
929 @@ -776,8 +1214,8 @@ int vc_sm_cma_int_handle(void *handle)
933 - res = (struct vc_sm_buffer *)dma_buf->priv;
934 - return res->vc_handle;
935 + buf = (struct vc_sm_buffer *)dma_buf->priv;
936 + return buf->vc_handle;
938 EXPORT_SYMBOL_GPL(vc_sm_cma_int_handle);
940 @@ -804,7 +1242,7 @@ EXPORT_SYMBOL_GPL(vc_sm_cma_free);
941 int vc_sm_cma_import_dmabuf(struct dma_buf *src_dmabuf, void **handle)
943 struct dma_buf *new_dma_buf;
944 - struct vc_sm_buffer *res;
945 + struct vc_sm_buffer *buf;
948 /* Validate we can work with this device. */
949 @@ -818,7 +1256,7 @@ int vc_sm_cma_import_dmabuf(struct dma_b
952 pr_debug("%s: imported to ptr %p\n", __func__, new_dma_buf);
953 - res = (struct vc_sm_buffer *)new_dma_buf->priv;
954 + buf = (struct vc_sm_buffer *)new_dma_buf->priv;
956 /* Assign valid handle at this time.*/
957 *handle = new_dma_buf;
958 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
959 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm.h
961 #include <linux/types.h>
962 #include <linux/miscdevice.h>
964 +#include "vc_sm_cma.h"
966 #define VC_SM_MAX_NAME_LEN 32
968 enum vc_sm_vpu_mapping_state {
969 @@ -29,31 +31,51 @@ enum vc_sm_vpu_mapping_state {
973 +struct vc_sm_imported {
974 + struct dma_buf *dma_buf;
975 + struct dma_buf_attachment *attach;
976 + struct sg_table *sgt;
979 struct vc_sm_buffer {
980 struct list_head global_buffer_list; /* Global list of buffers. */
982 + /* Index in the kernel_id idr so that we can find the
983 + * mmal_msg_context again when servicing the VCHI reply.
989 /* Lock over all the following state for this buffer */
991 - struct sg_table *sg_table;
992 struct list_head attachments;
994 char name[VC_SM_MAX_NAME_LEN];
996 int in_use:1; /* Kernel is still using this resource */
997 + int imported:1; /* Imported dmabuf */
999 + struct sg_table *sg_table;
1001 enum vc_sm_vpu_mapping_state vpu_state;
1002 u32 vc_handle; /* VideoCore handle for this buffer */
1003 + int vpu_allocated; /*
1004 + * The VPU made this allocation. Release the
1005 + * local dma_buf when the VPU releases the
1009 /* DMABUF related fields */
1010 - struct dma_buf *import_dma_buf;
1011 struct dma_buf *dma_buf;
1012 - struct dma_buf_attachment *attach;
1013 - struct sg_table *sgt;
1014 dma_addr_t dma_addr;
1016 struct vc_sm_privdata_t *private;
1019 + struct vc_sm_cma_alloc_data alloc;
1020 + struct vc_sm_imported import;
1026 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.c
1028 +// SPDX-License-Identifier: GPL-2.0
1030 + * VideoCore Shared Memory CMA allocator
1032 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1034 + * Based on the Android ION allocator
1035 + * Copyright (C) Linaro 2012
1036 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
1040 +#include <linux/slab.h>
1041 +#include <linux/errno.h>
1042 +#include <linux/err.h>
1043 +#include <linux/cma.h>
1044 +#include <linux/scatterlist.h>
1046 +#include "vc_sm_cma.h"
1048 +/* CMA heap operations functions */
1049 +int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
1050 + struct vc_sm_cma_alloc_data *buffer,
1051 + unsigned long len)
1053 + /* len should already be page aligned */
1054 + unsigned long num_pages = len / PAGE_SIZE;
1055 + struct sg_table *table;
1056 + struct page *pages;
1059 + pages = cma_alloc(cma_heap, num_pages, 0, GFP_KERNEL);
1063 + table = kmalloc(sizeof(*table), GFP_KERNEL);
1067 + ret = sg_alloc_table(table, 1, GFP_KERNEL);
1071 + sg_set_page(table->sgl, pages, len, 0);
1073 + buffer->priv_virt = pages;
1074 + buffer->sg_table = table;
1075 + buffer->cma_heap = cma_heap;
1076 + buffer->num_pages = num_pages;
1082 + cma_release(cma_heap, pages, num_pages);
1086 +void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer)
1088 + struct cma *cma_heap = buffer->cma_heap;
1089 + struct page *pages = buffer->priv_virt;
1091 + /* release memory */
1093 + cma_release(cma_heap, pages, buffer->num_pages);
1095 + /* release sg table */
1096 + if (buffer->sg_table) {
1097 + sg_free_table(buffer->sg_table);
1098 + kfree(buffer->sg_table);
1099 + buffer->sg_table = NULL;
1103 +int __vc_sm_cma_add_heaps(struct cma *cma, void *priv)
1105 + struct cma **heap = (struct cma **)priv;
1106 + const char *name = cma_get_name(cma);
1109 + phys_addr_t phys_addr = cma_get_base(cma);
1111 + pr_debug("%s: Adding cma heap %s (start %pap, size %lu) for use by vcsm\n",
1112 + __func__, name, &phys_addr, cma_get_size(cma));
1115 + pr_err("%s: Ignoring heap %s as already set\n",
1122 +int vc_sm_cma_add_heaps(struct cma **cma_heap)
1124 + cma_for_each_area(__vc_sm_cma_add_heaps, cma_heap);
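
For reference (not part of the patch): a hypothetical walk-through of this
allocator's lifecycle, assuming <linux/sizes.h> for SZ_64K; the function
name is invented.

    #include <linux/cma.h>
    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <linux/sizes.h>
    #include "vc_sm_cma.h"

    static int vc_sm_cma_smoke_test(void)
    {
    	struct cma *heap = NULL;
    	struct vc_sm_cma_alloc_data alloc = {};

    	vc_sm_cma_add_heaps(&heap);	/* binds the first CMA area found */
    	if (!heap)
    		return -ENODEV;

    	if (vc_sm_cma_buffer_allocate(heap, &alloc, PAGE_ALIGN(SZ_64K)))
    		return -ENOMEM;

    	/* One contiguous chunk: alloc.sg_table has a single entry. */
    	pr_info("vc-sm-cma: allocated %lu pages\n", alloc.num_pages);

    	vc_sm_cma_buffer_free(&alloc);
    	return 0;
    }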
1128 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma.h
1130 +/* SPDX-License-Identifier: GPL-2.0 */
1133 + * VideoCore Shared Memory CMA allocator
1135 + * Copyright: 2018, Raspberry Pi (Trading) Ltd
1137 + * Based on the Android ION allocator
1138 + * Copyright (C) Linaro 2012
1139 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
1141 + * This software is licensed under the terms of the GNU General Public
1142 + * License version 2, as published by the Free Software Foundation, and
1143 + * may be copied, distributed, and modified under those terms.
1145 + * This program is distributed in the hope that it will be useful,
1146 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1147 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1148 + * GNU General Public License for more details.
1151 +#ifndef VC_SM_CMA_H
1152 +#define VC_SM_CMA_H
1154 +struct vc_sm_cma_alloc_data {
1155 + struct cma *cma_heap;
1156 + unsigned long num_pages;
1158 + struct sg_table *sg_table;
1161 +int vc_sm_cma_buffer_allocate(struct cma *cma_heap,
1162 + struct vc_sm_cma_alloc_data *buffer,
1163 + unsigned long len);
1164 +void vc_sm_cma_buffer_free(struct vc_sm_cma_alloc_data *buffer);
1166 +int vc_sm_cma_add_heaps(struct cma **cma_heap);
1169 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
1170 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.c
1171 @@ -500,3 +500,13 @@ int vc_sm_cma_vchi_client_version(struct
1172 msg, sizeof(*msg), NULL, 0,
1176 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
1177 + struct vc_sm_vc_mem_request_result *msg,
1178 + uint32_t *cur_trans_id)
1180 + return vc_sm_cma_vchi_send_msg(handle,
1181 + VC_SM_MSG_TYPE_VC_MEM_REQUEST_REPLY,
1182 + msg, sizeof(*msg), 0, 0, cur_trans_id,
1185 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
1186 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_cma_vchi.h
1187 @@ -56,4 +56,8 @@ int vc_sm_cma_vchi_client_version(struct
1188 struct vc_sm_result_t *result,
1191 +int vc_sm_vchi_client_vc_mem_req_reply(struct sm_instance *handle,
1192 + struct vc_sm_vc_mem_request_result *msg,
1193 + uint32_t *cur_trans_id);
1195 #endif /* __VC_SM_CMA_VCHI_H__INCLUDED__ */
1196 --- a/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
1197 +++ b/drivers/staging/vc04_services/vc-sm-cma/vc_sm_defs.h
1198 @@ -264,6 +264,8 @@ struct vc_sm_vc_mem_request {
1200 /* resource name (for easier tracking) */
1201 char name[VC_SM_RESOURCE_NAME];
1202 + /* VPU handle for the resource */
1203 + u32 vc_handle;
1206 /* Response from the kernel to provide the VPU with some memory */