dma-buf/fence: Fix lock inversion within dma-fence-array
author		Chris Wilson <chris@chris-wilson.co.uk>
		Tue, 14 Nov 2017 16:27:19 +0000 (16:27 +0000)
committer	Sumit Semwal <sumit.semwal@linaro.org>
		Tue, 21 Nov 2017 17:07:05 +0000 (22:37 +0530)
Ages ago, Rob Clark noted,

"Currently with fence-array, we have a potential deadlock situation.  If
we fence_add_callback() on an array-fence, the array-fence's lock is
acquired first, and in its ->enable_signaling() callback, it will install
cbs on its array-member fences, so the array-member's lock is acquired
second.

But in the signal path, the array-member's lock is acquired first, and
the array-fence's lock acquired second."

Rob proposed either extensive changes to dma-fence to unnest the
fence-array signaling, or to defer the signaling onto a workqueue. This
is a more refined version of the latter: it keeps the latency of the
fence signaling to a minimum by using an irq_work, which is executed as
soon as possible.
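
For context, the fix below relies on the generic irq_work facility. A
minimal, self-contained sketch of that pattern (the deferred_signal
names are illustrative only, not part of the patch) might look like:

	#include <linux/irq_work.h>
	#include <linux/printk.h>

	struct deferred_signal {
		struct irq_work work;
		int id;	/* stand-in for the state the deferred step needs */
	};

	/* Runs very shortly after irq_work_queue(), but outside any lock
	 * held by the caller, which is what breaks the nesting above. */
	static void deferred_signal_func(struct irq_work *wrk)
	{
		struct deferred_signal *ds = container_of(wrk, typeof(*ds), work);

		pr_debug("completing deferred signal %d\n", ds->id);
	}

	/* Done once at setup time, mirroring init_irq_work() in create(). */
	static void deferred_signal_init(struct deferred_signal *ds)
	{
		init_irq_work(&ds->work, deferred_signal_func);
	}

	/* Called from the context that holds the member fence's lock. */
	static void deferred_signal_queue(struct deferred_signal *ds)
	{
		irq_work_queue(&ds->work);
	}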

Reported-by: Rob Clark <robdclark@gmail.com>
Suggested-by: Rob Clark <robdclark@gmail.com>
References: 1476635975-21981-1-git-send-email-robdclark@gmail.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20171114162719.30958-1-chris@chris-wilson.co.uk
drivers/base/Kconfig
drivers/dma-buf/dma-fence-array.c
include/linux/dma-fence-array.h

index 1a5f6a157a57d7bf13b85ff63d5ed33e2198aeba..62b0de06836e1e2eba9b098fc5a40a9de5d5b432 100644 (file)
@@ -244,6 +244,7 @@ config DMA_SHARED_BUFFER
        bool
        default n
        select ANON_INODES
+       select IRQ_WORK
        help
          This option enables the framework for buffer-sharing between
          multiple drivers. A buffer is associated with a file using driver
index 0350829ba62e76d8a2194d9d71e87fc04ac9d156..dd1edfb27b61a3a24f64f1372f904467390e8fe4 100644 (file)
@@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
        return "unbound";
 }
 
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+       struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+       dma_fence_signal(&array->base);
+       dma_fence_put(&array->base);
+}
+
 static void dma_fence_array_cb_func(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
 {
@@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
        struct dma_fence_array *array = array_cb->array;
 
        if (atomic_dec_and_test(&array->num_pending))
-               dma_fence_signal(&array->base);
-       dma_fence_put(&array->base);
+               irq_work_queue(&array->work);
+       else
+               dma_fence_put(&array->base);
 }
 
 static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
        spin_lock_init(&array->lock);
        dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
                       context, seqno);
+       init_irq_work(&array->work, irq_dma_fence_array_work);
 
        array->num_fences = num_fences;
        atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
index 332a5420243c44f1b9e8c4d5bbf20d64637b1950..bc8940ca280dc74c36c706dc6d7c831a39381cc4 100644 (file)
@@ -21,6 +21,7 @@
 #define __LINUX_DMA_FENCE_ARRAY_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 
 /**
  * struct dma_fence_array_cb - callback helper for fence array
@@ -47,6 +48,8 @@ struct dma_fence_array {
        unsigned num_fences;
        atomic_t num_pending;
        struct dma_fence **fences;
+
+       struct irq_work work;
 };
 
 extern const struct dma_fence_ops dma_fence_array_ops;