drm/i915: Hold reference to previous active fence as we queue
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 3 Feb 2020 09:41:47 +0000 (09:41 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 3 Feb 2020 11:25:39 +0000 (11:25 +0000)
Take a reference to the previous exclusive fence on the i915_active, as
we wish to add an await to it in the caller (and so must prevent it from
being freed until we have completed that task).

Fixes: e3793468b466 ("drm/i915: Use the async worker to avoid reclaim tainting the ggtt->mutex")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200203094152.4150550-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_vma.c

index da58e5d084f4c0fd343c88604e095fe6793bdac0..9ccb931a733e8a087efb73dc3bf5d31425318505 100644 (file)
@@ -398,9 +398,13 @@ i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
        /* We expect the caller to manage the exclusive timeline ordering */
        GEM_BUG_ON(i915_active_is_idle(ref));
 
+       rcu_read_lock();
        prev = __i915_active_fence_set(&ref->excl, f);
-       if (!prev)
+       if (prev)
+               prev = dma_fence_get_rcu(prev);
+       else
                atomic_inc(&ref->count);
+       rcu_read_unlock();
 
        return prev;
 }
index e801e28de4705c3222907faa626fec8065179546..74dc3ba59ce52784af84f23adb6aecd3a83746ca 100644 (file)
@@ -422,10 +422,12 @@ int i915_vma_bind(struct i915_vma *vma,
                 * execution and not content or object's backing store lifetime.
                 */
                prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
-               if (prev)
+               if (prev) {
                        __i915_sw_fence_await_dma_fence(&work->base.chain,
                                                        prev,
                                                        &work->cb);
+                       dma_fence_put(prev);
+               }
 
                work->base.dma.error = 0; /* enable the queue_work() */