drm/i915: Mark up expected timeline locks for i915_active
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 12:10:00 +0000 (13:10 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 17:02:07 +0000 (18:02 +0100)
As every i915_active_request should be serialised by a dedicated lock,
i915_active consists of a tree of locks; one for each node. Mark up
each i915_active_request with the lock that is supposed to be guarding
it so that we can verify that the serialised updates are indeed
serialised.
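
The pattern, in outline (a condensed sketch of the i915_active.h and
i915_active_types.h changes below; the lock field and the assertion
are debug-only, compiled under CONFIG_DRM_I915_DEBUG_GEM):

	/* At construction, record the mutex expected to guard updates */
	static inline void
	i915_active_request_init(struct i915_active_request *active,
				 struct mutex *lock,
				 struct i915_request *rq,
				 i915_active_retire_fn retire)
	{
		RCU_INIT_POINTER(active->request, rq);
		INIT_LIST_HEAD(&active->link);
		active->retire = retire ?: i915_active_retire_noop;
	#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
		active->lock = lock;	/* the expected serialising lock */
	#endif
	}

	/* ... and on every update, assert that the recorded lock is held */
	#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
		lockdep_assert_held(active->lock);
	#endif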

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-2-chris@chris-wilson.co.uk
13 files changed:
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_engine_pool.h
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_active_types.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/selftests/i915_active.c

index e1248eace0e1690b635cbce18077d48e72f5a73a..eca41c4a5aa6afd2b9a4ad37ed2e488f6a5dc790 100644 (file)
@@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
        if (IS_ERR(rq))
                return rq;
 
-       err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+       err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
        if (err) {
                i915_request_add(rq);
                return ERR_PTR(err);
index ac14677dd537f387512b62ac5852b805c6adc333..2536d1f54629fe75d15b17d1bdb1cf99ba35e3ac 100644 (file)
@@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work)
         * keep track of the GPU activity within this vma/request, and
         * propagate the signal from the request to w->dma.
         */
-       err = i915_active_ref(&vma->active, rq->fence.context, rq);
+       err = i915_active_ref(&vma->active, rq->timeline, rq);
        if (err)
                goto out_request;
 
index a6b0cb714292d3252fb067319beb5adbef6d58f9..cd1fd2e5423ac1a3a3f1b22a3e5e7762ebc169ae 100644 (file)
@@ -908,7 +908,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
                if (emit)
                        err = emit(rq, data);
                if (err == 0)
-                       err = i915_active_ref(&cb->base, rq->fence.context, rq);
+                       err = i915_active_ref(&cb->base, rq->timeline, rq);
 
                i915_request_add(rq);
                if (err)
index 9114953bf920549a870051d18a99569e73667e0b..f55691d151aec2c4106ac11c7899bb04b3086700 100644 (file)
@@ -306,10 +306,10 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
                /* Queue this switch after current activity by this context. */
                err = i915_active_request_set(&tl->last_request, rq);
+               mutex_unlock(&tl->mutex);
                if (err)
-                       goto unlock;
+                       return err;
        }
-       lockdep_assert_held(&tl->mutex);
 
        /*
         * Guarantee context image and the timeline remains pinned until the
@@ -319,12 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
         * words transfer the pinned ce object to tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
-       err = i915_active_ref(&ce->active, rq->fence.context, rq);
-
-unlock:
-       if (rq->timeline != tl)
-               mutex_unlock(&tl->mutex);
-       return err;
+       return i915_active_ref(&ce->active, rq->timeline, rq);
 }
 
 struct i915_request *intel_context_create_request(struct intel_context *ce)
index f7a0a660c1c9a960f0cd3248463eb413277330d1..8d069efd9457600c8d232994de84a08c6d932284 100644 (file)
@@ -18,7 +18,7 @@ static inline int
 intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
                              struct i915_request *rq)
 {
-       return i915_active_ref(&node->active, rq->fence.context, rq);
+       return i915_active_ref(&node->active, rq->timeline, rq);
 }
 
 static inline void
index eafd94d5e211092c5d99f206a5e5d6d5815385a7..02fbe11b671be36507b2e1b8f480729c51f55c79 100644 (file)
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->last_request);
+       INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
@@ -440,8 +440,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
         * free it after the current request is retired, which ensures that
         * all writes into the cacheline from previous requests are complete.
         */
-       err = i915_active_ref(&tl->hwsp_cacheline->active,
-                             tl->fence_context, rq);
+       err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
        if (err)
                goto err_cacheline;
 
@@ -492,7 +491,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
 static int cacheline_ref(struct intel_timeline_cacheline *cl,
                         struct i915_request *rq)
 {
-       return i915_active_ref(&cl->active, rq->fence.context, rq);
+       return i915_active_ref(&cl->active, rq->timeline, rq);
 }
 
 int intel_timeline_read_hwsp(struct i915_request *from,
index d5411369774578659b5e205785cb34d459a762cf..3214814031650e5844e5991622e168776b9c9fa3 100644 (file)
@@ -689,7 +689,9 @@ static int live_hwsp_wrap(void *arg)
 
                tl->seqno = -4u;
 
+               mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
                err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+               mutex_unlock(&tl->mutex);
                if (err) {
                        i915_request_add(rq);
                        goto out;
@@ -704,7 +706,9 @@ static int live_hwsp_wrap(void *arg)
                }
                hwsp_seqno[0] = tl->hwsp_seqno;
 
+               mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
                err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+               mutex_unlock(&tl->mutex);
                if (err) {
                        i915_request_add(rq);
                        goto out;
index 5c549205828a935cca6406b4166b1f53af300db3..598170efcaf60cc793916f7165c221130b617333 100644 (file)
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->last_request);
+       INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
index 2439c4f62ad83847374143f1bfd131468fdfbc0a..df6164591702295716368bb0f380fd7d483c521d 100644 (file)
@@ -169,10 +169,11 @@ node_retire(struct i915_active_request *base, struct i915_request *rq)
 }
 
 static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
 {
        struct active_node *node, *prealloc;
        struct rb_node **p, *parent;
+       u64 idx = tl->fence_context;
 
        /*
         * We track the most recently used timeline to skip a rbtree search
@@ -211,7 +212,7 @@ active_instance(struct i915_active *ref, u64 idx)
        }
 
        node = prealloc;
-       i915_active_request_init(&node->base, NULL, node_retire);
+       i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
        node->ref = ref;
        node->timeline = idx;
 
@@ -294,18 +295,20 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
 }
 
 int i915_active_ref(struct i915_active *ref,
-                   u64 timeline,
+                   struct intel_timeline *tl,
                    struct i915_request *rq)
 {
        struct i915_active_request *active;
        int err;
 
+       lockdep_assert_held(&tl->mutex);
+
        /* Prevent reaping in case we malloc/wait while building the tree */
        err = i915_active_acquire(ref);
        if (err)
                return err;
 
-       active = active_instance(ref, timeline);
+       active = active_instance(ref, tl);
        if (!active) {
                err = -ENOMEM;
                goto out;
@@ -596,6 +599,10 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                                goto unwind;
                        }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+                       node->base.lock =
+                               &engine->kernel_context->timeline->mutex;
+#endif
                        RCU_INIT_POINTER(node->base.request, NULL);
                        node->base.retire = node_retire;
                        node->timeline = idx;
@@ -701,6 +708,10 @@ int i915_active_request_set(struct i915_active_request *active,
 {
        int err;
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+       lockdep_assert_held(active->lock);
+#endif
+
        /* Must maintain ordering wrt previous active requests */
        err = i915_request_await_active_request(rq, active);
        if (err)
index f6d730cf2fe690d264062291677a80c62eaa1795..f95058f99057b0c5de25e489298c1e564cae0ea8 100644 (file)
@@ -58,15 +58,20 @@ void i915_active_retire_noop(struct i915_active_request *active,
  */
 static inline void
 i915_active_request_init(struct i915_active_request *active,
+                        struct mutex *lock,
                         struct i915_request *rq,
                         i915_active_retire_fn retire)
 {
        RCU_INIT_POINTER(active->request, rq);
        INIT_LIST_HEAD(&active->link);
        active->retire = retire ?: i915_active_retire_noop;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+       active->lock = lock;
+#endif
 }
 
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_REQUEST(name, lock) \
+       i915_active_request_init((name), (lock), NULL, NULL)
 
 /**
  * i915_active_request_set - updates the tracker to watch the current request
@@ -81,6 +86,9 @@ static inline void
 __i915_active_request_set(struct i915_active_request *active,
                          struct i915_request *request)
 {
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+       lockdep_assert_held(active->lock);
+#endif
        list_move(&active->link, &request->active_list);
        rcu_assign_pointer(active->request, request);
 }
@@ -362,7 +370,7 @@ void __i915_active_init(struct drm_i915_private *i915,
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
-                   u64 timeline,
+                   struct intel_timeline *tl,
                    struct i915_request *rq);
 
 int i915_active_wait(struct i915_active *ref);
index ae3ee441c1146460a07ca00a78754cc9d9e0fb5b..1854e7d168c1e0ffde852dc30b0f537b6ded6bce 100644 (file)
@@ -24,6 +24,21 @@ struct i915_active_request {
        struct i915_request __rcu *request;
        struct list_head link;
        i915_active_retire_fn retire;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+       /*
+        * Incorporeal!
+        *
+        * Updates to the i915_active_request must be serialised under a lock
+        * to ensure that the timeline is ordered. Normally, this is the
+        * timeline->mutex, but another mutex may be used so long as it is
+        * done so consistently.
+        *
+        * For lockdep tracking of the above, we store the lock we intend
+        * to always use for updates of this i915_active_request during
+        * construction and assert that is held on every update.
+        */
+       struct mutex *lock;
+#endif
 };
 
 struct active_node;
index d38ef2ef3ce474a67afa121fb8d3e30154a64507..68260b40982bb07f222503bdf658c51ec70d907c 100644 (file)
@@ -903,14 +903,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
-       err = i915_active_ref(&vma->active, rq->fence.context, rq);
+       err = i915_active_ref(&vma->active, rq->timeline, rq);
        if (unlikely(err))
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
                if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
                        i915_active_ref(&obj->frontbuffer->write,
-                                       rq->fence.context,
+                                       rq->timeline,
                                        rq);
 
                reservation_object_add_excl_fence(vma->resv, &rq->fence);
index e5cd5d47e380b708d659318816fcea0d008267c8..77d844ac8b713e9aae4879a0f8a53f6f53f29d6d 100644 (file)
@@ -110,8 +110,7 @@ __live_active_setup(struct drm_i915_private *i915)
                                                       submit,
                                                       GFP_KERNEL);
                if (err >= 0)
-                       err = i915_active_ref(&active->base,
-                                             rq->fence.context, rq);
+                       err = i915_active_ref(&active->base, rq->timeline, rq);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
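
The caller-side contract after this change, in outline (a sketch drawn
from the hunks above, not a new API): i915_active_ref() now takes the
timeline rather than its u64 fence context, and asserts that the
timeline mutex is held. In the normal request-construction paths
(e.g. i915_vma_move_to_active) rq->timeline->mutex is already held, so
callers simply pass rq->timeline:

	/* inside i915_active_ref(): lockdep_assert_held(&tl->mutex); */
	err = i915_active_ref(&vma->active, rq->timeline, rq);

Only the selftests, which call intel_timeline_get_seqno() outside the
usual request flow, need to take the mutex explicitly, hence the
mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING) additions in
selftest_timeline.c.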