drm/i915/selftests: Apply a heavy handed flush to i915_active
author     Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 6 Mar 2020 13:38:36 +0000 (13:38 +0000)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Sat, 7 Mar 2020 00:05:54 +0000 (00:05 +0000)
Due to the ordering of cmpxchg()/dma_fence_signal() inside node_retire(),
we must also use xchg() as our primary memory barrier to flush the
outstanding callbacks after the expected completion of the i915_active.
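
For context, the retirement callback clears the fence slot before it
finishes the rest of its work, so a waiter that merely observes a NULL
slot (or bounces fence->lock) cannot assume the callback has completed.
A rough sketch of that shape; the names and structure here (including
to_active_node()) are simplified for illustration, not the driver's
exact code:

	/*
	 * Rough sketch only: simplified from i915_active.c, not the
	 * exact code. The callback is invoked from dma_fence_signal()
	 * with fence->lock held.
	 */
	static void node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct i915_active_fence *active =
			container_of(cb, typeof(*active), cb);

		/* The slot is cleared first ... */
		if (cmpxchg(__active_fence_slot(active), fence, NULL) != fence)
			return;

		/*
		 * ... and only afterwards is the i915_active reference
		 * dropped, so the waiter must instead steal the slot with
		 * xchg() (a full barrier) and perform the retirement
		 * itself, as active_flush() below does.
		 */
		active_retire(to_active_node(active)->ref); /* illustrative */
	}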

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200306133852.3420322-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/selftests/i915_active.c

index 3a37c67ab6c42338c2490979c7361e4b74c40f33..68bbb158016263d79024c9ef96eee16bf7151314 100644
@@ -311,20 +311,33 @@ static void spin_unlock_wait(spinlock_t *lock)
        spin_unlock_irq(lock);
 }
 
+static void active_flush(struct i915_active *ref,
+                        struct i915_active_fence *active)
+{
+       struct dma_fence *fence;
+
+       fence = xchg(__active_fence_slot(active), NULL);
+       if (!fence)
+               return;
+
+       spin_lock_irq(fence->lock);
+       __list_del_entry(&active->cb.node);
+       spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+       atomic_dec(&ref->count);
+
+       GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
 void i915_active_unlock_wait(struct i915_active *ref)
 {
        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;
 
+               /* Wait for all active callbacks */
                rcu_read_lock();
-               rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-                       struct dma_fence *f;
-
-                       /* Wait for all active callbacks */
-                       f = rcu_dereference(it->base.fence);
-                       if (f)
-                               spin_unlock_wait(f->lock);
-               }
+               active_flush(ref, &ref->excl);
+               rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+                       active_flush(ref, &it->base);
                rcu_read_unlock();
 
                i915_active_release(ref);