drm/i915: Add smp_rmb() to busy ioctl's RCU dance
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 9 Aug 2016 08:23:33 +0000 (09:23 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Tue, 9 Aug 2016 09:17:26 +0000 (10:17 +0100)
In the debate as to whether the second read of active->request is
ordered after the reads that depend on the first read of
active->request, just give in and throw an smp_rmb() in there so that
the ordering of loads is assured.
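
A minimal sketch of the pattern in question, with the new barrier in
place. The example_* names are illustrative only (the driver's real
types are struct i915_gem_active and struct drm_i915_gem_request, and
the real code is in __busy_set_if_active() below):

    #include <linux/atomic.h>
    #include <linux/compiler.h>
    #include <linux/rcupdate.h>

    struct example_request {
            atomic_t ref;
            unsigned int engine_id;
    };

    struct example_active {
            struct example_request __rcu *request;
    };

    static unsigned int example_busy_id(const struct example_active *active)
    {
            struct example_request *rq;
            unsigned int id;

            rcu_read_lock();
            do {
                    rq = rcu_dereference(active->request);
                    if (!rq) {
                            id = 0;
                            break;
                    }

                    /* Loads through rq may race with reallocation. */
                    id = READ_ONCE(rq->engine_id);

                    /*
                     * Order the load of rq->engine_id before the
                     * confirming re-read of active->request below;
                     * this is the smp_rmb() the patch adds.
                     */
                    smp_rmb();
            } while (rq != rcu_access_pointer(active->request));
            rcu_read_unlock();

            return id;
    }

If the confirming re-read sees a different pointer, the id just loaded
may have come from a reallocated request, so the loop retries from a
fresh pointer instead of using it.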

v2: Explain the manual smp_rmb()

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470731014-6894-1-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.h

index bb83069ce0169eb95c784f41a5092769bfe0b02e..373136f57d7b3e1f5747cd0e3755e25d72ea5ef5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3733,7 +3733,7 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
        i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
 }
 
-static __always_inline unsigned __busy_read_flag(unsigned int id)
+static __always_inline unsigned int __busy_read_flag(unsigned int id)
 {
        /* Note that we could alias engines in the execbuf API, but
         * that would be very unwise as it prevents userspace from
@@ -3751,7 +3751,7 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
        return id;
 }
 
-static __always_inline unsigned
+static __always_inline unsigned int
 __busy_set_if_active(const struct i915_gem_active *active,
                     unsigned int (*flag)(unsigned int id))
 {
@@ -3768,19 +3768,45 @@ __busy_set_if_active(const struct i915_gem_active *active,
 
                id = request->engine->exec_id;
 
-               /* Check that the pointer wasn't reassigned and overwritten. */
+               /* Check that the pointer wasn't reassigned and overwritten.
+                *
+                * In __i915_gem_active_get_rcu(), we enforce ordering between
+                * the first rcu pointer dereference (imposing a
+                * read-dependency only on access through the pointer) and
+                * the second lockless access through the memory barrier
+                * following a successful atomic_inc_not_zero(). Here there
+                * is no such barrier, and so we must manually insert an
+                * explicit read barrier to ensure that the following
+                * access occurs after all the loads through the first
+                * pointer.
+                *
+                * It is worth comparing this sequence with
+                * raw_write_seqcount_latch(), which operates very similarly.
+                * The challenge here is the visibility of the other CPU
+                * writes to the reallocated request vs the local CPU ordering.
+                * Before the other CPU can overwrite the request, it will
+                * have updated our active->request and gone through a wmb.
+                * During the read here, we want to make sure that the values
+                * we see have not been overwritten as we do so - and we do
+                * that by serialising the second pointer check with the writes
+                * on other CPUs.
+                *
+                * The corresponding write barrier is part of
+                * rcu_assign_pointer().
+                */
+               smp_rmb();
                if (request == rcu_access_pointer(active->request))
                        return flag(id);
        } while (1);
 }
 
-static inline unsigned
+static __always_inline unsigned int
 busy_check_reader(const struct i915_gem_active *active)
 {
        return __busy_set_if_active(active, __busy_read_flag);
 }
 
-static inline unsigned
+static __always_inline unsigned int
 busy_check_writer(const struct i915_gem_active *active)
 {
        return __busy_set_if_active(active, __busy_write_id);
index f6661f31a34845a90adb25b6efadf707468d3410..6dd83b172c7ad3963b4a0c35396d1faf4c8dc3f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -490,6 +490,9 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
                 * incremented) then the following read for rcu_access_pointer()
                 * must occur after the atomic operation and so confirm
                 * that this request is the one currently being tracked.
+                *
+                * The corresponding write barrier is part of
+                * rcu_assign_pointer().
                 */
                if (!request || request == rcu_access_pointer(active->request))
                        return rcu_pointer_handoff(request);
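
For contrast, here is a hedged sketch of the reference-taking path that
the comment above describes, reusing the illustrative example_* types
from the earlier sketch. On this path the full barrier implied by a
successful atomic_inc_not_zero() provides the ordering, so no manual
smp_rmb() is required:

    static void example_request_put(struct example_request *rq)
    {
            /* The real driver recycles requests through a
             * SLAB_DESTROY_BY_RCU cache; a plain decrement is
             * enough for this sketch.
             */
            atomic_dec(&rq->ref);
    }

    /* Caller must hold rcu_read_lock(). */
    static struct example_request *
    example_active_get_rcu(const struct example_active *active)
    {
            do {
                    struct example_request *rq;

                    rq = rcu_dereference(active->request);
                    if (!rq)
                            return NULL;

                    /* A failed increment means the request is
                     * already being freed; reload and retry.
                     */
                    if (!atomic_inc_not_zero(&rq->ref))
                            continue;

                    /*
                     * The successful atomic_inc_not_zero() above
                     * implies full memory barriers on either side,
                     * so this re-read is ordered after it and
                     * confirms rq is still the tracked request.
                     */
                    if (rq == rcu_access_pointer(active->request))
                            return rcu_pointer_handoff(rq);

                    example_request_put(rq);
            } while (1);
    }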