drm/i915: Drain the device workqueue on unload
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 18 Jul 2017 13:41:24 +0000 (14:41 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Wed, 19 Jul 2017 12:19:24 +0000 (13:19 +0100)
Workers on the i915->wq may rearm themselves, so for completeness we need
to replace our flush_workqueue() with a call to drain_workqueue() before
unloading the device.
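
For illustration, a minimal sketch of the problem (the worker, helper and
struct member below are hypothetical, not the driver's actual code): a work
item that requeues itself can outlive a single flush_workqueue(), whereas
drain_workqueue() keeps flushing until the queue stays empty.

/* Hypothetical self-rearming worker on i915->wq; illustrative only. */
static void example_work_fn(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), mm.example_work); /* hypothetical member */

	if (i915_example_retire(i915))		/* hypothetical helper */
		queue_work(i915->wq, work);	/* rearm: may survive flush_workqueue() */
}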

v2: Reinforce the drain_workqueue with a preceding rcu_barrier(), as a
few of the tasks that need to be drained may first be armed by RCU.
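
A sketch of the RCU case (again with hypothetical names, loosely modeled on
the object-free path): here the work is only queued from an RCU callback, so
a drain that runs before the grace period completes finds an empty queue.
The preceding rcu_barrier() waits for all outstanding RCU callbacks, and the
second pass catches work that the first pass re-armed via RCU.

/* Hypothetical: work that is first armed from an RCU callback. */
static void example_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);

	/* Nothing is on the workqueue until the grace period elapses. */
	queue_work(to_i915(obj->base.dev)->wq,
		   &obj->example_free_work); /* hypothetical member */
}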

References: https://bugs.freedesktop.org/show_bug.cgi?id=101627
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170718134124.14832-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/selftests/mock_gem_device.c

index 6f750efe9c3d048e50a0f015af1c8893c1be2694..799dd3890cd9071ce7c88814a72c3285a7f767bd 100644 (file)
@@ -596,7 +596,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
-       flush_workqueue(dev_priv->wq);
+       /* Flush any outstanding unpin_work. */
+       i915_gem_drain_workqueue(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(dev_priv);
@@ -1413,9 +1414,6 @@ void i915_driver_unload(struct drm_device *dev)
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_reset_error_state(dev_priv);
 
-       /* Flush any outstanding unpin_work. */
-       drain_workqueue(dev_priv->wq);
-
        i915_gem_fini(dev_priv);
        intel_uc_fini_fw(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);
index 559fdc7bb39334321f5e430ca19ccd688d6734be..017361833c583d8a099a78eca888f17ecb206724 100644 (file)
@@ -3300,6 +3300,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
        } while (flush_work(&i915->mm.free_work));
 }
 
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+       /*
+        * Similar to the objects above (see i915_gem_drain_freed_objects),
+        * in general we have workers that are armed by RCU and then
+        * rearm themselves in their callbacks. To be paranoid, we need
+        * to drain the workqueue a second time after waiting for the
+        * RCU grace period so that we catch work queued via RCU from
+        * the first pass. As neither drain_workqueue() nor
+        * flush_workqueue() report a result, we assume that we do not
+        * require more than two passes to catch all recursive
+        * RCU-delayed work.
+        */
+       int pass = 2;
+       do {
+               rcu_barrier();
+               drain_workqueue(i915->wq);
+       } while (--pass);
+}
+
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
index 47613d20bba8e2dffe011e32a05a0b9cd6b2e352..7a468cb309466d631223268deef736128740d855 100644 (file)
@@ -57,7 +57,7 @@ static void mock_device_release(struct drm_device *dev)
 
        cancel_delayed_work_sync(&i915->gt.retire_work);
        cancel_delayed_work_sync(&i915->gt.idle_work);
-       flush_workqueue(i915->wq);
+       i915_gem_drain_workqueue(i915);
 
        mutex_lock(&i915->drm.struct_mutex);
        for_each_engine(engine, i915, id)