drm/i915: Protect i915_active iterators from the shrinker
author    Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 8 Feb 2019 13:47:04 +0000 (13:47 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Mon, 11 Feb 2019 11:17:20 +0000 (11:17 +0000)

If we allocate while iterating the rbtree of active nodes, we may hit
the shrinker and so retire the i915_active, reaping the rbtree. Modifying
the rbtree as we iterate is not good behaviour, so acquire the
i915_active first to keep the tree intact whenever we allocate.

Fixes: a42375af0a30 ("drm/i915: Release the active tracker tree upon idling")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190208134704.23039-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
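
As an illustration only (not part of the patch), here is a minimal user-space sketch of the acquire/release bracketing pattern the fix relies on: hold a reference on the active tracker across any step that may allocate (and so recurse into the shrinker), and funnel every exit through a single release. The toy_* names and the standalone reference counter are simplified stand-ins, not the real i915 structures or locking.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct i915_active: only the use count matters here. */
struct toy_active {
	int count;
};

/* Take a reference; returns true if the object was idle before this call. */
static bool toy_active_acquire(struct toy_active *ref)
{
	return ref->count++ == 0;
}

/* Drop the reference; once the count hits zero the tree could be reaped. */
static void toy_active_release(struct toy_active *ref)
{
	if (--ref->count == 0)
		printf("idle again: reaping the tree would now be safe\n");
}

/*
 * Pattern used by the patch: acquire before the allocation, release on
 * every exit path, so the tree cannot be reaped underneath the caller.
 */
static int toy_active_ref(struct toy_active *ref, bool alloc_fails)
{
	int err = 0;

	toy_active_acquire(ref);	/* keep the tree intact while we allocate */

	if (alloc_fails) {		/* stands in for the allocation failing */
		err = -1;
		goto out;
	}
	/* ... insert the new node into the tree here ... */
out:
	toy_active_release(ref);	/* single exit path drops the guard */
	return err;
}

int main(void)
{
	struct toy_active ref = { .count = 0 };

	printf("success path -> %d\n", toy_active_ref(&ref, false));
	printf("failure path -> %d\n", toy_active_ref(&ref, true));
	return 0;
}
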
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa7301ec16b1395b0356c27ff788e004..db7bb5bd5adde4863df871b71e1279d633ea74f6 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
                    struct i915_request *rq)
 {
        struct i915_active_request *active;
+       int err = 0;
+
+       /* Prevent reaping in case we malloc/wait while building the tree */
+       i915_active_acquire(ref);
 
        active = active_instance(ref, timeline);
-       if (IS_ERR(active))
-               return PTR_ERR(active);
+       if (IS_ERR(active)) {
+               err = PTR_ERR(active);
+               goto out;
+       }
 
        if (!i915_active_request_isset(active))
                ref->count++;
        __i915_active_request_set(active, rq);
 
        GEM_BUG_ON(!ref->count);
-       return 0;
+out:
+       i915_active_release(ref);
+       return err;
 }
 
 bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
        struct active_node *it, *n;
-       int ret;
+       int err = 0;
 
-       ret = i915_request_await_active_request(rq, &ref->last);
-       if (ret)
-               return ret;
+       /* await allocates and so we need to avoid hitting the shrinker */
+       if (i915_active_acquire(ref))
+               goto out; /* was idle */
+
+       err = i915_request_await_active_request(rq, &ref->last);
+       if (err)
+               goto out;
 
        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-               ret = i915_request_await_active_request(rq, &it->base);
-               if (ret)
-                       return ret;
+               err = i915_request_await_active_request(rq, &it->base);
+               if (err)
+                       goto out;
        }
 
-       return 0;
+out:
+       i915_active_release(ref);
+       return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)