drm/i915: Flush the freed object list on file close
author     Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 2 Aug 2019 21:21:37 +0000 (22:21 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 2 Aug 2019 22:39:48 +0000 (23:39 +0100)
As we increase the number of RCU objects, it becomes easier for us to
have several hundred thousand objects in the deferred RCU free queues.
An example is gem_ctx_create/files, which continually creates active
contexts that are not immediately freed upon close as they are kept
alive by outstanding requests. This lack of backpressure allows the
context objects to persist until they overwhelm and starve the system.
We can increase our backpressure by flushing the freed object queue
upon closing the device fd, which should not impact other clients.
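
To illustrate the pattern this relies on, here is a minimal user-space
C sketch (not the kernel code; the types and function names below are
invented for the example): producers push freed objects onto a
lock-free singly-linked list, and the consumer detaches the whole chain
with a single atomic exchange, the moral equivalent of llist_add() and
llist_del_all(), before walking it to free each node.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a deferred-free object. */
struct node {
	struct node *next;
	int id;
};

/* Plays the role of i915->mm.free_list. */
static _Atomic(struct node *) free_list;

/* Producer side: analogous to llist_add() in the object free path. */
static void defer_free(struct node *n)
{
	struct node *head = atomic_load(&free_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&free_list, &head, n));
}

/*
 * Consumer side: analogous to llist_del_all() followed by
 * __i915_gem_free_objects() walking the detached chain.
 */
static void flush_free_list(void)
{
	struct node *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct node *next = n->next;

		printf("freeing deferred object %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		defer_free(n);
	}

	/*
	 * The patch invokes the equivalent flush from
	 * i915_driver_postclose(), once per closed fd.
	 */
	flush_free_list();
	return 0;
}

Detaching the whole list in one exchange is what allows the free worker
and the file-close path to act as concurrent consumers without the old
free_lock that serialised llist_del_first().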

Testcase: igt/gem_ctx_create/*files
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 4ea97fca9c3586d12f47a58ca1476f0194c43b55..19d55115747c73440a7d86b9336b1602beaf444b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -211,48 +211,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-       struct llist_node *freed;
-
-       /* Free the oldest, most stale object to keep the free_list short */
-       freed = NULL;
-       if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-               /* Only one consumer of llist_del_first() allowed */
-               spin_lock(&i915->mm.free_lock);
-               freed = llist_del_first(&i915->mm.free_list);
-               spin_unlock(&i915->mm.free_lock);
-       }
-       if (unlikely(freed)) {
-               freed->next = NULL;
+       struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+       if (unlikely(freed))
                __i915_gem_free_objects(i915, freed);
-       }
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);
-       struct llist_node *freed;
-
-       /*
-        * All file-owned VMA should have been released by this point through
-        * i915_gem_close_object(), or earlier by i915_gem_context_close().
-        * However, the object may also be bound into the global GTT (e.g.
-        * older GPUs without per-process support, or for direct access through
-        * the GTT either for the user or for scanout). Those VMA still need to
-        * unbound now.
-        */
-
-       spin_lock(&i915->mm.free_lock);
-       while ((freed = llist_del_all(&i915->mm.free_list))) {
-               spin_unlock(&i915->mm.free_lock);
 
-               __i915_gem_free_objects(i915, freed);
-               if (need_resched())
-                       return;
-
-               spin_lock(&i915->mm.free_lock);
-       }
-       spin_unlock(&i915->mm.free_lock);
+       i915_gem_flush_free_objects(i915);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 21f1b29d06a2ebbfb6fee0008ecdcb7a1abb6fc4..b9c6ae09d61feb2a3c912b3bc1ed693bd5d7afeb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2052,6 +2052,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
        mutex_unlock(&dev->struct_mutex);
 
        kfree(file_priv);
+
+       /* Catch up with all the deferred frees from "this" client */
+       i915_gem_flush_free_objects(to_i915(dev));
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 98045875ebbaec8c39223271a0c803cd771ee7ee..5f3e5c13fbaae2d9742cad89d605d9863f6658df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -773,7 +773,6 @@ struct i915_gem_mm {
         */
        struct llist_head free_list;
        struct work_struct free_work;
-       spinlock_t free_lock;
        /**
         * Count of objects pending destructions. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index deaca3c2416df87d2bfaba596752c4e870e682b1..eb34f3e5a74d73d08c1ca840e7061e260054fa94 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1648,7 +1648,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
        spin_lock_init(&i915->mm.obj_lock);
-       spin_lock_init(&i915->mm.free_lock);
 
        init_llist_head(&i915->mm.free_list);