drm/i915: Remove request retirement before each batch
authorChris Wilson <chris@chris-wilson.co.uk>
Thu, 4 Aug 2016 15:32:20 +0000 (16:32 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 4 Aug 2016 19:19:51 +0000 (20:19 +0100)
This reimplements the denial-of-service protection against igt from
commit 227f782e4667 ("drm/i915: Retire requests before creating a new
one") and transfers the stall from before each batch into get_pages().
The issue is that the stall is increasing latency between batches which
is detrimental in some cases (especially coupled with execlists) to
keeping the GPU well fed. We have also observed that retiring
requests can itself free objects (and requests), and therefore makes
a good first step when shrinking.

v2: Recycle objects prior to i915_gem_object_get_pages()
v3: Remove the reference to the ring from i915_gem_requests_ring() as it
operates on an intel_engine_cs.
v4: Since commit 9b5f4e5ed6fd ("drm/i915: Retire oldest completed request
before allocating next") we no longer need the safeguard to retire
requests before get_pages(). We no longer see the huge latencies when
hitting the shrinker between allocations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-4-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_request.c

index fbda38f25c6be8d111161365ccea31d5f1f1ad5f..2de3d16f7b8076d83dde15bf3e144f66de96287c 100644 (file)
@@ -3169,7 +3169,6 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
 {
index 5e3b5054f72de2a8182a69b62f5485693c9daa49..0593ea3ba21152e9332441e62838abd67d38555d 100644 (file)
@@ -781,8 +781,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
        int retry;
 
-       i915_gem_retire_requests_ring(engine);
-
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
        INIT_LIST_HEAD(&ordered_vmas);
index 6faa84832ade9a5c90d6ebf63a170270c7550669..773b942a3a5f8d1cdbb822cc0f062737c94cd195 100644 (file)
@@ -732,7 +732,7 @@ complete:
        return ret;
 }
 
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request, *next;
 
@@ -756,7 +756,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!dev_priv->gt.awake);
 
        for_each_engine(engine, dev_priv) {
-               i915_gem_retire_requests_ring(engine);
+               engine_retire_requests(engine);
                if (list_empty(&engine->request_list))
                        dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
        }