/* Ensure the irq handler finishes, and is not run again. */
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *request;
+ struct drm_i915_gem_request *request = NULL;
	/* Prevent the signaler thread from updating the request
	 * state (by calling dma_fence_signal) as we are processing
	 * the reset.
	 */
	kthread_park(engine->breadcrumbs.signaler);
	if (engine_stalled(engine)) {
		request = i915_gem_find_active_request(engine);
		if (request && request->fence.error == -EIO)
			err = -EIO; /* Previous reset failed! */
}
+
+ engine->hangcheck.active_request = request;
}
i915_gem_revoke_fences(dev_priv);
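
/*
 * For orientation, a condensed sketch of i915_gem_reset_prepare() once the
 * hunk above is applied. The signaler parking, tasklet disabling and seqno
 * barrier between the comment and the engine_stalled() check are elided:
 * the point is that the active request is now captured while the engine
 * state is still frozen, and stashed for the post-reset phase.
 */
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *request = NULL;

		if (engine_stalled(engine)) {
			request = i915_gem_find_active_request(engine);
			if (request && request->fence.error == -EIO)
				err = -EIO; /* Previous reset failed! */
		}

		/* Hand the request over to i915_gem_reset_engine() */
		engine->hangcheck.active_request = request;
	}

	i915_gem_revoke_fences(dev_priv);

	return err;
}
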
static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
{
/* Read once and return the resolution */
- const bool guilty = engine_stalled(request->engine);
+ const bool guilty = !i915_gem_request_completed(request);
	/* The guilty request will get skipped on a hung engine.
	 *
	 * Users of default contexts do not rely on logical state
	 * preserved between batches, so it is safe to execute the
	 * queued requests following the hang.
	 */
return guilty;
}
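
/*
 * Note the semantic shift above: guilt is now judged per-request ("did
 * this request complete?") rather than per-engine ("is the engine
 * stalled?"). Conceptually the completion test reduces to a seqno
 * comparison against the breadcrumb the GPU last wrote back; a simplified
 * sketch (the real i915_gem_request_completed() also copes with requests
 * that have not yet been assigned a global seqno):
 */
static bool sketch_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->global_seqno);
}
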
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+static void i915_gem_reset_engine(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_find_active_request(engine);
if (request && i915_gem_reset_request(request)) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
engine->name, request->global_seqno);
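
/*
 * Not shown in these hunks: the per-engine slot that carries the request
 * between the two phases. A sketch of the field the patch relies on,
 * added to the hangcheck state (the other members and the exact placement
 * in intel_ringbuffer.h are elided here):
 */
struct intel_engine_hangcheck {
	/* existing members (acthd, seqno, action, ...) elided */
	struct drm_i915_gem_request *active_request;
};
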
for_each_engine(engine, dev_priv, id) {
	struct i915_gem_context *ctx;

- i915_gem_reset_engine(engine);
+ i915_gem_reset_engine(engine, engine->hangcheck.active_request);
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
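
/*
 * fetch_and_zero() above is an i915 helper that reads a pointer and
 * clears it in a single expression, so last_retired_context cannot be
 * unpinned twice; its definition, for reference:
 */
#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})
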
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
+ engine->hangcheck.active_request = NULL;
tasklet_enable(&engine->irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
}
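
/*
 * Finally, how the three phases hand the request around, sketched from
 * the caller's perspective (error handling and the actual hardware reset
 * via intel_gpu_reset() are elided; struct_mutex is held across the whole
 * sequence, so engine->hangcheck.active_request cannot change underneath
 * us):
 */
	err = i915_gem_reset_prepare(dev_priv);	/* find and stash active_request */
	/* ... intel_gpu_reset(dev_priv, ALL_ENGINES) ... */
	i915_gem_reset(dev_priv);		/* replay around the stashed request */
	i915_gem_reset_finish(dev_priv);	/* clear the stash, re-enable irqs */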