active->retire = fn ?: i915_active_retire_noop;
}
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
-	/*
-	 * Inside the error capture (running with the driver in an unknown
-	 * state), we want to bend the rules slightly (a lot).
-	 *
-	 * Work is in progress to make it safer, in the meantime this keeps
-	 * the known issue from spamming the logs.
-	 */
-	return rcu_dereference_protected(active->request, 1);
-}
-
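For background on the helper removed above: the second argument of rcu_dereference_protected() names the condition (typically a held lock) under which a plain dereference of the RCU-protected pointer is safe, and lockdep/PROVE_RCU can then verify that claim at runtime. Passing a literal 1, as the deleted peek helper did, suppresses the verification entirely, which is what its comment means by bending the rules during error capture. A minimal sketch of the conventional pattern, using hypothetical foo/bar types that are not part of the driver:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct bar;

struct foo {
	struct bar __rcu *ptr;	/* updates serialised by @lock */
	spinlock_t lock;
};

/*
 * Normal usage: assert the lock that protects updates of @ptr, so that
 * CONFIG_PROVE_RCU can warn if a caller reaches here without it.
 */
static struct bar *foo_peek(struct foo *f)
{
	return rcu_dereference_protected(f->ptr,
					 lockdep_is_held(&f->lock));
}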
/**
* i915_active_request_raw - return the active request
* @active - the active tracker
round_jiffies_up_relative(HZ));
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915_reset_failed(i915))
-		return;
-
-	i915_retire_requests(i915);
-
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
-		GEM_BUG_ON(engine->last_retired_context !=
-			   to_intel_context(i915->kernel_context, engine));
-	}
-}
-
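The GEM_BUG_ON() checks in the helper removed above are the driver's debug-only assertions. As a simplified illustration (the real definition lives in i915_gem.h and has varied between kernel versions), they only turn into hard BUG()s when the driver is built with CONFIG_DRM_I915_DEBUG_GEM and otherwise compile away:

/* Simplified illustration, not the exact i915 definition. */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
#define GEM_BUG_ON(expr) do { } while (0)
#endif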
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
unsigned long mask)
{
I915_GEM_IDLE_TIMEOUT))
result = false;
-	if (result) {
-		assert_kernel_context_is_current(i915);
-	} else {
+	if (!result) {
		/* Forcibly cancel outstanding work and leave the gpu quiet. */
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915->gt.active_requests)
-		return false;
-
-	for_each_engine(engine, i915, id) {
-		if (!intel_engine_has_kernel_context(engine))
-			return false;
-	}
-
-	return true;
+	return !i915->gt.active_requests;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
	int err;
-	/* Not everything in the GGTT is tracked via vma (otherwise we
+	/*
+	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
return true;
}
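The ggtt_flush() comment above gives the rationale: some GGTT residents are not tracked by a vma, so the only way to release them is to idle the GPU and retire outstanding requests, then rescan for something evictable. A rough sketch of that flush-then-retry shape, built around the two helpers shown in this hunk (scan_for_evictable_vma() is a hypothetical stand-in for the driver's actual search, and the loop is illustrative rather than the exact eviction code):

/*
 * Illustrative only: error handling and the real search logic are
 * omitted; ggtt_is_idle() and ggtt_flush() are the helpers above.
 */
static int evict_with_flush(struct drm_i915_private *i915)
{
	int err;

search_again:
	err = scan_for_evictable_vma(i915);	/* hypothetical helper */
	if (err != -ENOSPC)
		return err;

	/*
	 * Nothing evictable was found. If the GGTT still has active
	 * users, idle the GPU so the requests pinning contexts and
	 * ringbuffers retire, then try the scan once more.
	 */
	if (!ggtt_is_idle(i915)) {
		err = ggtt_flush(i915);
		if (err)
			return err;

		cond_resched();
		goto search_again;
	}

	return -ENOSPC;
}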
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
-	const struct intel_context *kernel_context =
-		to_intel_context(engine->i915->kernel_context, engine);
-	struct i915_request *rq;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	if (!engine->context_size)
-		return true;
-
-	/*
-	 * Check the last context seen by the engine. If active, it will be
-	 * the last request that remains in the timeline. When idle, it is
-	 * the last executed context as tracked by retirement.
-	 */
-	rq = __i915_active_request_peek(&engine->timeline.last_request);
-	if (rq)
-		return rq->hw_context == kernel_context;
-	else
-		return engine->last_retired_context == kernel_context;
-}
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);