* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- intel_gt_pm_get(eb.i915);
+ intel_gt_pm_get(&eb.i915->gt);
err = i915_mutex_lock_interruptible(dev);
if (err)
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_gt_pm_put(eb.i915);
+ intel_gt_pm_put(&eb.i915->gt);
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
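
For reference, the unwind ordering in this path after the conversion: the GT wakeref is taken before struct_mutex and released after it on the error path. A compressed sketch of that shape, with the execbuffer body elided and the helper name hypothetical:

	/*
	 * Sketch only: example_execbuf() is hypothetical; names and label
	 * ordering follow the hunk above.
	 */
	static int example_execbuf(struct drm_i915_private *i915,
				   struct drm_device *dev)
	{
		int err;

		intel_gt_pm_get(&i915->gt);	/* wake the GT first */

		err = i915_mutex_lock_interruptible(dev);
		if (err)
			goto err_rpm;

		/* ... the execbuffer work itself ... */

		mutex_unlock(&dev->struct_mutex);
	err_rpm:
		intel_gt_pm_put(&i915->gt);	/* reverse order on unwind */
		return err;
	}
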
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(i915);
+ intel_gt_resume(&i915->gt);
if (i915_gem_init_hw(i915))
goto err_wedged;
{
i915_gem_shrinker_unregister(i915);
- intel_gt_pm_get(i915);
+ intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work);
flush_work(&i915->gem.idle_work);
static void restore_retire_worker(struct drm_i915_private *i915)
{
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
igt_flush_test(i915, I915_WAIT_LOCKED);
GEM_TRACE("%s\n", engine->name);
- intel_gt_pm_get(engine->i915);
+ intel_gt_pm_get(engine->gt);
/* Pin the default state for fast resets from atomic context. */
map = NULL;
engine->execlists.no_priolist = false;
- intel_gt_pm_put(engine->i915);
+ intel_gt_pm_put(engine->gt);
return 0;
}
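
Per-engine paths now reach the GT through engine->gt instead of engine->i915. A minimal sketch of the resulting pattern, with a hypothetical helper name and the actual engine setup elided:

	/* Sketch only: example_engine_init() is hypothetical. */
	static int example_engine_init(struct intel_engine_cs *engine)
	{
		intel_gt_pm_get(engine->gt);	/* wake the GT owning this engine */

		/* ... pin default state, reset execlists bookkeeping ... */

		intel_gt_pm_put(engine->gt);
		return 0;
	}
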
enum intel_engine_id id;
int err = 0;
- intel_gt_pm_get(i915);
+ intel_gt_pm_get(&i915->gt);
for_each_engine(engine, i915, id) {
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
break;
}
}
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(&i915->gt);
return err;
}
return 0;
}
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
{
- intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+ struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+ intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
}
static int intel_gt_park(struct intel_wakeref *wf)
return 0;
}
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
{
- intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+ struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+ intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
}
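
With both entry points rebased onto struct intel_gt, device-level callers take the wakeref through the embedded i915->gt, as in the conversions above. A minimal sketch of the balanced get/put pattern (hypothetical helper name):

	/* Sketch only: example_do_gt_work() is hypothetical. */
	static void example_do_gt_work(struct drm_i915_private *i915)
	{
		intel_gt_pm_get(&i915->gt);	/* pin the GT awake */

		/* ... touch hardware, submit requests ... */

		intel_gt_pm_put(&i915->gt);	/* balanced put; GT may now park */
	}
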
void intel_gt_pm_init_early(struct intel_gt *gt)
/**
* intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* to match. Note that calling intel_gt_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
- if (!reset_engines(i915) && !force)
+ if (!reset_engines(gt->i915) && !force)
return;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
intel_engine_reset(engine, false);
}
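
A hedged sketch of a resume-time caller of the converted intel_gt_sanitize(), mirroring the call sites updated later in this patch (hypothetical helper name):

	/* Sketch only: example_resume() is hypothetical. */
	static void example_resume(struct drm_i915_private *i915)
	{
		/* force=true: sanitize engine state even if the reset failed */
		intel_gt_sanitize(&i915->gt, true);
	}
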
-void intel_gt_resume(struct drm_i915_private *i915)
+void intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
struct intel_context *ce;
ce = engine->kernel_context;
#include <linux/types.h>
-struct drm_i915_private;
struct intel_gt;
enum {
INTEL_GT_PARK,
};
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
void intel_gt_pm_init_early(struct intel_gt *gt);
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+void intel_gt_resume(struct intel_gt *gt);
#endif /* INTEL_GT_PM_H */
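
After this change the header needs nothing from callers beyond struct intel_gt. A sketch tying the converted entry points together, with the ordering taken from the call sites in this patch and error handling elided (hypothetical helper name):

	/* Sketch only: example_power_cycle() is hypothetical. */
	static void example_power_cycle(struct intel_gt *gt)
	{
		intel_gt_pm_get(gt);	/* hold the GT awake across the work */
		intel_gt_pm_put(gt);

		/* after power loss: reset stale engine state, then reload */
		intel_gt_sanitize(gt, false);
		intel_gt_resume(gt);
	}
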
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_gt_pm_get(i915);
+ intel_gt_pm_get(&i915->gt);
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(&i915->gt);
}
static void nop_submit_request(struct i915_request *request)
}
mutex_unlock(&i915->gt.timelines.mutex);
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
intel_power_domains_resume(dev_priv);
- intel_gt_sanitize(dev_priv, true);
+ intel_gt_sanitize(&dev_priv->gt, true);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}