 	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 	}
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	reset_count = i915_reset_count(&i915->gpu_error);
 	count = 0;
 	do {
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	int err = 0;
 	}
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for_each_engine(engine, i915, id) {
 		unsigned int reset_count, reset_engine_count;
 		unsigned int count;
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
 	/* Check that the resets are usable from atomic context */
+	intel_gt_pm_get(&i915->gt);
 	igt_global_reset_lock(i915);
-	mutex_lock(&i915->drm.struct_mutex);
 	/* Flush any requests before we get started and check basics */
 	if (!igt_force_reset(i915))
 	igt_force_reset(i915);
 unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
+	intel_gt_pm_put(&i915->gt);
 	return err;
 }
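
For context, the power-management pattern being swapped in the hunks above is roughly the following. This is an illustrative sketch only, built from the interfaces visible in the diff (intel_runtime_pm_get/_put with an intel_wakeref_t cookie versus intel_gt_pm_get/_put on &i915->gt); the function names reset_test_pm_old/reset_test_pm_new and the struct drm_i915_private parameter are hypothetical stand-ins, not part of the patch.

/* Sketch (old pattern): the selftest takes a device-wide runtime-PM
 * wakeref by hand and must carry the returned cookie to the matching put.
 */
static void reset_test_pm_old(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	/* ... build requests and trigger resets ... */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/* Sketch (new pattern): no cookie is carried; the atomic reset checks are
 * bracketed by an explicit GT-level reference instead.
 */
static void reset_test_pm_new(struct drm_i915_private *i915)
{
	intel_gt_pm_get(&i915->gt);
	/* ... perform resets from atomic context ... */
	intel_gt_pm_put(&i915->gt);
}

The visible effect in the hunks is that the intel_wakeref_t local and its paired put drop out of the nop-reset tests, while only the atomic-reset path gains the explicit GT reference.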