When we first introduced the reset to sanitize the GPU on taking over
from the BIOS and before returning control to third parties (the BIOS!),
we restricted it to only those systems utilizing HW contexts, as we were
uncertain of how stable our reset mechanism truly was. We now have
reasonable coverage across all machines that expose a GPU reset method,
and so we should be safe to sanitize the GPU state everywhere.
v2: We _have_ to skip the reset if it would clobber the display.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190103112104.19561-1-chris@chris-wilson.co.uk
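
Before the hunks below, a condensed, self-contained sketch of the control
flow this patch introduces may help: the device info now records whether a
full GPU reset would clobber the display, and intel_engines_sanitize()
gains a force flag so callers that know the HW has lost its state anyway
(e.g. resume from a PCI power cycle) can scrub the engine bookkeeping even
when no reset was performed. All names in the sketch are illustrative
stand-ins, not the actual i915 symbols.

/*
 * Minimal, standalone sketch (not kernel code) of the reset gating.
 * try_reset_engines() mirrors the new reset_engines() helper and
 * sanitize_engines() mirrors intel_engines_sanitize(i915, force).
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	bool gpu_reset_clobbers_display;	/* old GMCH-display parts */
	bool reset_succeeds;			/* stand-in for intel_gpu_reset() */
};

/* Never reset if doing so would take the display down with it. */
static bool try_reset_engines(const struct fake_device *dev)
{
	if (dev->gpu_reset_clobbers_display)
		return false;

	return dev->reset_succeeds;
}

/*
 * Only scrub the engine state tracking if we actually reset the HW,
 * unless the caller knows the HW lost its state anyway (force).
 */
static void sanitize_engines(const struct fake_device *dev, bool force)
{
	if (!try_reset_engines(dev) && !force) {
		printf("skip: no reset performed and not forced\n");
		return;
	}

	printf("sanitizing engine state\n");
}

int main(void)
{
	struct fake_device gen3 = { .gpu_reset_clobbers_display = true };
	struct fake_device gen9 = { .reset_succeeds = true };

	sanitize_engines(&gen3, false);	/* boot-time takeover: skipped */
	sanitize_engines(&gen3, true);	/* resume from D3cold: forced */
	sanitize_engines(&gen9, false);	/* reset is safe: sanitized */

	return 0;
}
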
intel_power_domains_resume(dev_priv);
- intel_engines_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
- if (!intel_gpu_reset(i915, ALL_ENGINES))
- intel_engines_sanitize(i915);
+ intel_engines_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
void i915_gem_sanitize(struct drm_i915_private *i915)
{
- int err;
-
GEM_TRACE("\n");
mutex_lock(&i915->drm.struct_mutex);
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- err = -ENODEV;
- if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
- if (!err)
- intel_engines_sanitize(i915);
+ intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_runtime_pm_put(i915);
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
GEN(3), \
.num_pipes = 2, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
static const struct intel_device_info intel_gm45_info = {
.display.has_fbc = 1,
.display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
- return intel_has_gpu_reset(dev_priv) &&
- INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+ return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+ intel_has_gpu_reset(dev_priv));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
engine->set_default_submission(engine);
}
+static bool reset_engines(struct drm_i915_private *i915)
+{
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
/**
* intel_engines_sanitize: called after the GPU has lost power
* @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_engines_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_engines_sanitize(struct drm_i915_private *i915)
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
+ if (!reset_engines(i915) && !force)
+ return;
+
for_each_engine(engine, i915, id) {
if (engine->reset.reset)
engine->reset.reset(engine, NULL);
return cs;
}
-void intel_engines_sanitize(struct drm_i915_private *i915);
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
*/
intel_runtime_pm_get(i915);
- intel_engines_sanitize(i915);
+ intel_engines_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);