*/
int suspended;
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occurring and makes
- * every pending request fail
- */
- atomic_t wedged;
-
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
unsigned long last_reset;
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device time out.
+ * It prevents command submission from occurring and makes
+ * every pending request fail.
+ */
+ atomic_t wedged;
+
/* For gpu hang simulation. */
unsigned int stop_rings;
};
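As context for the hunks below, here is a minimal sketch of the gating pattern the wedged flag implements, using C11 atomics in place of the kernel's atomic_t; submit_request() and declare_hang() are illustrative names, not the driver's functions.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int wedged;              /* models gpu_error.wedged */

	/* Submission refuses new work while the GPU is marked wedged. */
	static int submit_request(int id)
	{
		if (atomic_load(&wedged))
			return -5;             /* -EIO: new requests fail */
		printf("request %d submitted\n", id);
		return 0;
	}

	/* The hang handler sets the flag before waking any waiters, so
	 * every re-check of the flag sees the failure immediately. */
	static void declare_hang(void)
	{
		atomic_store(&wedged, 1);
	}

	int main(void)
	{
		submit_request(1);             /* succeeds */
		declare_hang();
		if (submit_request(2) < 0)     /* rejected */
			printf("request 2 failed: GPU wedged\n");
		return 0;
	}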
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
void i915_gem_reset(struct drm_device *dev);
}
static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct completion *x = &dev_priv->gpu_error.completion;
+ struct completion *x = &error->completion;
unsigned long flags;
int ret;
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (!atomic_read(&error->wedged))
return 0;
/*
return ret;
}
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (atomic_read(&error->wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
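The comment is cut short in this excerpt; the point is that wait_for_completion() consumed one "done" token, and on a still-wedged GPU the token is handed back so the count never reaches zero and later waiters are not stranded. A single-threaded sketch of that bookkeeping, with a stand-in struct rather than the kernel's completion:

	#include <stdio.h>

	/* Stand-in for the kernel's struct completion; illustrative only. */
	struct completion {
		unsigned int done;     /* tokens released by complete_all() */
	};

	/* wait_for_completion() consumes one token; the real call sleeps
	 * until done > 0, which this single-threaded sketch elides. */
	static void wait_token(struct completion *x)
	{
		x->done--;
	}

	static void wait_for_error_sketch(struct completion *x, int still_wedged)
	{
		wait_token(x);
		if (still_wedged)
			x->done++;     /* hand the token back so later
					* waiters never block on zero */
	}

	int main(void)
	{
		struct completion x = { .done = 1 };
		wait_for_error_sketch(&x, 1);
		printf("done = %u\n", x.done); /* prints 1, not 0 */
		return 0;
	}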
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(dev);
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
}
int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->gpu_error.completion;
+ if (atomic_read(&error->wedged)) {
+ struct completion *x = &error->completion;
bool recovery_complete;
unsigned long flags;
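The hunk is truncated, but the function's three outcomes are worth spelling out: a wedged GPU yields -EIO to non-interruptible callers (they cannot restart), -EAGAIN while a reset is still in flight, and -EIO once recovery completed but the GPU stayed wedged. A condensed model with stubbed state in place of the driver's:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative state: the wedged flag and whether the reset
	 * handler has signalled the completion. */
	static bool wedged_flag = true;
	static bool recovery_complete = false;

	static int check_wedge_sketch(bool interruptible)
	{
		if (wedged_flag) {
			/* Non-interruptible callers can't handle -EAGAIN. */
			if (!interruptible)
				return -5;      /* -EIO */
			/* Recovery done but still wedged: reset failed. */
			if (recovery_complete)
				return -5;      /* -EIO */
			return -11;             /* -EAGAIN: retry later */
		}
		return 0;
	}

	int main(void)
	{
		printf("interruptible: %d\n", check_wedge_sketch(true));  /* -11 */
		printf("blocking:      %d\n", check_wedge_sketch(false)); /* -5  */
		return 0;
	}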
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
+ atomic_read(&dev_priv->gpu_error.wedged))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
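The loop above sleeps in bounded slices and re-runs i915_gem_check_wedge() after each wake-up, so a hang declared while the caller slept becomes an error return instead of an endless wait. A condensed model of that control flow, with stubbed predicates standing in for the ring's seqno and the error state:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stubs standing in for ring->get_seqno() and the wedged flag;
	 * in the driver these read live hardware state. */
	static bool seqno_passed(void) { return false; }
	static bool gpu_wedged(void)   { static int n; return ++n >= 3; }

	/* Model of the __wait_seqno() loop: after each timeout slice,
	 * recheck both the completion condition and the hang flag. */
	static int wait_seqno_sketch(void)
	{
		for (;;) {
			if (seqno_passed())
				return 0;      /* request completed */
			if (gpu_wedged())
				return -5;     /* -EIO via check_wedge */
			/* real code: wait_event_*_timeout(ring->irq_queue,
			 * EXIT_COND, timeout_jiffies) */
		}
	}

	int main(void)
	{
		printf("wait returned %d\n", wait_seqno_sketch());
		return 0;
	}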
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
if (seqno == 0)
return 0;
- ret = i915_gem_check_wedge(dev_priv, true);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (!atomic_read(&dev_priv->gpu_error.wedged))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
u32 seqno = 0;
int ret;
- if (atomic_read(&dev_priv->mm.wedged))
+ if (atomic_read(&dev_priv->gpu_error.wedged))
return -EIO;
spin_lock(&file_priv->mm.lock);
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (atomic_read(&dev_priv->gpu_error.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (atomic_read(&dev_priv->gpu_error.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i915_reset(dev)) {
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
complete_all(&dev_priv->gpu_error.completion);
if (wedged) {
INIT_COMPLETION(dev_priv->gpu_error.completion);
- atomic_set(&dev_priv->mm.wedged, 1);
+ atomic_set(&dev_priv->gpu_error.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
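Taken together, the last two hunks implement a declare/recover handshake whose ordering matters: mark wedged and drain the completion before waking sleepers, and clear the flag before releasing them after a successful reset. A compressed model, where the plain counter stands in for the kernel's completion and both functions are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int wedged;
	static unsigned int completion_done = 1;

	/* i915_handle_error() path: reset the completion count so new
	 * waiters block, mark the GPU wedged, then wake sleepers so
	 * they re-check the flag and park in wait_for_error(). */
	static void declare_hang(void)
	{
		completion_done = 0;           /* INIT_COMPLETION */
		atomic_store(&wedged, 1);
		/* real code: wake_up_all(&ring->irq_queue) */
	}

	/* i915_error_work_func() path: on a successful reset clear the
	 * flag first, then release everyone parked on the completion. */
	static void finish_reset(int reset_ok)
	{
		if (reset_ok)
			atomic_store(&wedged, 0);
		completion_done = ~0u;         /* complete_all() */
	}

	int main(void)
	{
		declare_hang();
		finish_reset(1);
		printf("wedged=%d done=%u\n",
		       atomic_load(&wedged), completion_done);
		return 0;
	}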