static void idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.idle_work.work);
+ container_of(work, typeof(*i915), gem.idle_work);

	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
- if (!intel_wakeref_active(&i915->gt.wakeref))
+ if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work))
i915_gem_park(i915);
intel_wakeref_unlock(&i915->gt.wakeref);
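
This first hunk follows from how the two work types deliver their callback argument: a plain work_struct handler is passed a pointer to the member itself, while a delayed_work handler is passed the work_struct embedded inside the delayed_work, hence the dropped ".work" suffix in the container_of() member path. A minimal sketch of the two idioms (the "example" container and handler names are illustrative, not from the patch):

#include <linux/workqueue.h>

struct example {			/* hypothetical container */
	struct work_struct plain;
	struct delayed_work delayed;
};

static void plain_handler(struct work_struct *work)
{
	/* @work points at example::plain itself. */
	struct example *ex = container_of(work, struct example, plain);

	(void)ex;	/* real handler logic would go here */
}

static void delayed_handler(struct work_struct *work)
{
	/* @work points at the work_struct embedded in example::delayed. */
	struct example *ex = container_of(work, struct example, delayed.work);

	(void)ex;
}
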
@@ ... @@
		break;

	case INTEL_GT_PARK:
- mod_delayed_work(i915->wq,
- &i915->gem.idle_work,
- msecs_to_jiffies(100));
+ queue_work(i915->wq, &i915->gem.idle_work);
break;
}
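
With the grace period removed, parking is requested immediately. Unlike mod_delayed_work(), which (re)arms a timer, queue_work() returns false and does nothing if the item is already pending; the new work_pending() test in the handler above closes the resulting race, deferring the park to the freshly queued invocation if the GT was unparked and re-parked while the handler ran. The two queuing idioms, sketched with hypothetical state:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;	/* hypothetical, e.g. i915->wq */
static struct work_struct idle;		/* new style: plain work */
static struct delayed_work idle_dw;	/* old style: delayed work */

static void park_immediately(void)
{
	/* No-op (returns false) if @idle is already queued. */
	queue_work(wq, &idle);
}

static void park_after_grace_period(void)
{
	/* (Re)arms the timer; a pending expiry is pushed back by 100ms. */
	mod_delayed_work(wq, &idle_dw, msecs_to_jiffies(100));
}
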
@@ ... @@
	 * Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- GEM_BUG_ON(i915->gt.awake);
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-
drain_delayed_work(&i915->gem.retire_work);
+ GEM_BUG_ON(i915->gt.awake);
+ flush_work(&i915->gem.idle_work);

-	/*
-	 * As the idle_work is rearming if it detects a race, play safe and
-	 * repeat the flush until it is definitely idle.
-	 */
-	drain_delayed_work(&i915->gem.idle_work);
+	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

i915_gem_drain_freed_objects(i915);
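
In the suspend path a single flush_work() now suffices: a plain work item cannot re-arm its own timer, so there is no need to flush in a loop, and the GEM_BUG_ON() assertion moves after the drain of the retire work. For contrast, drain_delayed_work() is an i915-local helper whose shape is roughly the following (reconstructed; the in-tree version may differ in detail):

/* Flush a delayed work until it stops re-arming itself. */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}
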
@@ ... @@
void i915_gem_init__pm(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->gem.idle_work, idle_work_handler);
+ INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
@@ ... @@
	mutex_unlock(&i915->drm.struct_mutex);

	drain_delayed_work(&i915->gem.retire_work);
- drain_delayed_work(&i915->gem.idle_work);
+ flush_work(&i915->gem.idle_work);
	i915_gem_drain_workqueue(i915);

	mutex_lock(&i915->drm.struct_mutex);
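
The remaining hunks are the same mechanical substitution at every other site that touches gem.idle_work. The delayed-work calls map onto their plain-work counterparts roughly one-for-one (a quick reference, not exhaustive):

/*
 * INIT_DELAYED_WORK(&dw, fn)          ->  INIT_WORK(&w, fn)
 * queue_delayed_work(wq, &dw, delay)  ->  queue_work(wq, &w)
 * mod_delayed_work(wq, &dw, delay)    ->  queue_work(wq, &w)
 * flush_delayed_work(&dw)             ->  flush_work(&w)
 * cancel_delayed_work_sync(&dw)       ->  cancel_work_sync(&w)
 */
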
@@ ... @@
	mock_init_contexts(i915);

	INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
- INIT_DELAYED_WORK(&i915->gem.idle_work, mock_idle_work_handler);
+ INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
i915->gt.awake = true;
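
Note that only the INIT macro changes in the mock device: workqueue callbacks take a struct work_struct * regardless of which kind of work embeds them, so the handler signature is already correct. A sketch (the empty body is an assumption about the mock):

static void mock_idle_work_handler(struct work_struct *work)
{
	/* The mock device has no hardware to park. */
}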