          */

         trace_i915_gem_shrink(i915, target, shrink);
-        i915_retire_requests(i915);

         /*
          * Unbinding of objects will require HW access; Let us not wake the
         if (shrink & I915_SHRINK_BOUND)
                 intel_runtime_pm_put(&i915->runtime_pm, wakeref);

-        i915_retire_requests(i915);
-
         shrinker_unlock(i915, unlock);

         if (nr_scanned)
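
For context, the runtime-pm bracketing these two hunks leave behind works
roughly as follows. This is a minimal sketch paraphrasing i915_gem_shrink(),
not the verbatim upstream code; the wakeref helpers are the ones visible in
the hunks, and the scan loop itself is elided:

        intel_wakeref_t wakeref = 0;

        /*
         * Unbinding bound objects needs HW access, so if the device is
         * already asleep we skip I915_SHRINK_BOUND rather than wake it
         * just to recover a little memory.
         */
        if (shrink & I915_SHRINK_BOUND) {
                wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
                if (!wakeref)
                        shrink &= ~I915_SHRINK_BOUND;
        }

        /* ... walk the object lists, unbinding and freeing pages ... */

        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);
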
         if (!shrinker_lock(i915, 0, &unlock))
                 return NOTIFY_DONE;

-        /* Force everything onto the inactive lists */
-        if (i915_gem_wait_for_idle(i915,
-                                   I915_WAIT_LOCKED,
-                                   MAX_SCHEDULE_TIMEOUT))
-                goto out;
-
         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                 freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                                I915_SHRINK_BOUND |
         }
         mutex_unlock(&i915->ggtt.vm.mutex);

-out:
         shrinker_unlock(i915, unlock);

         *(unsigned long *)ptr += freed_pages;
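
The function being edited above is i915's vmap purge notifier: the final
*(unsigned long *)ptr += freed_pages reports the reclaimed page count back to
vmalloc. A rough sketch of the surrounding plumbing, with the body elided;
register_vmap_purge_notifier() is the stock vmalloc hook, and the i915 field
names follow upstream but are illustrative here, not quoted from this patch:

        static int i915_gem_shrinker_vmap(struct notifier_block *nb,
                                          unsigned long event, void *ptr)
        {
                struct drm_i915_private *i915 =
                        container_of(nb, struct drm_i915_private,
                                     mm.vmap_notifier);
                unsigned long freed_pages = 0;

                /* ... the shrink + GGTT unbind shown in the hunks above ... */

                *(unsigned long *)ptr += freed_pages;
                return NOTIFY_DONE;
        }

        /* registered once at driver load */
        i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
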
         u64 size;
         int err;

+        /* Keep in GEM's good graces */
+        i915_retire_requests(ctx->i915);
+
         size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
         size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
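
With retirement dropped from the shrinker, long-running selftests must now
retire completed requests themselves, or everything they submit stays pinned
until the context is closed. An illustrative loop shape only;
submit_one_batch() is a hypothetical stand-in for the selftest's real
submission helper:

        for (i = 0; i < count; i++) {
                err = submit_one_batch(ctx, engine); /* hypothetical helper */
                if (err)
                        return err;

                /* Keep in GEM's good graces: release completed requests
                 * (and the pages they pin) as we go. */
                i915_retire_requests(ctx->i915);
        }
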
         struct i915_ggtt *ggtt;

         struct intel_gt_timelines {
-                struct mutex mutex; /* protects list, tainted by GPU */
+                struct mutex mutex; /* protects list */
                 struct list_head active_list;

                 /* Pack multiple timelines' seqnos into the same page */
         spin_lock_init(&timelines->hwsp_lock);
         INIT_LIST_HEAD(&timelines->hwsp_free_list);
-
-        /* via i915_gem_wait_for_idle() */
-        i915_gem_shrinker_taints_mutex(gt->i915, &timelines->mutex);
 }

 void intel_timelines_init(struct drm_i915_private *i915)
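
The dropped i915_gem_shrinker_taints_mutex() call existed to teach lockdep
that timelines->mutex could be taken during reclaim, via the shrinker's
now-deleted i915_gem_wait_for_idle(); with that path gone, the taint goes too.
The pattern such a helper implements looks roughly like this sketch:
taint_mutex_for_reclaim is an illustrative name, while fs_reclaim_acquire()
and fs_reclaim_release() are the stock lockdep reclaim annotations:

        #include <linux/gfp.h>
        #include <linux/mutex.h>
        #include <linux/sched/mm.h>

        /* Pretend, once at init, to take @mutex inside the reclaim
         * context, so lockdep records the dependency and can flag any
         * later inversion against real reclaim. */
        static void taint_mutex_for_reclaim(struct mutex *mutex)
        {
                if (!IS_ENABLED(CONFIG_LOCKDEP))
                        return;

                fs_reclaim_acquire(GFP_KERNEL);
                mutex_lock(mutex);
                mutex_unlock(mutex);
                fs_reclaim_release(GFP_KERNEL);
        }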