}
mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_gem_object_is_shrinkable(obj) &&
- obj->mm.madv == I915_MADV_WILLNEED) {
- struct list_head *list;
-
- spin_lock(&i915->mm.obj_lock);
- list = obj->bind_count ?
- &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->mm.link, list);
- spin_unlock(&i915->mm.obj_lock);
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ if (obj->mm.madv == I915_MADV_WILLNEED) {
+ struct list_head *list;
+
+ list = obj->bind_count ?
+ &i915->mm.bound_list : &i915->mm.unbound_list;
+ list_move_tail(&obj->mm.link, list);
+ }
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
*/
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_shrinkable(obj)) {
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_del_init(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
mutex_unlock(&i915->drm.struct_mutex);
obj->mm.madv = I915_MADV_DONTNEED;
if (i915_gem_object_has_pages(obj)) {
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_move_tail(&obj->mm.link, &i915->mm.purge_list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
if (i915_gem_object_is_shrinkable(obj)) {
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
list_add(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
return pages;
if (i915_gem_object_is_shrinkable(obj)) {
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
list_del(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
- spin_unlock(&i915->mm.obj_lock);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
if (obj->mm.mapping) {
i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
- unsigned flags)
+ unsigned int shrink)
{
const struct {
struct list_head *list;
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(i915, flags, &unlock))
+ if (!shrinker_lock(i915, shrink, &unlock))
return 0;
/*
* We don't care about errors here; if we cannot wait upon the GPU,
* we will free as much as we can and hope to get a second chance.
*/
- if (flags & I915_SHRINK_ACTIVE)
+ if (shrink & I915_SHRINK_ACTIVE)
i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- trace_i915_gem_shrink(i915, target, flags);
+ trace_i915_gem_shrink(i915, target, shrink);
i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
- if (flags & I915_SHRINK_BOUND) {
+ if (shrink & I915_SHRINK_BOUND) {
wakeref = intel_runtime_pm_get_if_in_use(i915);
if (!wakeref)
- flags &= ~I915_SHRINK_BOUND;
+ shrink &= ~I915_SHRINK_BOUND;
}
/*
for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
struct drm_i915_gem_object *obj;
+ unsigned long flags;
- if ((flags & phase->bit) == 0)
+ if ((shrink & phase->bit) == 0)
continue;
INIT_LIST_HEAD(&still_in_list);
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
mm.link))) {
list_move_tail(&obj->mm.link, &still_in_list);
- if (flags & I915_SHRINK_VMAPS &&
+ if (shrink & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mm.mapping))
continue;
- if (!(flags & I915_SHRINK_ACTIVE) &&
+ if (!(shrink & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
i915_gem_object_is_framebuffer(obj)))
continue;
- if (!(flags & I915_SHRINK_BOUND) &&
+ if (!(shrink & I915_SHRINK_BOUND) &&
READ_ONCE(obj->bind_count))
continue;
if (!can_release_pages(obj))
continue;
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
if (!i915_gem_object_has_pages(obj)) {
- try_to_writeback(obj, flags);
+ try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
}
scanned += obj->base.size >> PAGE_SHIFT;
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
}
list_splice_tail(&still_in_list, phase->list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
- if (flags & I915_SHRINK_BOUND)
+ if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915, wakeref);
i915_retire_requests(i915);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
intel_wakeref_t wakeref;
+ unsigned long flags;
freed_pages = 0;
with_intel_runtime_pm(i915, wakeref)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size >> PAGE_SHIFT;
}
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
+ unsigned long flags;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
return obj;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
unsigned long total, count, n;
+ unsigned long flags;
int ret;
total = READ_ONCE(dev_priv->mm.shrink_count);
total_obj_size = total_gtt_size = count = 0;
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (count == total)
break;
objects[count++] = obj;
total_obj_size += obj->base.size;
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
struct drm_i915_gem_object *obj;
unsigned int page_sizes = 0;
+ unsigned long flags;
char buf[80];
int ret;
purgeable_size = purgeable_count = 0;
huge_size = huge_count = 0;
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
size += obj->base.size;
++count;
page_sizes |= obj->mm.page_sizes.sg;
}
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
seq_printf(m, "%u bound objects, %llu bytes\n",
count, size);
struct list_head *list;
if (i915_gem_object_is_shrinkable(obj)) {
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
else if (obj->bind_count)
list = &i915->mm.bound_list;
else
list = &i915->mm.unbound_list;
list_move_tail(&obj->mm.link, list);
- spin_unlock(&i915->mm.obj_lock);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- spin_lock(&i915->mm.obj_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (obj->bind_count)
list_move_tail(&obj->mm.link, &i915->mm.bound_list);
- spin_unlock(&i915->mm.obj_lock);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
obj->mm.dirty = true; /* be paranoid */
}
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long flags;
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
if (i915_gem_object_is_shrinkable(obj))
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
assert_bind_count(obj);
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
}
return 0;
*/
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long flags;
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(obj->bind_count == 0);
if (--obj->bind_count == 0 &&
obj->mm.madv == I915_MADV_WILLNEED)
list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
/*
* And finally now the object is completely decoupled from this