}
}
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (i915_vm_has_cache_coloring(vma->vm))
+ vma->node.color = cache_level;
+ }
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
int
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags)
{
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
drm_mm_scan_init_with_range(&scan, &vm->mm,
- min_size, alignment, cache_level,
+ min_size, alignment, color,
start, end, mode);
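For reference (not part of this patch), the color passed to the scan only has an effect through the vm's color_adjust callback, which drm_mm invokes on the nodes neighbouring each candidate hole. The GGTT's callback in this era looks roughly like the following sketch:

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start, u64 *end)
{
	/* Keep a guard page after a preceding node of a different color */
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/* ...and likewise before the node that follows the hole */
	node = list_next_entry(node, node_list);
	if (i915_node_color_differs(node, color))
		*end -= I915_GTT_PAGE_SIZE;
}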
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
- bool check_color;
int ret = 0;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
if (!(flags & PIN_NONBLOCK))
i915_retire_requests(vm->i915);
- check_color = vm->mm.color_adjust;
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start)
start -= I915_GTT_PAGE_SIZE;
* abut and conflict. If they are in conflict, then we evict
* those as well to make room for our guard pages.
*/
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
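For context (elided from this hunk), the sibling branch performs the mirror check for a node abutting the end of the target range, roughly:

	if (node->start == target->start + target->size) {
		if (node->color == target->color)
			continue;
	}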
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
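A note on when the new helper can return true at all: mm.color_adjust is only ever installed on the global GTT, and only on platforms without an LLC where snooped and unsnooped cache domains must be kept apart by guard pages. From memory, the wiring during GGTT init looks along these lines (illustrative sketch, not part of this patch):

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

Hence the helper simply combines the i915_is_ggtt() check with the presence of the callback.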
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
vma->flags &= ~I915_VMA_CAN_FENCE;
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
- if (vma->vm->mm.color_adjust == NULL)
+ if (!i915_vm_has_cache_coloring(vma->vm))
return true;
/* Only valid to be called on an already inserted vma */
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
- if (i915_node_color_differs(other, cache_level) &&
+ if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
- if (i915_node_color_differs(other, cache_level) &&
+ if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(node))
return false;
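For reference, i915_node_color_differs is a one-line helper in the gtt header; it reads roughly as:

	static inline bool
	i915_node_color_differs(const struct drm_mm_node *node,
				unsigned long color)
	{
		/* Only an allocated neighbour can conflict with our color */
		return drm_mm_node_allocated(node) && node->color != color;
	}

so an unallocated neighbour (i.e. a hole boundary) never counts as a conflict.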
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
- unsigned int cache_level;
+ unsigned long color;
u64 start, end;
int ret;
return -ENOSPC;
}
+ color = 0;
if (vma->obj) {
ret = i915_gem_object_pin_pages(vma->obj);
if (ret)
return ret;
- cache_level = vma->obj->cache_level;
- } else {
- cache_level = 0;
+ if (i915_vm_has_cache_coloring(vma->vm))
+ color = vma->obj->cache_level;
}
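The color here is just the object's cache level widened to unsigned long; for reference, the driver's enum in this era looks roughly like:

	enum i915_cache_level {
		I915_CACHE_NONE = 0,	/* uncached/unsnooped */
		I915_CACHE_LLC,		/* coherent via the LLC */
		I915_CACHE_L3_LLC,	/* gen7+, coherent via L3 + LLC */
		I915_CACHE_WT,		/* write-through, for scanout */
	};

A ppGTT vma (or one without a backing object) keeps color 0, which is harmless since such a vm never installs a color_adjust callback.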
GEM_BUG_ON(vma->pages);
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, cache_level,
+ size, offset, color,
flags);
if (ret)
goto err_clear;
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, cache_level,
+ size, alignment, color,
start, end, flags);
if (ret)
goto err_clear;
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
mutex_lock(&vma->vm->mutex);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
LIST_HEAD(objects);
int err;
- /* Currently the use of color_adjust is limited to cache domains within
- * the ggtt, and so the presence of mm.color_adjust is assumed to be
- * i915_ggtt_color_adjust throughout our driver, so using a mock color
- * adjust will work just fine for our purposes.
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so any mm.color_adjust installed on the
+ * GGTT is assumed to be i915_ggtt_color_adjust; a mock color adjust
+ * therefore works just fine for our purposes.
*/
ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
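mock_color_adjust itself only needs to exist so that the GGTT reports coloring as enabled for this selftest; an empty callback matching drm_mm's signature suffices, something like:

	static void mock_color_adjust(const struct drm_mm_node *node,
				      unsigned long color,
				      u64 *start, u64 *end)
	{
	}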
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {