drm/i915: cleanup cache-coloring
authorMatthew Auld <matthew.auld@intel.com>
Mon, 9 Sep 2019 12:40:52 +0000 (13:40 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Mon, 9 Sep 2019 20:00:20 +0000 (21:00 +0100)
Try to tidy up the cache-coloring such that we rid the code of any
mm.color_adjust assumptions. This should hopefully make it more obvious
in the code when we need to actually use the cache-level as the color,
and as a bonus should make adding a different color-scheme simpler.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190909124052.22900-3-matthew.auld@intel.com
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/selftests/i915_gem_evict.c

index 6af740a5e3db100be95dc8cf46f77ba46b4a6249..da3e7cf12aa174fa216df1b49287d6a452adab43 100644 (file)
@@ -294,8 +294,10 @@ restart:
                }
        }
 
-       list_for_each_entry(vma, &obj->vma.list, obj_link)
-               vma->node.color = cache_level;
+       list_for_each_entry(vma, &obj->vma.list, obj_link) {
+               if (i915_vm_has_cache_coloring(vma->vm))
+                       vma->node.color = cache_level;
+       }
        i915_gem_object_set_cache_coherency(obj, cache_level);
        obj->cache_dirty = true; /* Always invalidate stale cachelines */
 
index db7480831e52b02e8d7eefcde96370244d7db8ca..e289b4ffd34bb6ca7966f54cc6848bd94937cd9f 100644 (file)
@@ -2364,7 +2364,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          u64 min_size, u64 alignment,
-                                         unsigned cache_level,
+                                         unsigned long color,
                                          u64 start, u64 end,
                                          unsigned flags);
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
index 52c86c6e06739b260710d76f8e03258e012f354a..e76c9da9992db36a55822deea0c95a6cc5e12e50 100644 (file)
@@ -70,7 +70,7 @@ mark_free(struct drm_mm_scan *scan,
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
  * @start: start (inclusive) of the range from which to evict objects
  * @end: end (exclusive) of the range from which to evict objects
  * @flags: additional flags to control the eviction algorithm
@@ -91,7 +91,7 @@ mark_free(struct drm_mm_scan *scan,
 int
 i915_gem_evict_something(struct i915_address_space *vm,
                         u64 min_size, u64 alignment,
-                        unsigned cache_level,
+                        unsigned long color,
                         u64 start, u64 end,
                         unsigned flags)
 {
@@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
        drm_mm_scan_init_with_range(&scan, &vm->mm,
-                                   min_size, alignment, cache_level,
+                                   min_size, alignment, color,
                                    start, end, mode);
 
        /*
@@ -266,7 +266,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
        u64 start = target->start;
        u64 end = start + target->size;
        struct i915_vma *vma, *next;
-       bool check_color;
        int ret = 0;
 
        lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -283,8 +282,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
        if (!(flags & PIN_NONBLOCK))
                i915_retire_requests(vm->i915);
 
-       check_color = vm->mm.color_adjust;
-       if (check_color) {
+       if (i915_vm_has_cache_coloring(vm)) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
                if (start)
                        start -= I915_GTT_PAGE_SIZE;
@@ -310,7 +308,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                 * abutt and conflict. If they are in conflict, then we evict
                 * those as well to make room for our guard pages.
                 */
-               if (check_color) {
+               if (i915_vm_has_cache_coloring(vm)) {
                        if (node->start + node->size == target->start) {
                                if (node->color == target->color)
                                        continue;
index 07c85c134d4c47bb1d4a6fb32ba5cf08e55ddc98..201788126a8932fd564a93a763a55384aad4eed9 100644 (file)
@@ -376,6 +376,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
        return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
 }
 
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+       return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
index a90bd2678353a2a935c310dd87b3c78ce2f5fe00..68d9f9b4d050e88e6b8e987744ebc884aa30d526 100644 (file)
@@ -477,7 +477,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
                vma->flags &= ~I915_VMA_CAN_FENCE;
 }
 
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
 {
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;
@@ -489,7 +489,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
-       if (vma->vm->mm.color_adjust == NULL)
+       if (!i915_vm_has_cache_coloring(vma->vm))
                return true;
 
        /* Only valid to be called on an already inserted vma */
@@ -497,12 +497,12 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
        GEM_BUG_ON(list_empty(&node->node_list));
 
        other = list_prev_entry(node, node_list);
-       if (i915_node_color_differs(other, cache_level) &&
+       if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(other))
                return false;
 
        other = list_next_entry(node, node_list);
-       if (i915_node_color_differs(other, cache_level) &&
+       if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(node))
                return false;
 
@@ -539,7 +539,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
        struct drm_i915_private *dev_priv = vma->vm->i915;
-       unsigned int cache_level;
+       unsigned long color;
        u64 start, end;
        int ret;
 
@@ -580,14 +580,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                return -ENOSPC;
        }
 
+       color = 0;
        if (vma->obj) {
                ret = i915_gem_object_pin_pages(vma->obj);
                if (ret)
                        return ret;
 
-               cache_level = vma->obj->cache_level;
-       } else {
-               cache_level = 0;
+               if (i915_vm_has_cache_coloring(vma->vm))
+                       color = vma->obj->cache_level;
        }
 
        GEM_BUG_ON(vma->pages);
@@ -605,7 +605,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-                                          size, offset, cache_level,
+                                          size, offset, color,
                                           flags);
                if (ret)
                        goto err_clear;
@@ -644,7 +644,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-                                         size, alignment, cache_level,
+                                         size, alignment, color,
                                          start, end, flags);
                if (ret)
                        goto err_clear;
@@ -653,7 +653,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
+       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
        mutex_lock(&vma->vm->mutex);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);
index 5b1e0cf7669d637a87b20a0726eab8dfb349b59f..425bda4db2c2620c2f194c687210fbc1dc5ed895 100644 (file)
@@ -295,7 +295,7 @@ i915_vma_compare(struct i915_vma *vma,
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
index fca38167bdce4dbec8ea6ca1b79e44c36d89e489..2905fb21d86636352f7f0ab2a762ec164f9364bf 100644 (file)
@@ -274,12 +274,14 @@ static int igt_evict_for_cache_color(void *arg)
        LIST_HEAD(objects);
        int err;
 
-       /* Currently the use of color_adjust is limited to cache domains within
-        * the ggtt, and so the presence of mm.color_adjust is assumed to be
-        * i915_ggtt_color_adjust throughout our driver, so using a mock color
-        * adjust will work just fine for our purposes.
+       /*
+        * Currently the use of color_adjust for the GGTT is limited to cache
+        * coloring and guard pages, and so the presence of mm.color_adjust for
+        * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+        * color adjust will work just fine for our purposes.
         */
        ggtt->vm.mm.color_adjust = mock_color_adjust;
+       GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
 
        obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {