drm/i915: Use helpers for drm_mm_node booleans
author    Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 3 Oct 2019 21:00:58 +0000 (22:00 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 4 Oct 2019 14:34:27 +0000 (15:34 +0100)
Apply a subset of 71724f708997 ("drm/mm: Use helpers for drm_mm_node
booleans") in order to prepare drm-intel-next-queued for subsequent
patches before 71724f708997 itself can be backmerged.

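For context, drm_mm_node_allocated() already exists in
include/drm/drm_mm.h; at the time of this patch it is a trivial wrapper
over the open-coded boolean that the hunks below stop reading directly.
A sketch of the helper before and after 71724f708997, paraphrased from
drm_mm.h rather than quoted verbatim:

    /* Before 71724f708997: reads the boolean directly. */
    static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
    {
            return node->allocated;
    }

    /* After 71724f708997: the boolean becomes a bit in node->flags. */
    static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
    {
            return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
    }

Callers that go through the helper, as converted below, keep working
unchanged across that representation change; open-coded node->allocated
reads would not.
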
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004142226.13711-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8fbb454cfd6bde3a47a7ca1d5f59935c9c585934..228ce24ea280693e710b66939a9d6a7efeb6fc7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -968,7 +968,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
                intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                io_mapping_unmap_atomic((void __iomem *)vaddr);
 
-               if (cache->node.allocated) {
+               if (drm_mm_node_allocated(&cache->node)) {
                        ggtt->vm.clear_range(&ggtt->vm,
                                             cache->node.start,
                                             cache->node.size);
@@ -1061,7 +1061,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
        }
 
        offset = cache->node.start;
-       if (cache->node.allocated) {
+       if (drm_mm_node_allocated(&cache->node)) {
                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, page),
                                     offset, I915_CACHE_NONE, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bb878119f06cae65448bdd663c13f01649224078..bb4889d2346d897542b01c01df4ed46f739086c0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -387,7 +387,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 {
        struct drm_mm_node *node = &ggtt->uc_fw;
 
-       GEM_BUG_ON(!node->allocated);
+       GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(upper_32_bits(node->start));
        GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1426e506700d1bfec184bfd0db389bbd6439da69..fa8e028ac0b56f2b2dce0e6856b6578ae8da6b6c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
-               GEM_BUG_ON(!node.allocated);
+               GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
 
        mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
-               if (node.allocated) {
+               if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
        mutex_lock(&i915->drm.struct_mutex);
-       if (node.allocated) {
+       if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
-               GEM_BUG_ON(!node.allocated);
+               GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
 
        mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
-               if (node.allocated) {
+               if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
        mutex_lock(&i915->drm.struct_mutex);
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-       if (node.allocated) {
+       if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e76c9da9992db36a55822deea0c95a6cc5e12e50..8c1e04f402bccef917b6e16ded42dddabe3d3277 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -299,7 +299,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                        break;
                }
 
-               GEM_BUG_ON(!node->allocated);
+               GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = container_of(node, typeof(*vma), node);
 
                /* If we are using coloring to insert guard pages between
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9d5b0f87c21003b243480447a8cc1944e0597d1b..68c34b1a20e4847ea0a12564daab3ba4cce566b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -795,7 +795,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-       GEM_BUG_ON(vma->node.allocated);
+       GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->fence);
 
        mutex_lock(&vma->vm->mutex);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8bcb5812c4467bc5e10d6b7a4f6b1c24ab6cc8d0..e49b199f7de7a1354051d15f81c2e0f1b8a46a5c 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -228,7 +228,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-       GEM_BUG_ON(!vma->node.allocated);
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(upper_32_bits(vma->node.start));
        GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
        return lower_32_bits(vma->node.start);
@@ -390,7 +390,7 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 static inline bool i915_node_color_differs(const struct drm_mm_node *node,
                                           unsigned long color)
 {
-       return node->allocated && node->color != color;
+       return drm_mm_node_allocated(node) && node->color != color;
 }
 
 /**