drm/amdgpu: remove amdgpu_bo_list_entry.robj (v2)
author: Christian König <christian.koenig@amd.com>
Mon, 10 Sep 2018 14:07:57 +0000 (16:07 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 13 Sep 2018 20:14:12 +0000 (15:14 -0500)
We can get that just by casting tv.bo.

v2: squash in kfd fix (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index e7ceae05d51790ce7c2b114f721ec30b15c520a5..6ee9dc476c860aff3a0913dd7b181099da63114b 100644 (file)
@@ -675,7 +675,6 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
        if (!ctx->vm_pd)
                return -ENOMEM;
 
-       ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
@@ -740,7 +739,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                        return -ENOMEM;
        }
 
-       ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
index b80243d3972e490fbd0d97804e978269b5cd7372..14d2982a47cce32b028e151f6a78b291efe08263 100644 (file)
@@ -49,8 +49,11 @@ static void amdgpu_bo_list_free(struct kref *ref)
                                                   refcount);
        struct amdgpu_bo_list_entry *e;
 
-       amdgpu_bo_list_for_each_entry(e, list)
-               amdgpu_bo_unref(&e->robj);
+       amdgpu_bo_list_for_each_entry(e, list) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+               amdgpu_bo_unref(&bo);
+       }
 
        call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
 }
@@ -112,21 +115,20 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
                        entry = &array[last_entry++];
                }
 
-               entry->robj = bo;
                entry->priority = min(info[i].bo_priority,
                                      AMDGPU_BO_LIST_MAX_PRIORITY);
-               entry->tv.bo = &entry->robj->tbo;
-               entry->tv.shared = !entry->robj->prime_shared_count;
-
-               if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
-                       list->gds_obj = entry->robj;
-               if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
-                       list->gws_obj = entry->robj;
-               if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
-                       list->oa_obj = entry->robj;
-
-               total_size += amdgpu_bo_size(entry->robj);
-               trace_amdgpu_bo_list_set(list, entry->robj);
+               entry->tv.bo = &bo->tbo;
+               entry->tv.shared = !bo->prime_shared_count;
+
+               if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
+                       list->gds_obj = bo;
+               if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
+                       list->gws_obj = bo;
+               if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
+                       list->oa_obj = bo;
+
+               total_size += amdgpu_bo_size(bo);
+               trace_amdgpu_bo_list_set(list, bo);
        }
 
        list->first_userptr = first_userptr;
@@ -138,8 +140,11 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
        return 0;
 
 error_free:
-       while (i--)
-               amdgpu_bo_unref(&array[i].robj);
+       while (i--) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+               amdgpu_bo_unref(&bo);
+       }
        kvfree(list);
        return r;
 
@@ -191,9 +196,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
         * with the same priority, i.e. it must be stable.
         */
        amdgpu_bo_list_for_each_entry(e, list) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                unsigned priority = e->priority;
 
-               if (!e->robj->parent)
+               if (!bo->parent)
                        list_add_tail(&e->tv.head, &bucket[priority]);
 
                e->user_pages = NULL;
index 61b089768e1ce73bb65229b3e8fce8800c5f49b8..7c5f5d1601e68282e2c163c80af68200395cd793 100644 (file)
@@ -32,7 +32,6 @@ struct amdgpu_bo_va;
 struct amdgpu_fpriv;
 
 struct amdgpu_bo_list_entry {
-       struct amdgpu_bo                *robj;
        struct ttm_validate_buffer      tv;
        struct amdgpu_bo_va             *bo_va;
        uint32_t                        priority;
index 1081fd00b059e95639ce83d142f572613c89fc87..d762d78e51024e9a8e3f9aaff22a6327e7a3305d 100644 (file)
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      uint32_t *offset)
 {
        struct drm_gem_object *gobj;
+       struct amdgpu_bo *bo;
        unsigned long size;
        int r;
 
@@ -46,21 +47,21 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        if (gobj == NULL)
                return -EINVAL;
 
-       p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+       bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
-       p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+       p->uf_entry.tv.bo = &bo->tbo;
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
 
        drm_gem_object_put_unlocked(gobj);
 
-       size = amdgpu_bo_size(p->uf_entry.robj);
+       size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }
 
-       if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+       if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }
@@ -70,7 +71,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        return 0;
 
 error_unref:
-       amdgpu_bo_unref(&p->uf_entry.robj);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
@@ -229,7 +230,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_all_kdata;
        }
 
-       if (p->uf_entry.robj)
+       if (p->uf_entry.tv.bo)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);
 
@@ -458,13 +459,13 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
             p->evictable = list_prev_entry(p->evictable, tv.head)) {
 
                struct amdgpu_bo_list_entry *candidate = p->evictable;
-               struct amdgpu_bo *bo = candidate->robj;
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
                struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
                bool update_bytes_moved_vis;
                uint32_t other;
 
                /* If we reached our current BO we can forget it */
-               if (candidate->robj == validated)
+               if (bo == validated)
                        break;
 
                /* We can't move pinned BOs here */
@@ -529,7 +530,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
        int r;
 
        list_for_each_entry(lobj, validated, tv.head) {
-               struct amdgpu_bo *bo = lobj->robj;
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                bool binding_userptr = false;
                struct mm_struct *usermm;
 
@@ -604,7 +605,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-       if (p->uf_entry.robj && !p->uf_entry.robj->parent)
+       if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);
 
        while (1) {
@@ -620,7 +621,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
                INIT_LIST_HEAD(&need_pages);
                amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-                       struct amdgpu_bo *bo = e->robj;
+                       struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
                        if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                                 &e->user_invalidated) && e->user_pages) {
@@ -639,7 +640,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                list_del(&e->tv.head);
                                list_add(&e->tv.head, &need_pages);
 
-                               amdgpu_bo_unreserve(e->robj);
+                               amdgpu_bo_unreserve(bo);
                        }
                }
 
@@ -658,7 +659,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
                /* Fill the page arrays for all userptrs. */
                list_for_each_entry(e, &need_pages, tv.head) {
-                       struct ttm_tt *ttm = e->robj->tbo.ttm;
+                       struct ttm_tt *ttm = e->tv.bo->ttm;
 
                        e->user_pages = kvmalloc_array(ttm->num_pages,
                                                         sizeof(struct page*),
@@ -717,7 +718,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        oa = p->bo_list->oa_obj;
 
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
-               e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+               e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
 
        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -732,8 +733,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                p->job->oa_size = amdgpu_bo_size(oa);
        }
 
-       if (!r && p->uf_entry.robj) {
-               struct amdgpu_bo *uf = p->uf_entry.robj;
+       if (!r && p->uf_entry.tv.bo) {
+               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 
                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
@@ -749,8 +750,7 @@ error_free_pages:
                if (!e->user_pages)
                        continue;
 
-               release_pages(e->user_pages,
-                             e->robj->tbo.ttm->num_pages);
+               release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
                kvfree(e->user_pages);
        }
 
@@ -763,9 +763,11 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
        int r;
 
        list_for_each_entry(e, &p->validated, tv.head) {
-               struct reservation_object *resv = e->robj->tbo.resv;
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+               struct reservation_object *resv = bo->tbo.resv;
+
                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
-                                    amdgpu_bo_explicit_sync(e->robj));
+                                    amdgpu_bo_explicit_sync(bo));
 
                if (r)
                        return r;
@@ -808,7 +810,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
        kfree(parser->chunks);
        if (parser->job)
                amdgpu_job_free(parser->job);
-       amdgpu_bo_unref(&parser->uf_entry.robj);
+       if (parser->uf_entry.tv.bo) {
+               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+
+               amdgpu_bo_unref(&uf);
+       }
 }
 
 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
@@ -919,7 +925,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                struct dma_fence *f;
 
                /* ignore duplicates */
-               bo = e->robj;
+               bo = ttm_to_amdgpu_bo(e->tv.bo);
                if (!bo)
                        continue;
 
@@ -958,11 +964,13 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+                       struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
                        /* ignore duplicates */
-                       if (!e->robj)
+                       if (!bo)
                                continue;
 
-                       amdgpu_vm_bo_invalidate(adev, e->robj, false);
+                       amdgpu_vm_bo_invalidate(adev, bo, false);
                }
        }
 
@@ -1211,7 +1219,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-               struct amdgpu_bo *bo = e->robj;
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
                        r = -ERESTARTSYS;
index dd5a0cdd67bc0319be56fa3f0dba46b2c9993eb0..234764ac58cfa74eeb658c3b883c09144d017034 100644 (file)
@@ -593,9 +593,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
 {
-       entry->robj = vm->root.base.bo;
        entry->priority = 0;
-       entry->tv.bo = &entry->robj->tbo;
+       entry->tv.bo = &vm->root.base.bo->tbo;
        entry->tv.shared = true;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);