drm/amdgpu: split VM PD and PT handling during CS
author Christian König <christian.koenig@amd.com>
Fri, 11 Dec 2015 14:16:32 +0000 (15:16 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 18 Dec 2015 22:29:45 +0000 (17:29 -0500)
This way we avoid the extra allocation for the page directory entry: the PD is now tracked in a caller-provided amdgpu_bo_list_entry instead of taking a slot in the dynamically allocated page table array.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index dc3dab539e4c3984c5e14d03789858968b5cf6ab..40850afa763f65aaefba9bece411ba8b218cbce2 100644 (file)
@@ -980,10 +980,11 @@ struct amdgpu_vm_manager {
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-                                              struct amdgpu_vm *vm,
-                                              struct list_head *validated,
-                                              struct list_head *duplicates);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+                        struct list_head *validated,
+                        struct amdgpu_bo_list_entry *entry);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
+                                                 struct list_head *duplicates);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
@@ -1253,6 +1254,7 @@ struct amdgpu_cs_parser {
        unsigned                nchunks;
        struct amdgpu_cs_chunk  *chunks;
        /* relocations */
+       struct amdgpu_bo_list_entry     vm_pd;
        struct amdgpu_bo_list_entry     *vm_bos;
        struct list_head        validated;
        struct fence            *fence;
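
The interface change above is the core of the patch: the old amdgpu_vm_get_bos() allocated max_pde_used + 2 entries in one array (slot 0 for the page directory, the rest for page tables), while the new pair splits that into amdgpu_vm_get_pd_bo(), which fills a caller-provided entry and cannot fail, and amdgpu_vm_get_pt_bos(), which still allocates an array but only for the page tables. A minimal sketch of the resulting calling pattern, assembled from the amdgpu_cs.c and amdgpu_gem.c hunks below; the helper name is made up and the validation/update steps are elided:

/* Hypothetical helper, only to show how the two new calls pair up. */
static int example_reserve_vm(struct amdgpu_vm *vm)
{
        struct amdgpu_bo_list_entry pd;         /* caller-provided storage, no allocation */
        struct amdgpu_bo_list_entry *pts;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        /* the page directory goes on the validation list before reserving */
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                return r;

        /* the page tables are gathered afterwards; only this step can run out of memory */
        pts = amdgpu_vm_get_pt_bos(vm, &duplicates);
        if (!pts) {
                ttm_eu_backoff_reservation(&ticket, &list);
                return -ENOMEM;
        }

        /* ... validate BOs, update the page directory and page tables ... */

        ttm_eu_backoff_reservation(&ticket, &list);
        drm_free_large(pts);
        return 0;
}

Since amdgpu_vm_get_pd_bo() cannot fail, the page directory always ends up on the validation list; that is what lets the GEM VA path further down report an allocation failure instead of silently bailing out as it used to.
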
index 9591c13781bdd196578da0b2c1c14c6b77a095a2..3fb21ecd29e0748874da57cb4207c7e83a5aba8e 100644 (file)
@@ -387,8 +387,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
        }
 
        INIT_LIST_HEAD(&duplicates);
-       p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
-                                     &p->validated, &duplicates);
+       amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
        if (need_mmap_lock)
                down_read(&current->mm->mmap_sem);
@@ -397,6 +396,12 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
        if (unlikely(r != 0))
                goto error_reserve;
 
+       p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+       if (!p->vm_bos) {
+               r = -ENOMEM;
+               goto error_validate;
+       }
+
        r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
        if (r)
                goto error_validate;
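
Ordering is what matters in the CS path: the page directory entry is added to p->validated before the buffers are reserved, while the page table array is only built once the reservation has succeeded, and its failure (which the old code never checked) is now turned into -ENOMEM and unwound through error_validate. The mmap_sem handling and the reserve call sit between the two hunks and are not shown; assuming they are unchanged, the flow now reads roughly:

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);     /* cannot fail */

        /* presumed unchanged code between the hunks; p->ticket is the
         * parser's ww_acquire_ctx (not shown in the header diff) */
        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
        if (unlikely(r != 0))
                goto error_reserve;

        p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
        if (!p->vm_bos) {
                r = -ENOMEM;
                goto error_validate;    /* the reservation has to be backed off */
        }

        r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
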
index 7fe7f8afa5ffe0a9f0f60d1474c92677607a8bee..ea0fe94e4b54c2b7a69167829ff10f3d5c2fb8fc 100644 (file)
@@ -448,6 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 {
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry *vm_bos;
+       struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
@@ -460,14 +461,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        tv.shared = true;
        list_add(&tv.head, &list);
 
-       vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list, &duplicates);
-       if (!vm_bos)
-               return;
+       amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
 
        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
-               goto error_free;
+               goto error_print;
+
+       vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+       if (!vm_bos) {
+               r = -ENOMEM;
+               goto error_unreserve;
+       }
 
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
@@ -489,10 +494,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);
-
-error_free:
        drm_free_large(vm_bos);
 
+error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
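
The GEM VA path gets the same split, with a useful side effect on error reporting: before, a failed amdgpu_vm_get_bos() made the function return silently without reserving anything; now the page directory is always added (no allocation involved) and only the page table array can fail, in which case -ENOMEM travels through the normal DRM_ERROR path. Stitching the hunks together, with the validation loop and the amdgpu_vm_* update calls elided, the unwind structure becomes:

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;               /* nothing reserved, nothing allocated */

        vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        if (!vm_bos) {
                r = -ENOMEM;
                goto error_unreserve;           /* drm_free_large(NULL) is a no-op */
        }

        /* ... validate the BO domains, update the page directory and the mapping ... */

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);
        drm_free_large(vm_bos);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
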
index f6c1d6f0bf3787f9e9bef3d1fd7d2ce848b0e18c..592be6438a6cc6dde1de4007f7f96675706e5306 100644 (file)
@@ -75,39 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_vm_get_bos - add the vm BOs to a validation list
+ * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
  *
  * @vm: vm providing the BOs
  * @validated: head of validation list
+ * @entry: entry to add
+ *
+ * Add the page directory to the list of BOs to
+ * validate for command submission.
+ */
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+                        struct list_head *validated,
+                        struct amdgpu_bo_list_entry *entry)
+{
+       entry->robj = vm->page_directory;
+       entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
+       entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
+       entry->priority = 0;
+       entry->tv.bo = &vm->page_directory->tbo;
+       entry->tv.shared = true;
+       list_add(&entry->tv.head, validated);
+}
+
+/**
+ * amdgpu_vm_get_pt_bos - add the page table BOs to a duplicates list
+ *
+ * @vm: vm providing the BOs
  * @duplicates: head of duplicates list
  *
- * Add the page directory to the list of BOs to
- * validate for command submission (cayman+).
+ * Add the page tables to the list of BOs to
+ * validate for command submission.
  */
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-                                              struct amdgpu_vm *vm,
-                                              struct list_head *validated,
-                                              struct list_head *duplicates)
+struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
+                                                 struct list_head *duplicates)
 {
        struct amdgpu_bo_list_entry *list;
        unsigned i, idx;
 
-       list = drm_malloc_ab(vm->max_pde_used + 2,
+       list = drm_malloc_ab(vm->max_pde_used + 1,
                             sizeof(struct amdgpu_bo_list_entry));
-       if (!list) {
+       if (!list)
                return NULL;
-       }
 
        /* add the vm page table to the list */
-       list[0].robj = vm->page_directory;
-       list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
-       list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
-       list[0].priority = 0;
-       list[0].tv.bo = &vm->page_directory->tbo;
-       list[0].tv.shared = true;
-       list_add(&list[0].tv.head, validated);
-
-       for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+       for (i = 0, idx = 0; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i].bo)
                        continue;
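
The hunk is cut off inside the page table loop, but with idx now starting at 0 the max_pde_used + 1 entries from drm_malloc_ab() are enough for all page tables (indices 0 through max_pde_used), which is the allocation saving the commit message refers to. For illustration only (the rest of the loop is not part of the shown hunk and presumably keeps the old per-entry setup, analogous to the removed list[0] code above), the remainder of amdgpu_vm_get_pt_bos() would read roughly:

                list[idx].robj = vm->page_tables[i].bo;
                list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
                list[idx].priority = 0;
                list[idx].tv.bo = &list[idx].robj->tbo;
                list[idx].tv.shared = true;
                list_add(&list[idx].tv.head, duplicates);
                idx++;
        }

        return list;
}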