drm/amdgpu: use leaf iterator for filling PTs
author Christian König <christian.koenig@amd.com>
Mon, 3 Sep 2018 12:34:51 +0000 (14:34 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 13 Sep 2018 20:14:10 +0000 (15:14 -0500)
Using the leaf iterator has less overhead and is the starting point for
further cleanups and improvements.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
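
For readers who have not seen the page-table cursor before, the sketch below shows
the general shape of a leaf walk: position a cursor on the leaf page table that
covers the first pfn, consume as many PTEs as that leaf can serve (or as the request
still needs), then hop to the next leaf, instead of resolving every address from the
root the way the removed amdgpu_vm_get_entry() did. This is a stand-alone toy, not
amdgpu code; every name in it (toy_cursor, toy_first_leaf, toy_next_leaf, the table
geometry, the 4096 page-size stand-in) is invented for illustration, and only the
block-boundary arithmetic mirrors the patched loop below.

/* toy_leaf_walk.c - illustrative sketch only; nothing here is amdgpu code */
#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PT 8ULL                     /* PTEs per leaf page table      */
#define NUM_PTS     4                        /* leaves hanging off the root   */
#define PT_MASK     (PTES_PER_PT - 1)        /* analogue of the patch's mask  */

struct toy_pt {                              /* one leaf page table           */
        uint64_t pte[PTES_PER_PT];
};

struct toy_cursor {                          /* analogue of amdgpu_vm_pt_cursor */
        uint64_t pfn;                        /* first pfn not yet handled     */
        struct toy_pt *entry;                /* leaf covering pfn             */
};

/* fully populated toy "VM" covering pfns 0..NUM_PTS * PTES_PER_PT - 1 */
static struct toy_pt root[NUM_PTS];

/* Position the cursor on the leaf that covers @pfn. */
static void toy_first_leaf(struct toy_cursor *cur, uint64_t pfn)
{
        cur->pfn = pfn;
        cur->entry = &root[pfn / PTES_PER_PT];
}

/* Step to the first pfn of the next leaf; returns 0 once past @last. */
static int toy_next_leaf(struct toy_cursor *cur, uint64_t last)
{
        cur->pfn = (cur->pfn | PT_MASK) + 1; /* round up to the next leaf     */
        if (cur->pfn > last)
                return 0;
        cur->entry = &root[cur->pfn / PTES_PER_PT];
        return 1;
}

/* Walk [start, end) and touch every leaf exactly once. */
static void toy_update_ptes(uint64_t start, uint64_t end, uint64_t dst)
{
        struct toy_cursor cur;
        int more = 1;

        for (toy_first_leaf(&cur, start); more && cur.pfn < end;
             more = toy_next_leaf(&cur, end - 1)) {
                uint64_t nptes;

                /* same block-boundary test as the patched loop */
                if ((cur.pfn & ~PT_MASK) == (end & ~PT_MASK))
                        nptes = end - cur.pfn;
                else
                        nptes = PTES_PER_PT - (cur.pfn & PT_MASK);

                printf("leaf %td: %llu PTEs from slot %llu, dst 0x%llx\n",
                       cur.entry - root, (unsigned long long)nptes,
                       (unsigned long long)(cur.pfn & PT_MASK),
                       (unsigned long long)dst);

                dst += nptes * 4096;         /* stand-in for AMDGPU_GPU_PAGE_SIZE */
        }
}

int main(void)
{
        toy_update_ptes(3, 19, 0x100000);    /* a range spanning three leaves */
        return 0;
}

Walking [3, 19) with 8 PTEs per leaf visits three leaves and writes 5, 8 and 3
entries respectively, which is the same split the
(cursor.pfn & ~mask) == (end & ~mask) test produces in the hunk below.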
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index cca8fc931bbb9902c0a3bf3334a43b337a3bb1ad..e873bbb2f0c7563f647e4fa39c552feffc91f7b6 100644 (file)
@@ -1487,36 +1487,6 @@ error:
        return r;
 }
 
-/**
- * amdgpu_vm_get_entry - find the entry for an address
- *
- * @p: see amdgpu_pte_update_params definition
- * @addr: virtual address in question
- * @entry: resulting entry or NULL
- * @parent: parent entry
- *
- * Find the vm_pt entry and its parent for the given address.
- */
-void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
-                        struct amdgpu_vm_pt **entry,
-                        struct amdgpu_vm_pt **parent)
-{
-       unsigned level = p->adev->vm_manager.root_level;
-
-       *parent = NULL;
-       *entry = &p->vm->root;
-       while ((*entry)->entries) {
-               unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
-
-               *parent = *entry;
-               *entry = &(*entry)->entries[addr >> shift];
-               addr &= (1ULL << shift) - 1;
-       }
-
-       if (level != AMDGPU_VM_PTB)
-               *entry = NULL;
-}
-
 /**
  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
  *
@@ -1580,36 +1550,34 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 {
        struct amdgpu_device *adev = params->adev;
        const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
-
-       uint64_t addr, pe_start;
-       struct amdgpu_bo *pt;
-       unsigned nptes;
+       struct amdgpu_vm_pt_cursor cursor;
 
        /* walk over the address space and update the page tables */
-       for (addr = start; addr < end; addr += nptes,
-            dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
-               struct amdgpu_vm_pt *entry, *parent;
+       for_each_amdgpu_vm_pt_leaf(adev, params->vm, start, end - 1, cursor) {
+               struct amdgpu_bo *pt = cursor.entry->base.bo;
+               uint64_t pe_start;
+               unsigned nptes;
 
-               amdgpu_vm_get_entry(params, addr, &entry, &parent);
-               if (!entry)
+               if (!pt || cursor.level != AMDGPU_VM_PTB)
                        return -ENOENT;
 
-               if ((addr & ~mask) == (end & ~mask))
-                       nptes = end - addr;
+               if ((cursor.pfn & ~mask) == (end & ~mask))
+                       nptes = end - cursor.pfn;
                else
-                       nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
+                       nptes = AMDGPU_VM_PTE_COUNT(adev) - (cursor.pfn & mask);
 
-               amdgpu_vm_handle_huge_pages(params, entry, parent,
+               amdgpu_vm_handle_huge_pages(params, cursor.entry, cursor.parent,
                                            nptes, dst, flags);
                /* We don't need to update PTEs for huge pages */
-               if (entry->huge)
+               if (cursor.entry->huge) {
+                       dst += nptes * AMDGPU_GPU_PAGE_SIZE;
                        continue;
+               }
 
-               pt = entry->base.bo;
-               pe_start = (addr & mask) * 8;
+               pe_start = (cursor.pfn & mask) * 8;
                amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
                                      AMDGPU_GPU_PAGE_SIZE, flags);
-
+               dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }
 
        return 0;
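
Read as a whole, the loop in amdgpu_vm_update_ptes() after this patch looks as
follows. This is just the hunk above reassembled for readability, with comments
added for orientation; the function signature and everything outside the hunk are
omitted, and the added comments are not part of the commit.

        struct amdgpu_device *adev = params->adev;
        const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
        struct amdgpu_vm_pt_cursor cursor;

        /* walk over the address space and update the page tables */
        for_each_amdgpu_vm_pt_leaf(adev, params->vm, start, end - 1, cursor) {
                struct amdgpu_bo *pt = cursor.entry->base.bo;
                uint64_t pe_start;
                unsigned nptes;

                /* every leaf the iterator returns must be an allocated PTB */
                if (!pt || cursor.level != AMDGPU_VM_PTB)
                        return -ENOENT;

                /* PTEs served by this leaf: up to the end of the request or
                 * the end of the page table, whichever comes first */
                if ((cursor.pfn & ~mask) == (end & ~mask))
                        nptes = end - cursor.pfn;
                else
                        nptes = AMDGPU_VM_PTE_COUNT(adev) - (cursor.pfn & mask);

                amdgpu_vm_handle_huge_pages(params, cursor.entry, cursor.parent,
                                            nptes, dst, flags);
                /* We don't need to update PTEs for huge pages */
                if (cursor.entry->huge) {
                        dst += nptes * AMDGPU_GPU_PAGE_SIZE;
                        continue;
                }

                pe_start = (cursor.pfn & mask) * 8;     /* 8 bytes per PTE */
                amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
                                      AMDGPU_GPU_PAGE_SIZE, flags);
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }

        return 0;

One subtlety worth noting: the old for statement advanced dst in its header, so
every path through the body picked the increment up automatically. With the
iterator the advance is explicit at the bottom of the body, which is why the
huge-page path now has to bump dst itself before its continue.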