drm/amdgpu: Add AMDGPU_GPU_PAGES_IN_CPU_PAGE define
author     Michel Dänzer <michel.daenzer@amd.com>
           Fri, 22 Jun 2018 16:54:03 +0000 (18:54 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Thu, 5 Jul 2018 21:39:49 +0000 (16:39 -0500)
To hopefully make the code dealing with GPU vs CPU pages a little
clearer.

Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
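
For context only (not part of the patch): a minimal standalone C sketch of the
arithmetic the new define encapsulates. AMDGPU_GPU_PAGE_SIZE is 4096 (shift 12,
see amdgpu_gart.h below); PAGE_SIZE is the CPU page size and is architecture
dependent, so 4096 is assumed here (ratio 1), while a 64K-page kernel would
give 16. The offset value is purely illustrative.

#include <stdio.h>

/* Mirrors the defines in amdgpu_gart.h; the 4096 CPU page size is an
 * assumption for this standalone sketch (it is PAGE_SIZE in the kernel). */
#define AMDGPU_GPU_PAGE_SIZE 4096
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)

int main(void)
{
	unsigned long long offset = 0x30000;	/* hypothetical byte offset into the GART */

	/* Same index math as in amdgpu_gart_unbind()/amdgpu_gart_bind(): */
	unsigned long long t = offset / AMDGPU_GPU_PAGE_SIZE;		/* GPU page index */
	unsigned long long p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;	/* CPU page index */

	printf("GPU page %llu -> CPU page %llu (%d GPU page(s) per CPU page)\n",
	       t, p, (int)AMDGPU_GPU_PAGES_IN_CPU_PAGE);
	return 0;
}
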
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index dd11b7313ca07b960867bb305f3174513b374e9c..36113cb60ca2f466631c78b84d23b5d93c5bc319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -234,7 +234,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        }
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++) {
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
                adev->gart.pages[p] = NULL;
@@ -243,7 +243,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                if (!adev->gart.ptr)
                        continue;
 
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+               for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
                                               t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +282,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 
        for (i = 0; i < pages; i++) {
                page_base = dma_addr[i];
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+               for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
@@ -319,7 +319,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++)
                adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 456295c002915d56b9fb57629bcc33275f76aa5a..9f9e9dc87da11c42b2e414fbe88a0251bcf9897d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -37,6 +37,8 @@ struct amdgpu_bo;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
 struct amdgpu_gart {
        u64                             table_addr;
        struct amdgpu_bo                *robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 837066076ccf5144accd45fb0d7a911470ef882c..712af5c1a5d692a68e4da876de531960b2e48a4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1567,7 +1567,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (nodes) {
                        addr = nodes->start << PAGE_SHIFT;
                        max_entries = (nodes->size - pfn) *
-                               (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                               AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                } else {
                        addr = 0;
                        max_entries = S64_MAX;
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 
                        max_entries = min(max_entries, 16ull * 1024ull);
                        for (count = 1;
-                            count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                            count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                             ++count) {
                                uint64_t idx = pfn + count;
 
@@ -1592,7 +1592,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                               max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
@@ -1607,7 +1607,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+               pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
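
Companion sketch (again not part of the patch), assuming the same constants as
above: the two count conversions touched in amdgpu_vm_bo_split_mapping(), where
a node size in CPU pages scales up to GPU PTE entries and the number of GPU
pages just mapped scales back down to advance the CPU pfn. The node size is a
hypothetical value for illustration.

#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096			/* CPU page size, assumed 4K here */
#endif
#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)

int main(void)
{
	unsigned long long node_pages = 256;	/* hypothetical node size in CPU pages */
	unsigned long long pfn = 0;		/* CPU page offset into the mapping */

	/* CPU pages -> GPU PTE entries (first amdgpu_vm.c hunk): */
	unsigned long long max_entries =
		(node_pages - pfn) * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	/* GPU pages written -> CPU pfn advance (last amdgpu_vm.c hunk): */
	unsigned long long start = 0, last = max_entries - 1;
	pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	printf("max_entries=%llu, pfn advanced to %llu\n", max_entries, pfn);
	return 0;
}
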