drm/amdgpu: cleanup PTE flag generation v3
author Christian König <christian.koenig@amd.com>
Mon, 2 Sep 2019 14:39:40 +0000 (16:39 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 16 Sep 2019 14:59:29 +0000 (09:59 -0500)
Move the ASIC-specific PTE flag generation code into a new callback function.

v2: mask the flags for SI and CIK instead of a BUG_ON().
v3: remove the last missed BUG_ON().
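
As an illustration only (not part of the patch; "gmc_vX_0" is a placeholder
name for a hypothetical future ASIC backend), a new generation would
implement and register the callback along these lines, mirroring the
gmc_v9_0 changes below:

    static void gmc_vX_0_get_vm_pte(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va_mapping *mapping,
                                    uint64_t *flags)
    {
            /* Honor the executable bit requested by the mapping,
             * masking out anything this generation doesn't support.
             */
            *flags &= ~AMDGPU_PTE_EXECUTABLE;
            *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
    }

    static const struct amdgpu_gmc_funcs gmc_vX_0_gmc_funcs = {
            /* other callbacks omitted for brevity */
            .get_vm_pte = gmc_vX_0_get_vm_pte,
    };

Callers then reach the backend through the amdgpu_gmc_get_vm_pte() macro
added to amdgpu_gmc.h, as done in amdgpu_vm_bo_split_mapping().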

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6a74059b776c0f97c80231f268674df6a81535b8..232a8ff5642b0241f9841fd734873b9ce7f71815 100644
@@ -104,6 +104,10 @@ struct amdgpu_gmc_funcs {
        /* get the pde for a given mc addr */
        void (*get_vm_pde)(struct amdgpu_device *adev, int level,
                           u64 *dst, u64 *flags);
+       /* get the pte flags to use for a BO VA mapping */
+       void (*get_vm_pte)(struct amdgpu_device *adev,
+                          struct amdgpu_bo_va_mapping *mapping,
+                          uint64_t *flags);
 };
 
 struct amdgpu_xgmi {
@@ -185,6 +189,7 @@ struct amdgpu_gmc {
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
 
 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 928c2cd456b3cd09f82a0ff855dbdad6803b70b0..5acedbab759626facd4c5df7a190525fc7f35a65 100644
@@ -1571,33 +1571,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
                flags &= ~AMDGPU_PTE_WRITEABLE;
 
-       if (adev->asic_type >= CHIP_TONGA) {
-               flags &= ~AMDGPU_PTE_EXECUTABLE;
-               flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-       }
-
-       if (adev->asic_type >= CHIP_NAVI10) {
-               flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
-               flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
-       } else {
-               flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
-               flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
-       }
-
-       if ((mapping->flags & AMDGPU_PTE_PRT) &&
-           (adev->asic_type >= CHIP_VEGA10)) {
-               flags |= AMDGPU_PTE_PRT;
-               if (adev->asic_type >= CHIP_NAVI10) {
-                       flags |= AMDGPU_PTE_SNOOPED;
-                       flags |= AMDGPU_PTE_LOG;
-                       flags |= AMDGPU_PTE_SYSTEM;
-               }
-               flags &= ~AMDGPU_PTE_VALID;
-       }
-       if (adev->asic_type == CHIP_ARCTURUS &&
-           !(flags & AMDGPU_PTE_SYSTEM) &&
-           mapping->bo_va->is_xgmi)
-               flags |= AMDGPU_PTE_SNOOPED;
+       /* Apply ASIC specific mapping flags */
+       amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
 
        trace_amdgpu_vm_bo_update(mapping);
 
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 36137b3b83e9518c0b67e83ef628800cd43daad4..40d75991f4d5bda635e3917487aec44cdbb21ff2 100644
@@ -440,12 +440,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
        }
 }
 
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+                                struct amdgpu_bo_va_mapping *mapping,
+                                uint64_t *flags)
+{
+       *flags &= ~AMDGPU_PTE_EXECUTABLE;
+       *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+       *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+       *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
+       if (mapping->flags & AMDGPU_PTE_PRT) {
+               *flags |= AMDGPU_PTE_PRT;
+               *flags |= AMDGPU_PTE_SNOOPED;
+               *flags |= AMDGPU_PTE_LOG;
+               *flags |= AMDGPU_PTE_SYSTEM;
+               *flags &= ~AMDGPU_PTE_VALID;
+       }
+}
+
 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
        .map_mtype = gmc_v10_0_map_mtype,
-       .get_vm_pde = gmc_v10_0_get_vm_pde
+       .get_vm_pde = gmc_v10_0_get_vm_pde,
+       .get_vm_pte = gmc_v10_0_get_vm_pte
 };
 
 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8cb8de31c4a9b5a6c74f7f9c6c960b9ac811cd59..b205039350b6c9d469ecdb3d738a71ea3cb40f17 100644
@@ -392,6 +392,14 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
        BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping,
+                               uint64_t *flags)
+{
+       *flags &= ~AMDGPU_PTE_EXECUTABLE;
+       *flags &= ~AMDGPU_PTE_PRT;
+}
+
 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
                                              bool value)
 {
@@ -1138,6 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
        .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
        .set_prt = gmc_v6_0_set_prt,
        .get_vm_pde = gmc_v6_0_get_vm_pde,
+       .get_vm_pte = gmc_v6_0_get_vm_pte,
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9e656f9e4b35c2cafcbfd8d85085c495ee708db0..f08e5330642d6f525cbf65107b7dd5422f84b1ab 100644
@@ -469,6 +469,14 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
        BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping,
+                               uint64_t *flags)
+{
+       *flags &= ~AMDGPU_PTE_EXECUTABLE;
+       *flags &= ~AMDGPU_PTE_PRT;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1328,7 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
        .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
        .set_prt = gmc_v7_0_set_prt,
-       .get_vm_pde = gmc_v7_0_get_vm_pde
+       .get_vm_pde = gmc_v7_0_get_vm_pde,
+       .get_vm_pte = gmc_v7_0_get_vm_pte
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ba6c47c50f6f7dcda4cd7417570761e2011f7134..6d96d40fbcb8f8382dd3ab9c4945c05695f9bdf6 100644
@@ -692,6 +692,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
        BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
 }
 
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping,
+                               uint64_t *flags)
+{
+       *flags &= ~AMDGPU_PTE_EXECUTABLE;
+       *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+       *flags &= ~AMDGPU_PTE_PRT;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1694,7 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
        .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
        .set_prt = gmc_v8_0_set_prt,
-       .get_vm_pde = gmc_v8_0_get_vm_pde
+       .get_vm_pde = gmc_v8_0_get_vm_pde,
+       .get_vm_pte = gmc_v8_0_get_vm_pte
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index a30d1cc4d45f96bc2eb235e2c465e38571482a9b..0c46a0144f1b9c8c14d48b557258529a557e810c 100644
@@ -653,12 +653,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
        }
 }
 
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping,
+                               uint64_t *flags)
+{
+       *flags &= ~AMDGPU_PTE_EXECUTABLE;
+       *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+       *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+       *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+       if (mapping->flags & AMDGPU_PTE_PRT) {
+               *flags |= AMDGPU_PTE_PRT;
+               *flags &= ~AMDGPU_PTE_VALID;
+       }
+
+       if (adev->asic_type == CHIP_ARCTURUS &&
+           !(*flags & AMDGPU_PTE_SYSTEM) &&
+           mapping->bo_va->is_xgmi)
+               *flags |= AMDGPU_PTE_SNOOPED;
+}
+
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
-       .get_vm_pde = gmc_v9_0_get_vm_pde
+       .get_vm_pde = gmc_v9_0_get_vm_pde,
+       .get_vm_pte = gmc_v9_0_get_vm_pte
 };
 
 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)