Move the ASIC specific PTE flags handling into a new GMC callback
function instead of scattering adev->asic_type checks through the
common VM code (a rough sketch of the resulting pattern follows
below).

v2: mask the flags for SI and CIK instead of using a BUG_ON().
v3: remove the last missed BUG_ON().
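
As a minimal illustration of the callback split (simplified stand-in
types only; struct mapping, struct device, gmc_funcs and the asic_a_*
names below are illustrative, not the real amdgpu structures):

  #include <stdint.h>

  struct mapping { uint64_t flags; };  /* stand-in for amdgpu_bo_va_mapping */
  struct device;                       /* stand-in for amdgpu_device */

  struct gmc_funcs {
          /* filter the PTE flags for one ASIC generation */
          void (*get_vm_pte)(struct device *dev, struct mapping *m,
                             uint64_t *flags);
  };

  #define PTE_EXECUTABLE (1ULL << 4)   /* illustrative bit only */

  /* per-ASIC implementation: keep only what the hardware supports */
  static void asic_a_get_vm_pte(struct device *dev, struct mapping *m,
                                uint64_t *flags)
  {
          *flags &= ~PTE_EXECUTABLE;
          *flags |= m->flags & PTE_EXECUTABLE;
  }

  static const struct gmc_funcs asic_a_funcs = {
          .get_vm_pte = asic_a_get_vm_pte,
  };

  /* common VM code: one indirect call replaces the asic_type if-ladder */
  static void update_pte_flags(const struct gmc_funcs *f, struct device *dev,
                               struct mapping *m, uint64_t *flags)
  {
          f->get_vm_pte(dev, m, flags);
  }

In the patch itself the table is struct amdgpu_gmc_funcs and the
common code reaches the callback through the new
amdgpu_gmc_get_vm_pte() wrapper macro.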
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- if (adev->asic_type >= CHIP_TONGA) {
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
- }
-
- if (adev->asic_type >= CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
- if (adev->asic_type == CHIP_ARCTURUS &&
- !(flags & AMDGPU_PTE_SYSTEM) &&
- mapping->bo_va->is_xgmi)
- flags |= AMDGPU_PTE_SNOOPED;
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
}
}

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
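+	/* PRT mappings are handled as invalid, snooped and logged system pages */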
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
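+	/* SI PTEs have no executable or PRT bits, so just mask the flags (v2) */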
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
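+	/* CIK PTEs likewise lack executable and PRT bits, mask them out */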
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
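+	/* VI adds a per-PTE executable bit, but still has no PRT bit */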
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
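+	/* VRAM mapped over XGMI needs the snooped bit on Arcturus */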
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)