Rename adjust_mc_addr to get_vm_pde and check the address bits in one place.
v2: handle vcn as well, keep setting the valid bit manually and
add a BUG_ON() for GMC v6, v7 and v8.
v3: handle vcn_v1_0_enc_ring_emit_vm_flush as well.
v4: fix the BUG_ON mask for GMC v6-8
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
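
For readers who want the new contract at a glance, here is a minimal stand-alone sketch of how a get_vm_pde() hook is meant to be used. The bit masks and the APU offset translation mirror the hunks below; the demo_* names, the stub device struct, the assert-based BUG_ON and the example addresses are illustrative only and not part of the driver:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BUG_ON(cond) assert(!(cond))       /* stand-in for the kernel macro */
#define AMDGPU_PTE_VALID (1ULL << 0)

struct demo_device {                       /* stub for struct amdgpu_device */
	uint64_t vram_base_offset;
	uint64_t vram_start;
	/* per-ASIC hook: turn an MC address into a PDE value (valid bit not included) */
	uint64_t (*get_vm_pde)(struct demo_device *dev, uint64_t addr);
};

/* GMC v6/v7/v8 style: the address is used as-is, only the bits are checked */
static uint64_t demo_gmc_v8_get_vm_pde(struct demo_device *dev, uint64_t addr)
{
	(void)dev;
	BUG_ON(addr & 0xFFFFFF0000000FFFULL);
	return addr;
}

/* GMC v9 style: translate into the APU carve-out view, then check the bits */
static uint64_t demo_gmc_v9_get_vm_pde(struct demo_device *dev, uint64_t addr)
{
	addr = dev->vram_base_offset + addr - dev->vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

int main(void)
{
	struct demo_device dev = {
		.vram_base_offset = 0x100000000ULL,  /* made-up example values */
		.vram_start       = 0,
		.get_vm_pde       = demo_gmc_v9_get_vm_pde,
	};
	uint64_t pd_addr = 0x2000;               /* page-aligned MC address */

	/* callers still set the valid bit themselves, as before the rename */
	pd_addr = dev.get_vm_pde(&dev, pd_addr) | AMDGPU_PTE_VALID;
	printf("GMC v9 PDE: 0x%" PRIx64 "\n", pd_addr);

	dev.get_vm_pde = demo_gmc_v8_get_vm_pde;
	printf("GMC v8 PDE: 0x%" PRIx64 "\n",
	       dev.get_vm_pde(&dev, 0x2000) | AMDGPU_PTE_VALID);
	return 0;
}
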
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
- /* adjust mc addr in fb for APU case */
- u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+ /* get the pde for a given mc addr */
+ u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
return false;
}
-static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
-{
- u64 addr = mc_addr;
-
- if (adev->gart.gart_funcs->adjust_mc_addr)
- addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
-
- return addr;
-}
-
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
- uint64_t pt_addr =
- amdgpu_vm_adjust_mc_addr(adev, last_pt);
+ uint64_t entry;
+
+ entry = amdgpu_gart_get_vm_pde(adev, last_pt);
if (shadow)
amdgpu_vm_do_set_ptes(&params,
last_shadow,
- pt_addr, count,
+ entry, count,
incr,
AMDGPU_PTE_VALID);
amdgpu_vm_do_set_ptes(&params, last_pde,
- pt_addr, count, incr,
+ entry, count, incr,
AMDGPU_PTE_VALID);
}
}
if (count) {
- uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
+ uint64_t entry;
+
+ entry = amdgpu_gart_get_vm_pde(adev, last_pt);
if (vm->root.bo->shadow)
- amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
+ amdgpu_vm_do_set_ptes(&params, last_shadow, entry,
count, incr, AMDGPU_PTE_VALID);
- amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
+ amdgpu_vm_do_set_ptes(&params, last_pde, entry,
count, incr, AMDGPU_PTE_VALID);
}
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
return pte_flag;
}
+static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+ BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+ return addr;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
.set_prt = gmc_v6_0_set_prt,
+ .get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};
return pte_flag;
}
+static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+ BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+ return addr;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags
+ .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
+ .get_vm_pde = gmc_v7_0_get_vm_pde
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
return pte_flag;
}
+static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+ BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+ return addr;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
+ .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
+ .get_vm_pde = gmc_v8_0_get_vm_pde
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
return pte_flag;
}
-static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
+static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
- return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
+ addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
+ BUG_ON(addr & 0xFFFF00000000003FULL);
+ return addr;
}
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
.get_invalidate_req = gmc_v9_0_get_invalidate_req,
+ .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
+ .get_vm_pde = gmc_v9_0_get_vm_pde
};
static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
uint32_t data0, data1, mask;
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
uint32_t data0, data1, mask;
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->vm_inv_eng;
- pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
- pd_addr = pd_addr | 0x1; /* valid bit */
- /* now only use physical base address of PDE and valid */
- BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+ pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+ pd_addr |= AMDGPU_PTE_VALID;
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,