drm/amdgpu: cleanup adjust_mc_addr handling v4
author Christian König <christian.koenig@amd.com>
Fri, 12 May 2017 13:39:39 +0000 (15:39 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 31 May 2017 18:16:35 +0000 (14:16 -0400)
Rename adjust_mc_addr to get_vm_pde and check the address bits in one place.

v2: handle vcn as well, keep setting the valid bit manually,
    add a BUG_ON() for GMC v6, v7 and v8 as well.
v3: handle vcn_v1_0_enc_ring_emit_vm_flush as well.
v4: fix the BUG_ON mask for GFX6-8

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
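
For orientation, below is a minimal, self-contained C sketch of the pattern the diff establishes: each GMC generation supplies a get_vm_pde() callback that turns an MC address into a PDE value and checks the reserved address bits in that one place, and every ring's vm_flush path reaches it through the amdgpu_gart_get_vm_pde() wrapper instead of the old open-coded adjust_mc_addr() + BUG_ON() pair. This is an illustration only, not the real amdgpu headers: the struct layout is collapsed, a user-space assert() stands in for BUG_ON(), the vram_base_offset/vram_start fields are flattened onto the device struct, and the sample addresses are made up.

/* Sketch of the get_vm_pde callback pattern introduced by this patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_PTE_VALID (1ULL << 0)

struct amdgpu_device;

struct amdgpu_gart_funcs {
	/* get the pde for a given mc addr */
	uint64_t (*get_vm_pde)(struct amdgpu_device *adev, uint64_t addr);
};

struct amdgpu_device {
	struct {
		struct amdgpu_gart_funcs *gart_funcs;
	} gart;
	uint64_t vram_base_offset;	/* stands in for vm_manager.vram_base_offset */
	uint64_t vram_start;		/* stands in for mc.vram_start */
};

/* Single wrapper used by all callers, mirroring the new macro in amdgpu.h. */
#define amdgpu_gart_get_vm_pde(adev, addr) \
	(adev)->gart.gart_funcs->get_vm_pde((adev), (addr))

/* GMC v6-v8 style: address must be 4K aligned and fit in 40 bits. */
static uint64_t gmc_v8_style_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	(void)adev;	/* unused in the pre-GMC9 variants */
	assert(!(addr & 0xFFFFFF0000000FFFULL));
	return addr;
}

/* GMC v9 style: translate the MC address, then check bits 48+ and 5:0. */
static uint64_t gmc_v9_style_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	addr = adev->vram_base_offset + addr - adev->vram_start;
	assert(!(addr & 0xFFFF00000000003FULL));
	return addr;
}

int main(void)
{
	struct amdgpu_gart_funcs funcs = { .get_vm_pde = gmc_v9_style_get_vm_pde };
	struct amdgpu_device adev = {
		.gart = { .gart_funcs = &funcs },
		.vram_base_offset = 0,
		.vram_start = 0,
	};
	/* What a ring's emit_vm_flush now does before writing the PD address. */
	uint64_t pd_addr = 0x100000ULL;	/* hypothetical page-directory MC address */

	pd_addr = amdgpu_gart_get_vm_pde(&adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;	/* callers still set the valid bit manually */
	printf("GMC9-style PDE: 0x%llx\n", (unsigned long long)pd_addr);

	printf("GMC6-8-style PDE: 0x%llx\n",
	       (unsigned long long)gmc_v8_style_get_vm_pde(&adev, 0x200000ULL));
	return 0;
}

The two assert masks follow the BUG_ON() values in the diff: 0xFFFFFF0000000FFFULL rejects addresses that are not 4K aligned or exceed 40 bits (GMC v6-v8), while 0xFFFF00000000003FULL rejects anything above 48 bits or in the low flag bits after the GMC v9 APU offset translation.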

index a3576dbefa0faf1277c58e5b4bd0d37d05f33553..abf5a58edc82f8ba6bc10a574d83379691a6fc9c 100644 (file)
@@ -308,8 +308,8 @@ struct amdgpu_gart_funcs {
        /* set pte flags based per asic */
        uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
                                     uint32_t flags);
-       /* adjust mc addr in fb for APU case */
-       u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+       /* get the pde for a given mc addr */
+       u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
        uint32_t (*get_invalidate_req)(unsigned int vm_id);
 };
 
@@ -1813,6 +1813,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
index 3ecde81821ad48b47fce6df96e036295f3ceb495..c11903257b947eed4e4cb7849406215c43783b5e 100644 (file)
@@ -682,16 +682,6 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
        return false;
 }
 
-static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
-{
-       u64 addr = mc_addr;
-
-       if (adev->gart.gart_funcs->adjust_mc_addr)
-               addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
-
-       return addr;
-}
-
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
 {
@@ -1033,18 +1023,18 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
                        if (count) {
-                               uint64_t pt_addr =
-                                       amdgpu_vm_adjust_mc_addr(adev, last_pt);
+                               uint64_t entry;
 
+                               entry = amdgpu_gart_get_vm_pde(adev, last_pt);
                                if (shadow)
                                        amdgpu_vm_do_set_ptes(&params,
                                                              last_shadow,
-                                                             pt_addr, count,
+                                                             entry, count,
                                                              incr,
                                                              AMDGPU_PTE_VALID);
 
                                amdgpu_vm_do_set_ptes(&params, last_pde,
-                                                     pt_addr, count, incr,
+                                                     entry, count, incr,
                                                      AMDGPU_PTE_VALID);
                        }
 
@@ -1058,13 +1048,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
        }
 
        if (count) {
-               uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
+               uint64_t entry;
+
+               entry = amdgpu_gart_get_vm_pde(adev, last_pt);
 
                if (vm->root.bo->shadow)
-                       amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
+                       amdgpu_vm_do_set_ptes(&params, last_shadow, entry,
                                              count, incr, AMDGPU_PTE_VALID);
 
-               amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
+               amdgpu_vm_do_set_ptes(&params, last_pde, entry,
                                      count, incr, AMDGPU_PTE_VALID);
        }
 
index ec891b3f4a8266a41cd96f3790e16f894f980d3e..f97fc0dafc36c81bcace4d8fe64528d5e2828d1a 100644 (file)
@@ -3832,10 +3832,8 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
                                   hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
index d07ec13b42a840c4671527cfedb414a0e6f2739b..569828ced31d25cc9113e2d83aa6b361fb4aa07e 100644 (file)
@@ -395,6 +395,12 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
        return pte_flag;
 }
 
+static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+       BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+       return addr;
+}
+
 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
                                              bool value)
 {
@@ -1121,6 +1127,7 @@ static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
        .set_prt = gmc_v6_0_set_prt,
+       .get_vm_pde = gmc_v6_0_get_vm_pde,
        .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
 };
 
index 967505bd2fc88b6da10d4d0b3ea9f7ffbd1b793b..8b39d9a4f801ee304a7693bc0d63041d15ba9b86 100644 (file)
@@ -472,6 +472,12 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
        return pte_flag;
 }
 
+static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+       BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+       return addr;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1293,7 +1299,8 @@ static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
        .set_prt = gmc_v7_0_set_prt,
-       .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags
+       .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
+       .get_vm_pde = gmc_v7_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
index 3b5ea0f52d89c2ed92417cade41ec91757431418..73a9653b4e60843027c8658c46a7b9c96b66f221 100644 (file)
@@ -656,6 +656,12 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
        return pte_flag;
 }
 
+static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+       BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+       return addr;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -1612,7 +1618,8 @@ static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
        .set_prt = gmc_v8_0_set_prt,
-       .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
+       .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
+       .get_vm_pde = gmc_v8_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
index 19e10276e585715199c3a8d5d60f94b6b6473ed3..047b1a7d20b579e31633b9746128436cba2f340d 100644 (file)
@@ -358,17 +358,19 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
        return pte_flag;
 }
 
-static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
+static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
 {
-       return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
+       addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
+       BUG_ON(addr & 0xFFFF00000000003FULL);
+       return addr;
 }
 
 static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
-       .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
-       .adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
        .get_invalidate_req = gmc_v9_0_get_invalidate_req,
+       .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
+       .get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
 static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
index 9cad3b11889916722b7cda7110eefd8c03619644..4a65697ccc942ec1b2b7393acbcf360274bb8e13 100644 (file)
@@ -1124,10 +1124,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
index 74e3f23ac5e0a617f2b8e34b074aeaf796958151..dd9ec81f116d7d67c827165c094c320c8fb1a449 100644 (file)
@@ -1316,10 +1316,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
        data1 = upper_32_bits(pd_addr);
@@ -1358,10 +1356,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
index 0012835e85c60f2ca0bf34e1b10dd57e2a0b8ef2..0b7fcc1b6c009654397bddf81a5dda06413d3575 100644 (file)
@@ -926,10 +926,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
index e8c8c78e389a1949171559c16f93deb59ec04801..ec33e8fa83c1ae60812f06cfac1867b3ff974c58 100644 (file)
@@ -882,10 +882,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
        data1 = upper_32_bits(pd_addr);
@@ -1015,10 +1013,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;
 
-       pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
-       pd_addr = pd_addr | 0x1; /* valid bit */
-       /* now only use physical base address of PDE and valid */
-       BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+       pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+       pd_addr |= AMDGPU_PTE_VALID;
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,