drm/amdgpu: implement gmc_v7_0_emit_flush_gpu_tlb
author Christian König <christian.koenig@amd.com>
Fri, 12 Jan 2018 16:08:22 +0000 (17:08 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 19 Feb 2018 19:18:08 +0000 (14:18 -0500)
Unify TLB flushing for GMC v7 by moving the ring-specific register writes into a new emit_flush_gpu_tlb GMC callback shared by the GFX, compute and SDMA rings.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
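
The rings now call amdgpu_gmc_emit_flush_gpu_tlb() instead of hand-rolling SRBM_WRITE/WRITE_DATA packets: the choice of registers lives in the new GMC callback, while packet encoding stays in each ring's emit_wreg hook. A minimal sketch of that dispatch, assuming the wrappers are thin macros over the function tables (the shapes below are inferred from this patch, not copied from the tree):

/*
 * Sketch only: how the generic helpers are expected to dispatch to the
 * per-ASIC / per-ring callbacks.  The real definitions live in the
 * amdgpu headers and may differ in detail.
 */
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, pasid, addr) \
	(r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (pasid), (addr))

#define amdgpu_ring_emit_wreg(r, reg, val) \
	(r)->funcs->emit_wreg((r), (reg), (val))

With that split, gmc_v7_0_emit_flush_gpu_tlb() only decides which registers to write (page table base for the VMID, then VM_INVALIDATE_REQUEST); how a register write is packed into the ring is left to the ring implementation. The frame-size terms added to the ring funcs tables (CIK_FLUSH_GPU_TLB_NUM_WREG * 3 and * 5) are tallied in the note after the diff.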
drivers/gpu/drm/amd/amdgpu/cik.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

index c4989f51ecefab5520137e484f412aa1f3bb82ae..201d8786f9cba0f01ed0987c8daf3aeef444b183 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef __CIK_H__
 #define __CIK_H__
 
+#define CIK_FLUSH_GPU_TLB_NUM_WREG     2
+
 void cik_srbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
index fbbac849804416eabf54120af2f01f2da08f9411..1d32dedb2534f6628530e0b3cfd72266e61a4569 100644 (file)
@@ -886,18 +886,7 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 
-       amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* flush TLB */
-       amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -1290,7 +1279,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
                6 + /* cik_sdma_ring_emit_hdp_flush */
                3 + /* cik_sdma_ring_emit_hdp_invalidate */
                6 + /* cik_sdma_ring_emit_pipeline_sync */
-               12 + /* cik_sdma_ring_emit_vm_flush */
+               CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
                9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
        .emit_ib = cik_sdma_ring_emit_ib,
index fbce0c0fdb141e21ddbae712eae0ffd41054c65c..3c2b678436f2560a4ed38acecb9d94049d6e44a0 100644 (file)
@@ -3244,26 +3244,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-                                WRITE_DATA_DST_SEL(0)));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* bits 0-15 are the VM contexts0-15 */
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -5132,7 +5113,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
                5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
                12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
                7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-               17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+               CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
                3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
        .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
@@ -5163,7 +5144,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
                7 + /* gfx_v7_0_ring_emit_hdp_flush */
                5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
                7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-               17 + /* gfx_v7_0_ring_emit_vm_flush */
+               CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
        .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
index 082500222ef97331ef005940052262cb2aa006d3..cd2f834282fb5b1012fd96fcbd5265883101c50d 100644 (file)
@@ -435,6 +435,24 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
+static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+                                           unsigned vmid, unsigned pasid,
+                                           uint64_t pd_addr)
+{
+       uint32_t reg;
+
+       if (vmid < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+       else
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+       amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+       return pd_addr;
+}
+
 /**
  * gmc_v7_0_set_pte_pde - update the page tables using MMIO
  *
@@ -1305,6 +1323,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 
 static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+       .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
        .set_pte_pde = gmc_v7_0_set_pte_pde,
        .set_prt = gmc_v7_0_set_prt,
        .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
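
On the ring frame-size accounting: CIK_FLUSH_GPU_TLB_NUM_WREG is the number of register writes the flush emits (page table base plus VM_INVALIDATE_REQUEST), and the multiplier is the per-write packet size of the ring type, as seen in the code removed above. A rough tally, assuming the rings' emit_wreg hooks pack a write the same way the old inline code did (the hooks themselves are provided elsewhere in this series; the functions below are illustrative sketches, not the upstream implementations):

/* SDMA: SRBM_WRITE header + register + value = 3 dwords per write */
static void cik_sdma_emit_wreg_sketch(struct amdgpu_ring *ring,
				      uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

/* GFX/compute: WRITE_DATA header + engine/dst select + register + 0 + value
 * = 5 dwords per write */
static void gfx_v7_0_emit_wreg_sketch(struct amdgpu_ring *ring,
				      uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

The vm_flush estimates therefore come out unchanged: SDMA 2 * 3 + 6 = 12 dwords, GFX 2 * 5 + 7 + 6 = 23 dwords, compute 2 * 5 + 7 = 17 dwords, matching the literal 12, 17 + 6 and 17 they replace.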