drm/amdgpu: Retire amdgpu_ring.ready flag v4
author    Andrey Grodzovsky <andrey.grodzovsky@amd.com>
          Fri, 19 Oct 2018 20:22:48 +0000 (16:22 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 5 Nov 2018 19:21:23 +0000 (14:21 -0500)
Start using drm_gpu_scheduler.ready instead.

v3:
Add a helper function that runs the ring test and sets the
sched.ready flag accordingly, and remove the explicit
sched.ready assignments from the IP-specific files.

v4: Add kerneldoc and rebase.
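
Illustrative before/after sketch of the call-site conversion (the
exact error handling varies slightly per IP file, see the hunks
below):

	/* before: open-coded ring test plus ready-flag bookkeeping */
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	/* after: the helper runs the test and updates sched.ready itself */
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;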

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
26 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bce5f7711cf1de9a215567b6c6fccc26a30c1bd5..60f9a87e9c744c92a62f8f664cc6464b005615ac 100644
@@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                                  KGD_MAX_QUEUES);
 
                /* remove the KIQ bit as well */
-               if (adev->gfx.kiq.ring.ready)
+               if (adev->gfx.kiq.ring.sched.ready)
                        clear_bit(amdgpu_gfx_queue_to_bit(adev,
                                                          adev->gfx.kiq.ring.me - 1,
                                                          adev->gfx.kiq.ring.pipe,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index f4b47065425ccfbfddd4bf20206a6af85560f65d..40a9f348b37dfdb64e6095569a2b5816fbbfe76b 100644
@@ -786,7 +786,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
        if (adev->in_gpu_reset)
                return -EIO;
 
-       if (ring->ready)
+       if (ring->sched.ready)
                return invalidate_tlbs_with_kiq(adev, pasid);
 
        for (vmid = 0; vmid < 16; vmid++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b725dfa05baf9353f0859bde8af7c4e14f7..fc74f40a59129d14988af54448ce5bfe0fbe5c99 100644
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                fence_ctx = 0;
        }
 
-       if (!ring->ready) {
+       if (!ring->sched.ready) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }
@@ -351,7 +351,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                struct amdgpu_ring *ring = adev->rings[i];
                long tmo;
 
-               if (!ring || !ring->ready)
+               if (!ring || !ring->sched.ready)
                        continue;
 
                /* skip IB tests for KIQ in general for the below reasons:
@@ -375,7 +375,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 
                r = amdgpu_ring_test_ib(ring, tmo);
                if (r) {
-                       ring->ready = false;
+                       ring->sched.ready = false;
 
                        if (ring == &adev->gfx.gfx_ring[0]) {
                                /* oh, oh, that's really bad */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09fa919d250086de7715b01a78c6ece64e21fa2c..8f6ff9f895c8db013f5e3b3c95cac9d6be3c2d20 100644
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_GFX:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       if (adev->gfx.gfx_ring[i].ready)
+                       if (adev->gfx.gfx_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_COMPUTE:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       if (adev->gfx.compute_ring[i].ready)
+                       if (adev->gfx.compute_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_DMA:
                type = AMD_IP_BLOCK_TYPE_SDMA;
                for (i = 0; i < adev->sdma.num_instances; i++)
-                       if (adev->sdma.instance[i].ring.ready)
+                       if (adev->sdma.instance[i].ring.sched.ready)
                                ++num_rings;
                ib_start_alignment = 256;
                ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;
 
-                       if (adev->uvd.inst[i].ring.ready)
+                       if (adev->uvd.inst[i].ring.sched.ready)
                                ++num_rings;
                }
                ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCE:
                type = AMD_IP_BLOCK_TYPE_VCE;
                for (i = 0; i < adev->vce.num_rings; i++)
-                       if (adev->vce.ring[i].ready)
+                       if (adev->vce.ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 4;
                ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                                continue;
 
                        for (j = 0; j < adev->uvd.num_enc_rings; j++)
-                               if (adev->uvd.inst[i].ring_enc[j].ready)
+                               if (adev->uvd.inst[i].ring_enc[j].sched.ready)
                                        ++num_rings;
                }
                ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_dec.ready)
+               if (adev->vcn.ring_dec.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                       if (adev->vcn.ring_enc[i].ready)
+                       if (adev->vcn.ring_enc[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 64;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_jpeg.ready)
+               if (adev->vcn.ring_jpeg.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678de8c1570642afc2d488f63fbc179a1e99..7235cd0b0fa904be268c6b076b502aa0a0f85145 100644
@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
-               if (ring && ring->ready)
+               if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85ec147d54d4784be1b9b66639be9ff26d8a..3c89c8aa33d8482646431693228f4ac884a5c4d2 100644
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-       ring->ready = false;
+       ring->sched.ready = false;
 
        /* Not to finish a ring which is not initialized */
        if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -500,3 +500,23 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
        debugfs_remove(ring->ent);
 #endif
 }
+
+/**
+ * amdgpu_ring_test_helper - test a ring and set its scheduler readiness
+ *
+ * @ring: ring to test
+ *
+ * Runs the ring test and sets sched.ready to reflect the result.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+       int r;
+
+       r = amdgpu_ring_test_ring(ring);
+
+       ring->sched.ready = !r;
+
+       return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301ce454884b9ebe980b3b554c19e2932be0..4cdddbc4491bbb47b76080086c1b0e573025f236 100644
@@ -189,7 +189,6 @@ struct amdgpu_ring {
        uint64_t                gpu_addr;
        uint64_t                ptr_mask;
        uint32_t                buf_mask;
-       bool                    ready;
        u32                     idx;
        u32                     me;
        u32                     pipe;
@@ -313,4 +312,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
        ring->count_dw -= count_dw;
 }
 
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 31fe85dd0b50983cd50a1a58bce2d28aaf6f9158..c91ec3101d00b5d06e48db30c71b3cf938d95b63 100644
@@ -1970,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        unsigned i;
        int r;
 
-       if (direct_submit && !ring->ready) {
+       if (direct_submit && !ring->sched.ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 32eb43d165f260967a632c7ec22e2d5ebaefdcd8..561406a1cf8864f6f2b44a8ae05830e473a08811 100644
@@ -316,8 +316,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -494,18 +494,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        cik_sdma_enable(adev, true);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 622dd70f310e076b8f03addd26006b101926ca69..c8f038136af00aae47721b5f6b20e0c5b6a972ca 100644
@@ -1950,9 +1950,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                                      CP_ME_CNTL__CE_HALT_MASK));
                WREG32(mmSCRATCH_UMSK, 0);
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -2124,12 +2124,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the rings */
        gfx_v6_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        return 0;
 }
@@ -2227,14 +2224,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
        WREG32(mmCP_RB2_CNTL, tmp);
        WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-       adev->gfx.compute_ring[0].ready = false;
-       adev->gfx.compute_ring[1].ready = false;
 
        for (i = 0; i < 2; i++) {
-               r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+               r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
                if (r)
                        return r;
-               adev->gfx.compute_ring[i].ready = true;
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index cfa45d99648286b5a99b63cc9c1f36b2e9c8559b..6de6bb18bdfafeb946ec720dd5ffeb30051a3192 100644
@@ -2403,7 +2403,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -2613,12 +2613,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the ring */
        gfx_v7_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        return 0;
 }
@@ -2675,7 +2672,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
        }
        udelay(50);
 }
@@ -3106,10 +3103,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               amdgpu_ring_test_helper(ring);
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0fe0c6115a8c9509463b9bb79eb53ccb3181f4b..02f8ca56386f1f3c116daa2f707dfb0b04d9f90a 100644
@@ -1629,7 +1629,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
                return 0;
 
        /* bail if the compute ring is not ready */
-       if (!ring->ready)
+       if (!ring->sched.ready)
                return 0;
 
        tmp = RREG32(mmGB_EDC_MODE);
@@ -4197,7 +4197,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
                tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32(mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -4379,10 +4379,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
        /* start the ring */
        amdgpu_ring_clear_ring(ring);
        gfx_v8_0_cp_gfx_start(adev);
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r)
-               ring->ready = false;
+       ring->sched.ready = true;
+       r = amdgpu_ring_test_helper(ring);
 
        return r;
 }
@@ -4396,8 +4394,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
        } else {
                WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
-               adev->gfx.kiq.ring.ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
+               adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
 }
@@ -4473,11 +4471,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
        }
 
-       r = amdgpu_ring_test_ring(kiq_ring);
-       if (r) {
+       r = amdgpu_ring_test_helper(kiq_ring);
+       if (r)
                DRM_ERROR("KCQ enable failed\n");
-               kiq_ring->ready = false;
-       }
        return r;
 }
 
@@ -4781,7 +4777,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
        amdgpu_bo_kunmap(ring->mqd_obj);
        ring->mqd_ptr = NULL;
        amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->ready = true;
+       ring->sched.ready = true;
        return 0;
 }
 
@@ -4820,10 +4816,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
         */
        for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
                ring = &adev->gfx.compute_ring[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
        }
 
 done:
@@ -4899,7 +4892,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
-       r = amdgpu_ring_test_ring(kiq_ring);
+       r = amdgpu_ring_test_helper(kiq_ring);
        if (r)
                DRM_ERROR("KCQ disable failed\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 4281a37a7feb52a3c5a86a1be291f961fb309702..d71c9c47444e009b91d681cfa02e2fc9f4ba4174 100644
@@ -2537,7 +2537,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
        if (!enable) {
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                       adev->gfx.gfx_ring[i].ready = false;
+                       adev->gfx.gfx_ring[i].sched.ready = false;
        }
        WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
@@ -2727,7 +2727,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
        /* start the ring */
        gfx_v9_0_cp_gfx_start(adev);
-       ring->ready = true;
+       ring->sched.ready = true;
 
        return 0;
 }
@@ -2742,8 +2742,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
                WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                       adev->gfx.compute_ring[i].ready = false;
-               adev->gfx.kiq.ring.ready = false;
+                       adev->gfx.compute_ring[i].sched.ready = false;
+               adev->gfx.kiq.ring.sched.ready = false;
        }
        udelay(50);
 }
@@ -2866,11 +2866,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
        }
 
-       r = amdgpu_ring_test_ring(kiq_ring);
-       if (r) {
+       r = amdgpu_ring_test_helper(kiq_ring);
+       if (r)
                DRM_ERROR("KCQ enable failed\n");
-               kiq_ring->ready = false;
-       }
 
        return r;
 }
@@ -3249,7 +3247,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
        amdgpu_bo_kunmap(ring->mqd_obj);
        ring->mqd_ptr = NULL;
        amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->ready = true;
+       ring->sched.ready = true;
        return 0;
 }
 
@@ -3314,19 +3312,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
                return r;
 
        ring = &adev->gfx.gfx_ring[0];
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                return r;
-       }
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
-
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r)
-                       ring->ready = false;
+               amdgpu_ring_test_helper(ring);
        }
 
        gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3391,7 +3383,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
-       r = amdgpu_ring_test_ring(kiq_ring);
+       r = amdgpu_ring_test_helper(kiq_ring);
        if (r)
                DRM_ERROR("KCQ disable failed\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index af786b5513bcda6971e94adbe958cf65f67e8d4f..a7e61c6de71c12726365764fb21c7f287fa8c912 100644
@@ -381,7 +381,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
                struct amdgpu_vmhub *hub = &adev->vmhub[i];
                u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-               if (adev->gfx.kiq.ring.ready &&
+               if (adev->gfx.kiq.ring.sched.ready &&
                    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
                    !adev->in_gpu_reset) {
                        r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index bedbd5f296c5f493ad4c37e5b3ab44396469e159..fa2f6bea1d60333bd9d1aa02320238bc3095415b 100644
@@ -349,8 +349,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -471,17 +471,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        sdma_v2_4_enable(adev, true);
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 415968dc6c872bad5642493036e0fab03384e294..942fe3696ef0a3ddae05dc7e6b7e3ae10b4db59e 100644
@@ -523,8 +523,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -739,7 +739,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-               ring->ready = true;
+               ring->sched.ready = true;
        }
 
        /* unhalt the MEs */
@@ -749,11 +749,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 88d93430dfb1eb71beae7f917b4142095d1ff773..65312897b8ba930e94980995d16c398ed20cb455 100644
@@ -634,8 +634,8 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
 
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -675,8 +675,8 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
        }
 
-       sdma0->ready = false;
-       sdma1->ready = false;
+       sdma0->sched.ready = false;
+       sdma1->sched.ready = false;
 }
 
 /**
@@ -863,7 +863,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 
-       ring->ready = true;
+       ring->sched.ready = true;
 }
 
 /**
@@ -956,7 +956,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
 
-       ring->ready = true;
+       ring->sched.ready = true;
 }
 
 static void
@@ -1144,20 +1144,16 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
 
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->sdma.has_page_queue) {
                        struct amdgpu_ring *page = &adev->sdma.instance[i].page;
 
-                       r = amdgpu_ring_test_ring(page);
-                       if (r) {
-                               page->ready = false;
+                       r = amdgpu_ring_test_helper(page);
+                       if (r)
                                return r;
-                       }
                }
 
                if (adev->mman.buffer_funcs_ring == ring)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index d9b27d7017ddd1be22e8f8642e4fd2cb1a1bbae5..05ce1ca4c78954c2147f3e082bcccff2a2bae865 100644
@@ -122,7 +122,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-               ring->ready = false;
+               ring->sched.ready = false;
        }
 }
 
@@ -175,13 +175,11 @@ static int si_dma_start(struct amdgpu_device *adev)
                WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-               ring->ready = true;
+               ring->sched.ready = true;
 
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        return r;
-               }
 
                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf39fed710f77c8ff94d741af579965639d..8cabe982a61ddf1139366de59f1f8e0c50ed4dc5 100644
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
        uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5ac9ab3ff8dc640a5a73cfb32dd99976e6..56b02ee543f95036091e37b2fc5bfec2e1da0681 100644
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v5_0_enable_mgcg(adev, true);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b40275e8e7b0ffa7600c99af464dfe5031a23..3027607a187cffeccf9a632d68b9969cdffca617 100644
@@ -476,12 +476,9 @@ static int uvd_v6_0_hw_init(void *handle)
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
@@ -513,12 +510,9 @@ static int uvd_v6_0_hw_init(void *handle)
        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst->ring_enc[i];
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
                }
        }
 
@@ -548,7 +542,7 @@ static int uvd_v6_0_hw_fini(void *handle)
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39afcfb86461d68e47496578184d2c7b74317..76a7fbef532ae4448787f50753479bd25047ab18 100644
@@ -540,12 +540,9 @@ static int uvd_v7_0_hw_init(void *handle)
                ring = &adev->uvd.inst[j].ring;
 
                if (!amdgpu_sriov_vf(adev)) {
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
 
                        r = amdgpu_ring_alloc(ring, 10);
                        if (r) {
@@ -582,12 +579,9 @@ static int uvd_v7_0_hw_init(void *handle)
 
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        ring = &adev->uvd.inst[j].ring_enc[i];
-                       ring->ready = true;
-                       r = amdgpu_ring_test_ring(ring);
-                       if (r) {
-                               ring->ready = false;
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
                                goto done;
-                       }
                }
        }
 done:
@@ -619,7 +613,7 @@ static int uvd_v7_0_hw_fini(void *handle)
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
                if (adev->uvd.harvest_config & (1 << i))
                        continue;
-               adev->uvd.inst[i].ring.ready = false;
+               adev->uvd.inst[i].ring.sched.ready = false;
        }
 
        return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828360d3b3c1d181d0214134b33f8dba7704..bed78a778e3f1f546e30e18dc67f709751d87fc6 100644
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
 
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
 
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd39730070a30132f7841a1e7dc18a8e54a35a..2b1a5a793942633b4b0146e2d6ba3adeaa78e4aa 100644
@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle)
 
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
-
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c9471890bf71b9ce94a5eecb96f4594eba01bc0..65b71fc2f7b90f9f15f9e58e6c24f774f2fe73ec 100644
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
        if (r)
                return r;
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
-
        for (i = 0; i < adev->vce.num_rings; i++) {
-               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
-               else
-                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
        }
 
        for (i = 0; i < adev->vce.num_rings; i++)
-               adev->vce.ring[i].ready = false;
+               adev->vce.ring[i].sched.ready = false;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index eae90922fdbe0f4356be31c4fd16eeb6846409a2..29628f60d50c84d232bde50ff12787add13f561d 100644
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int i, r;
 
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
-               ring->ready = true;
-               r = amdgpu_ring_test_ring(ring);
-               if (r) {
-                       ring->ready = false;
+               ring->sched.ready = true;
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
                        goto done;
-               }
        }
 
        ring = &adev->vcn.ring_jpeg;
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
+       r = amdgpu_ring_test_helper(ring);
+       if (r)
                goto done;
-       }
 
 done:
        if (!r)
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
                vcn_v1_0_stop(adev);
 
-       ring->ready = false;
+       ring->sched.ready = false;
 
        return 0;
 }