drm/amdgpu: remove the ring lock v2
Author:     Christian König <christian.koenig@amd.com>
AuthorDate: Thu, 21 Jan 2016 10:28:53 +0000 (11:28 +0100)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Wed, 10 Feb 2016 19:16:58 +0000 (14:16 -0500)
The ring lock is not needed any more because all ring access goes through the scheduler now.

v2: Update commit message.
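
Outside of amdgpu_ring.c the change is mechanical; each locking wrapper
is replaced by the function it used to wrap (summary derived from the
hunks below):

    amdgpu_ring_lock(ring, ndw)     -> amdgpu_ring_alloc(ring, ndw)
    amdgpu_ring_unlock_commit(ring) -> amdgpu_ring_commit(ring)
    amdgpu_ring_unlock_undo(ring)   -> amdgpu_ring_undo(ring)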

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
15 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
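
As an illustration of the pattern change, here is the caller side before
and after, condensed from the amdgpu_vce.c hunk below (a sketch rather
than a verbatim excerpt; the DRM_ERROR reporting is shortened):

	/* Before: amdgpu_ring_lock() took the ring mutex and then
	 * allocated space on the ring. */
	r = amdgpu_ring_lock(ring, 16);
	if (r)
		return r;
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);	/* commit, then mutex_unlock */

	/* After: plain allocation; all submissions go through the
	 * scheduler now, so no mutex is needed. */
	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);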

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3d4c2abfcfe826bbaca89add3f00f27d606efb13..081953e1ec6cabeeee4f9d27952ff326efba4f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -814,7 +814,6 @@ struct amdgpu_ring {
        struct amd_gpu_scheduler        sched;
 
        spinlock_t              fence_lock;
-       struct mutex            *ring_lock;
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
@@ -1190,12 +1189,9 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 /* Ring access between begin & end cannot sleep */
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
                            uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -2009,7 +2005,6 @@ struct amdgpu_device {
 
        /* rings */
        unsigned                        fence_context;
-       struct mutex                    ring_lock;
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bbe8023bf58fce6fb9aea62984e3c0de19b4ad5c..cf8a3b37a111d126b2d489516b0e0518ddca4869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1455,7 +1455,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
-       mutex_init(&adev->ring_lock);
        mutex_init(&adev->vm_manager.lock);
        atomic_set(&adev->irq.ih.lock, 0);
        mutex_init(&adev->gem.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 08963fc83168623df173f616a6c566653d677db5..72105020086cc3662831f5b87a4c342f9a371f49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -487,7 +487,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 
        if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
                kmem_cache_destroy(amdgpu_fence_slab);
-       mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
 
@@ -505,7 +504,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
                del_timer_sync(&ring->fence_drv.fallback_timer);
                ring->fence_drv.initialized = false;
        }
-       mutex_unlock(&adev->ring_lock);
 }
 
 /**
@@ -520,7 +518,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 {
        int i, r;
 
-       mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
@@ -537,7 +534,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
-       mutex_unlock(&adev->ring_lock);
 }
 
 /**
@@ -556,7 +552,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 {
        int i;
 
-       mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
@@ -566,7 +561,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
-       mutex_unlock(&adev->ring_lock);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 56ae9a58dbc5efd278cd589deacca02272047205..40c9779993c86b7c6c3fb55ab9754c45c892edf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -147,7 +147,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                return -EINVAL;
        }
 
-       r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
+       r = amdgpu_ring_alloc(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
@@ -155,7 +155,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 
        r = amdgpu_sync_wait(&ibs->sync);
        if (r) {
-               amdgpu_ring_unlock_undo(ring);
+               amdgpu_ring_undo(ring);
                dev_err(adev->dev, "failed to sync wait (%d)\n", r);
                return r;
        }
@@ -180,7 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 
                if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
                        ring->current_ctx = old_ctx;
-                       amdgpu_ring_unlock_undo(ring);
+                       amdgpu_ring_undo(ring);
                        return -EINVAL;
                }
                amdgpu_ring_emit_ib(ring, ib);
@@ -191,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                ring->current_ctx = old_ctx;
-               amdgpu_ring_unlock_undo(ring);
+               amdgpu_ring_undo(ring);
                return r;
        }
 
@@ -203,7 +203,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a5ab258eb5dac4a383f6aff81850d..a0da563c8c82f8a3f9ba025514d99019af992e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -623,14 +623,12 @@ force:
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
        }
 
-       mutex_lock(&adev->ring_lock);
-
        /* update whether vce is active */
        ps->vce_active = adev->pm.dpm.vce_active;
 
        ret = amdgpu_dpm_pre_set_power_state(adev);
        if (ret)
-               goto done;
+               return;
 
        /* update display watermarks based on new power state */
        amdgpu_display_bandwidth_update(adev);
@@ -667,9 +665,6 @@ force:
                        amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
                }
        }
-
-done:
-       mutex_unlock(&adev->ring_lock);
 }
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
@@ -802,13 +797,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                int i = 0;
 
                amdgpu_display_bandwidth_update(adev);
-               mutex_lock(&adev->ring_lock);
-                       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-                               struct amdgpu_ring *ring = adev->rings[i];
-                               if (ring && ring->ready)
-                                       amdgpu_fence_wait_empty(ring);
-                       }
-               mutex_unlock(&adev->ring_lock);
+               for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+                       struct amdgpu_ring *ring = adev->rings[i];
+                       if (ring && ring->ready)
+                               amdgpu_fence_wait_empty(ring);
+               }
 
                amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
        } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 66c6bbd27309ed7c03e3a6869d8bb3a0476eed33..81d06d772dde31ad2fa557e71efe79f55bb4caf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -105,30 +105,6 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
        return 0;
 }
 
-/**
- * amdgpu_ring_lock - lock the ring and allocate space on it
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- * @ndw: number of dwords to allocate in the ring buffer
- *
- * Lock the ring and allocate @ndw dwords in the ring buffer
- * (all asics).
- * Returns 0 on success, error on failure.
- */
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
-{
-       int r;
-
-       mutex_lock(ring->ring_lock);
-       r = amdgpu_ring_alloc(ring, ndw);
-       if (r) {
-               mutex_unlock(ring->ring_lock);
-               return r;
-       }
-       return 0;
-}
-
 /** amdgpu_ring_insert_nop - insert NOP packets
  *
  * @ring: amdgpu_ring structure holding ring information
@@ -167,20 +143,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
        amdgpu_ring_set_wptr(ring);
 }
 
-/**
- * amdgpu_ring_unlock_commit - tell the GPU to execute the new
- * commands on the ring buffer and unlock it
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_commit() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
-{
-       amdgpu_ring_commit(ring);
-       mutex_unlock(ring->ring_lock);
-}
-
 /**
  * amdgpu_ring_undo - reset the wptr
  *
@@ -193,19 +155,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
        ring->wptr = ring->wptr_old;
 }
 
-/**
- * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_undo() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
-{
-       amdgpu_ring_undo(ring);
-       mutex_unlock(ring->ring_lock);
-}
-
 /**
  * amdgpu_ring_backup - Back up the content of a ring
  *
@@ -218,43 +167,32 @@ unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
 {
        unsigned size, ptr, i;
 
-       /* just in case lock the ring */
-       mutex_lock(ring->ring_lock);
        *data = NULL;
 
-       if (ring->ring_obj == NULL) {
-               mutex_unlock(ring->ring_lock);
+       if (ring->ring_obj == NULL)
                return 0;
-       }
 
        /* it doesn't make sense to save anything if all fences are signaled */
-       if (!amdgpu_fence_count_emitted(ring)) {
-               mutex_unlock(ring->ring_lock);
+       if (!amdgpu_fence_count_emitted(ring))
                return 0;
-       }
 
        ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
 
        size = ring->wptr + (ring->ring_size / 4);
        size -= ptr;
        size &= ring->ptr_mask;
-       if (size == 0) {
-               mutex_unlock(ring->ring_lock);
+       if (size == 0)
                return 0;
-       }
 
        /* and then save the content of the ring */
        *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
-       if (!*data) {
-               mutex_unlock(ring->ring_lock);
+       if (!*data)
                return 0;
-       }
        for (i = 0; i < size; ++i) {
                (*data)[i] = ring->ring[ptr++];
                ptr &= ring->ptr_mask;
        }
 
-       mutex_unlock(ring->ring_lock);
        return size;
 }
 
@@ -276,7 +214,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
                return 0;
 
        /* restore the saved ring content */
-       r = amdgpu_ring_lock(ring, size);
+       r = amdgpu_ring_alloc(ring, size);
        if (r)
                return r;
 
@@ -284,7 +222,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
                amdgpu_ring_write(ring, data[i]);
        }
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
        kfree(data);
        return 0;
 }
@@ -352,7 +290,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                return r;
        }
 
-       ring->ring_lock = &adev->ring_lock;
        /* Align ring size */
        rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
@@ -410,15 +347,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        int r;
        struct amdgpu_bo *ring_obj;
 
-       if (ring->ring_lock == NULL)
-               return;
-
-       mutex_lock(ring->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
-       mutex_unlock(ring->ring_lock);
 
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 16fbde9c5f56c916c14bee508d19e77528d9d826..c90517f61210827492d4ed48eb87680620a659a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -788,14 +788,14 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
        unsigned i;
        int r;
 
-       r = amdgpu_ring_lock(ring, 16);
+       r = amdgpu_ring_alloc(ring, 16);
        if (r) {
                DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, VCE_CMD_END);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 538d6a26e52997837f20c62e63dc9b9e534f122b..e8a48ae8d3609b140fbdedf5836e0744b9d74217 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -560,7 +560,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
-       r = amdgpu_ring_lock(ring, 5);
+       r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
@@ -571,7 +571,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index b20eb7969e05db8ec70152c5d59d3016ae523521..1d796eac421b9bde0de2089615649f593800ce36 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2379,7 +2379,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = amdgpu_ring_lock(ring, 3);
+       r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
@@ -2388,7 +2388,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
@@ -2812,7 +2812,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
 
        gfx_v7_0_cp_gfx_enable(adev, true);
 
-       r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
+       r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
                return r;
@@ -2881,7 +2881,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
        amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 248271050d0a37795d2bdf2158ceca0c9bd8c4b4..0d9ef524cf71d46f03176bc912e150fa96fef863 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -652,7 +652,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = amdgpu_ring_lock(ring, 3);
+       r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
@@ -662,7 +662,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
@@ -3062,7 +3062,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
 
        gfx_v8_0_cp_gfx_enable(adev, true);
 
-       r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4);
+       r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
                return r;
@@ -3126,7 +3126,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
        amdgpu_ring_write(ring, 0x8000);
        amdgpu_ring_write(ring, 0x8000);
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 622f5dad6f2a2b29b18e3db8243a228ae4e4122e..9fae4bf1a6c6403e5a9c97b9f00d0cbabac232fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -611,7 +611,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
-       r = amdgpu_ring_lock(ring, 5);
+       r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
@@ -624,7 +624,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index d6170e69eb34adf3e5dc9bc8f3dcf880e4060b6a..b2fbf96dad7a18e200bd099394860a8393e8a0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -762,7 +762,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
 
-       r = amdgpu_ring_lock(ring, 5);
+       r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
@@ -775,7 +775,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c4753726a79fd48f9a1c6ee7ec3e09cfb9cf5fbc..e7a141c75467affd71f0d62876d071b9f354a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -164,7 +164,7 @@ static int uvd_v4_2_hw_init(void *handle)
                goto done;
        }
 
-       r = amdgpu_ring_lock(ring, 10);
+       r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
@@ -189,7 +189,7 @@ static int uvd_v4_2_hw_init(void *handle)
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
 done:
        /* lower clocks again */
@@ -453,7 +453,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-       r = amdgpu_ring_lock(ring, 3);
+       r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
@@ -461,7 +461,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 774033ab2b86bc375936c7505ce2069367507e07..3775f7756cf264f05a6bd0d9318cb607ce7dc35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -160,7 +160,7 @@ static int uvd_v5_0_hw_init(void *handle)
                goto done;
        }
 
-       r = amdgpu_ring_lock(ring, 10);
+       r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
@@ -185,7 +185,7 @@ static int uvd_v5_0_hw_init(void *handle)
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
 done:
        /* lower clocks again */
@@ -497,7 +497,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-       r = amdgpu_ring_lock(ring, 3);
+       r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
@@ -505,7 +505,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index eb0bbbab5b355c39e9f282572a85410575f4274e..0b2fccad8e9ae85bc712e2e1ea0f32e3ae7fa168 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -157,7 +157,7 @@ static int uvd_v6_0_hw_init(void *handle)
                goto done;
        }
 
-       r = amdgpu_ring_lock(ring, 10);
+       r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
@@ -182,7 +182,7 @@ static int uvd_v6_0_hw_init(void *handle)
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);
 
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
 
 done:
        if (!r)
@@ -736,7 +736,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
        int r;
 
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-       r = amdgpu_ring_lock(ring, 3);
+       r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
@@ -744,7 +744,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
-       amdgpu_ring_unlock_commit(ring);
+       amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)