drm/amdgpu: send UVD IB tests directly to the ring again
author     Christian König <christian.koenig@amd.com>
           Wed, 3 Feb 2016 15:01:06 +0000 (16:01 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 10 Feb 2016 19:17:23 +0000 (14:17 -0500)
We need the IB tests for GPU resets as well, and the
scheduler should be stopped at that point.
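
The idea in a nutshell: when the caller asks for a direct submission the IB is
written to the ring with amdgpu_ib_schedule() and the job is freed by hand,
otherwise the job keeps going through amdgpu_job_submit() and the GPU
scheduler. A minimal standalone sketch of that decision follows; the types and
helpers below are stand-ins for illustration only, not the real amdgpu API.

#include <stdbool.h>
#include <stdio.h>

struct ring  { const char *name; };
struct job   { int id; };
struct fence { int seqno; };

static int submit_direct(struct ring *ring, struct job *job, struct fence *f)
{
	/* stand-in for amdgpu_ib_schedule(): write the IB to the ring right away */
	printf("direct: job %d -> %s ring\n", job->id, ring->name);
	f->seqno = job->id;
	return 0;
}

static int submit_scheduled(struct ring *ring, struct job *job, struct fence *f)
{
	/* stand-in for amdgpu_job_submit(): hand the job to the GPU scheduler */
	printf("scheduler: job %d -> %s ring\n", job->id, ring->name);
	f->seqno = job->id;
	return 0;
}

/*
 * Mirrors the branch added to amdgpu_uvd_send_msg(): during a GPU reset the
 * scheduler may be stopped, so IB tests must bypass it (direct == true),
 * while normal handle teardown keeps going through the scheduler.
 */
static int send_msg(struct ring *ring, struct job *job, bool direct,
		    struct fence *f)
{
	int r;

	if (direct) {
		r = submit_direct(ring, job, f);
		if (r)
			return r;
		/* the real code frees the job by hand here (amdgpu_job_free()) */
		return 0;
	}
	return submit_scheduled(ring, job, f);
}

int main(void)
{
	struct ring ring = { "uvd" };
	struct job job = { 1 };
	struct fence fence;

	send_msg(&ring, &job, true, &fence);	/* IB test / GPU reset path */
	send_msg(&ring, &job, false, &fence);	/* amdgpu_uvd_free_handles() path */
	return 0;
}

In the patch below, the uvd_v*_0_ring_test_ib() functions and
amdgpu_uvd_get_create_msg() take the direct path, while amdgpu_uvd_suspend()
and amdgpu_uvd_free_handles() keep submitting through the scheduler.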

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 4e98be8b29fb0c9f87da7b44a73ef0ebb46a9183..00b608b6c8c474c47512191e77238b67dd197b9d 100644
@@ -241,7 +241,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
                        amdgpu_uvd_note_usage(adev);
 
-                       r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+                       r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
@@ -295,7 +295,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 
                        amdgpu_uvd_note_usage(adev);
 
-                       r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+                       r = amdgpu_uvd_get_destroy_msg(ring, handle,
+                                                      false, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
@@ -823,9 +824,8 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
        return 0;
 }
 
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
-                              struct amdgpu_bo *bo,
-                              struct fence **fence)
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+                              bool direct, struct fence **fence)
 {
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
@@ -872,9 +872,19 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
                ib->ptr[i] = PACKET2(0);
        ib->length_dw = 16;
 
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-       if (r)
-               goto err_free;
+       if (direct) {
+               r = amdgpu_ib_schedule(ring, 1, ib,
+                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+               if (r)
+                       goto err_free;
+
+               amdgpu_job_free(job);
+       } else {
+               r = amdgpu_job_submit(job, ring,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+               if (r)
+                       goto err_free;
+       }
 
        ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
@@ -942,11 +952,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);
 
-       return amdgpu_uvd_send_msg(ring, bo, fence);
+       return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct fence **fence)
+                              bool direct, struct fence **fence)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
@@ -984,7 +994,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);
 
-       return amdgpu_uvd_send_msg(ring, bo, fence);
+       return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 1724c2c861516eea61b7d23f7521e7c8c834a5e6..9a3b449081a72fd9c9ac79c9b3c9c5e28614d4ec 100644
@@ -31,7 +31,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct fence **fence);
+                              bool direct, struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
                             struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index d50679222b65cea53f8eda4d5533aedfbffb52c0..232fe2a3a9b8a297dc00c6f9e919e5cb80e99892 100644
@@ -522,7 +522,7 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
                goto error;
        }
 
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3e7136dfd78762d870c7e94c87d9e82bc7582cb..5b4aa2a36c023fd3ca1a9a2c920f7f52a5f8d187 100644
@@ -568,7 +568,7 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
                goto error;
        }
 
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index aa536076e6954086aa977db907ffff8193ccdd5e..c6bca730d25b4e5ba4485f25efeabf526a14ed30 100644
@@ -800,7 +800,7 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
                goto error;
        }
 
-       r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;