drm/amd: fix deadlock of job_list_lock V2
Author:     Chunming Zhou <David1.Zhou@amd.com>
AuthorDate: Mon, 25 Jul 2016 05:55:35 +0000 (13:55 +0800)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Fri, 29 Jul 2016 18:37:06 +0000 (14:37 -0400)

run_job involves a mutex and may therefore sleep, so it must not be called
while holding the job_list_lock spinlock. Drop the lock around the call and
re-acquire it afterwards.

V2: use list_for_each_entry_safe, since the current job may complete and be
removed from the list while the lock is dropped.
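
Illustrative sketch of the locking pattern this change adopts (placeholder
names only, not driver code; 'struct item', 'io_lock' and the surrounding
function are invented for illustration): the spinlock is dropped around the
work that may sleep, and the _safe iterator is used because the current
entry can go away while the lock is not held.

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head node;
            struct mutex io_lock;            /* taking this may sleep */
    };

    static void process_all(struct list_head *head, spinlock_t *lock)
    {
            struct item *pos, *tmp;

            spin_lock(lock);
            /* _safe: the current entry may be removed while the lock is dropped */
            list_for_each_entry_safe(pos, tmp, head, node) {
                    spin_unlock(lock);       /* never sleep under a spinlock */
                    mutex_lock(&pos->io_lock);
                    /* ... work that may sleep ... */
                    mutex_unlock(&pos->io_lock);
                    spin_lock(lock);
            }
            spin_unlock(lock);
    }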

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 21c49d38fcb341d34e9996465ce9c98255edc821..ef312bb75fda094b1f51bac52ce391d459341ca0 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -399,7 +399,7 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 {
-       struct amd_sched_job *s_job;
+       struct amd_sched_job *s_job, *tmp;
        int r;
 
        spin_lock(&sched->job_list_lock);
@@ -408,10 +408,12 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
        if (s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
-       list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+       list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
-               struct fence *fence = sched->ops->run_job(s_job);
+               struct fence *fence;
 
+               spin_unlock(&sched->job_list_lock);
+               fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = fence_get(fence);
@@ -427,6 +429,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
+               spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
 }
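
For readability, a sketch of how the recovery loop reads with this patch
applied, reconstructed only from the hunks shown above (context between the
hunks is not part of this diff and is elided):

    void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
    {
            struct amd_sched_job *s_job, *tmp;
            int r;

            spin_lock(&sched->job_list_lock);
            /* ... context elided in the diff ... */
            if (s_job)
                    schedule_delayed_work(&s_job->work_tdr, sched->timeout);

            list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                    struct amd_sched_fence *s_fence = s_job->s_fence;
                    struct fence *fence;

                    spin_unlock(&sched->job_list_lock);  /* run_job may sleep */
                    fence = sched->ops->run_job(s_job);
                    atomic_inc(&sched->hw_rq_count);
                    if (fence) {
                            s_fence->parent = fence_get(fence);
                            /* ... context elided in the diff ... */
                    } else {
                            DRM_ERROR("Failed to run job!\n");
                            amd_sched_process_job(NULL, &s_fence->cb);
                    }
                    spin_lock(&sched->job_list_lock);    /* re-taken before the next entry */
            }
            spin_unlock(&sched->job_list_lock);
    }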