drm/amdkfd: avoid HMM change cause circular lock
authorPhilip Yang <Philip.Yang@amd.com>
Wed, 5 Dec 2018 19:03:43 +0000 (14:03 -0500)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 24 May 2019 17:21:01 +0000 (12:21 -0500)
There is a circular lock dependency between the gfx and kfd paths with the
HMM change: lock(dqm) -> bo::reserve -> amdgpu_mn_lock

To avoid this, move init/uninit_mqd() out of lock(dqm), to remove the nested
locking between mmap_sem and bo::reserve. The resulting locking order
is: bo::reserve -> amdgpu_mn_lock(p->mn)

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

index d6fe75245d0533cb9f58f3dbb9f207503f42d35b..cf6b57627842b82f1afd92975959655ebeecce97 100644 (file)
@@ -1161,21 +1161,17 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        int retval;
        struct mqd_manager *mqd_mgr;
 
-       retval = 0;
-
-       dqm_lock(dqm);
-
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
                                dqm->total_queue_count);
                retval = -EPERM;
-               goto out_unlock;
+               goto out;
        }
 
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                retval = allocate_sdma_queue(dqm, &q->sdma_id);
                if (retval)
-                       goto out_unlock;
+                       goto out;
                q->properties.sdma_queue_id =
                        q->sdma_id / get_num_sdma_engines(dqm);
                q->properties.sdma_engine_id =
@@ -1189,6 +1185,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (retval)
                goto out_deallocate_sdma_queue;
 
+       /* Do init_mqd before dqm_lock(dqm) to avoid circular locking order:
+        * lock(dqm) -> bo::reserve
+        */
        mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
 
@@ -1196,6 +1195,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }
+
        /*
         * Eviction state logic: we only mark active queues as evicted
         * to avoid the overhead of restoring inactive queues later
@@ -1204,9 +1204,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                q->properties.is_evicted = (q->properties.queue_size > 0 &&
                                            q->properties.queue_percent > 0 &&
                                            q->properties.queue_address != 0);
-
        dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
-
        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;
        retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
@@ -1214,6 +1212,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (retval)
                goto out_deallocate_doorbell;
 
+       dqm_lock(dqm);
+
        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
        if (q->properties.is_active) {
@@ -1241,9 +1241,7 @@ out_deallocate_doorbell:
 out_deallocate_sdma_queue:
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                deallocate_sdma_queue(dqm, q->sdma_id);
-out_unlock:
-       dqm_unlock(dqm);
-
+out:
        return retval;
 }
 
@@ -1406,8 +1404,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                        qpd->reset_wavefronts = true;
        }
 
-       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-
        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type
@@ -1418,6 +1414,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        dqm_unlock(dqm);
 
+       /* Do uninit_mqd after dqm_unlock(dqm) to avoid circular locking */
+       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
        return retval;
 
 failed:
@@ -1641,7 +1640,11 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                qpd->reset_wavefronts = false;
        }
 
-       /* lastly, free mqd resources */
+       dqm_unlock(dqm);
+
+       /* Lastly, free mqd resources.
+        * Do uninit_mqd() after dqm_unlock to avoid circular locking.
+        */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
                mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
@@ -1655,7 +1658,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
        }
 
 out:
-       dqm_unlock(dqm);
        return retval;
 }