drm/amd/sched: allow clients to edit an entity's rq v2
author		Andres Rodriguez <andresx7@gmail.com>	Fri, 2 Jun 2017 19:09:00 +0000 (15:09 -0400)
committer	Alex Deucher <alexander.deucher@amd.com>	Mon, 9 Oct 2017 20:30:22 +0000 (16:30 -0400)
Add amd_sched_entity_set_rq() so clients can move an entity to a different
run queue. This is useful for changing an entity's priority at runtime.

v2: don't modify the order of amd_sched_entity members

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
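
[Editor's note: for illustration only, not part of this patch. A minimal sketch
of how a client might use the new helper to change an entity's priority at
runtime. The wrapper name below is hypothetical, and it assumes the scheduler
keeps one run queue per priority level in its sched_rq array, indexed by
enum amd_sched_priority, as this version of the scheduler does.]

	/* Hypothetical client-side helper, not part of this patch. */
	static void example_set_entity_priority(struct amd_gpu_scheduler *sched,
						struct amd_sched_entity *entity,
						enum amd_sched_priority priority)
	{
		/* Pick the run queue that corresponds to the new priority. */
		struct amd_sched_rq *rq = &sched->sched_rq[priority];

		/*
		 * amd_sched_entity_set_rq() removes the entity from its current
		 * run queue (if any) and adds it to the new one under
		 * entity->rq_lock; passing NULL only detaches the entity, as
		 * amd_sched_entity_fini() now does.
		 */
		amd_sched_entity_set_rq(entity, rq);
	}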
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 08e1332d814a2f090885f46c433f8446a3b87f0a..59f1325d975c9dc5c65f92ac25351462f3011023 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
        entity->rq = rq;
        entity->sched = sched;
 
+       spin_lock_init(&entity->rq_lock);
        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
@@ -204,7 +205,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
 {
-       struct amd_sched_rq *rq = entity->rq;
        int r;
 
        if (!amd_sched_entity_is_initialized(sched, entity))
@@ -218,7 +218,7 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
        else
                r = wait_event_killable(sched->job_scheduled,
                                        amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
+       amd_sched_entity_set_rq(entity, NULL);
        if (r) {
                struct amd_sched_job *job;
 
@@ -257,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
        dma_fence_put(f);
 }
 
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq)
+{
+       if (entity->rq == rq)
+               return;
+
+       spin_lock(&entity->rq_lock);
+
+       if (entity->rq)
+               amd_sched_rq_remove_entity(entity->rq, entity);
+
+       entity->rq = rq;
+       if (rq)
+               amd_sched_rq_add_entity(rq, entity);
+
+       spin_unlock(&entity->rq_lock);
+}
+
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity)
 {
@@ -354,7 +372,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
+               spin_lock(&entity->rq_lock);
                amd_sched_rq_add_entity(entity->rq, entity);
+               spin_unlock(&entity->rq_lock);
                amd_sched_wakeup(sched);
        }
        return added;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index da040bc17e6147fcc07e3929c9509f5e97e33cc0..4b528f7abbfe9fb057f8b0490c1fa8380f8fc9df 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -39,6 +39,7 @@ struct amd_sched_rq;
 struct amd_sched_entity {
        struct list_head                list;
        struct amd_sched_rq             *rq;
+       spinlock_t                      rq_lock;
        struct amd_gpu_scheduler        *sched;
 
        spinlock_t                      queue_lock;
@@ -154,6 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq);
 
 int amd_sched_fence_slab_init(void);
 void amd_sched_fence_slab_fini(void);