Allow priorities to be set in the scheduler: replace the separate kernel and normal run queues with an array of run queues indexed by the new enum amd_sched_priority, and have contexts pick their run queue by priority.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
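
The shape of the change, in brief: amdgpu_ctx_init() no longer takes a bool selecting between the kernel and the normal run queue, but an enum amd_sched_priority that indexes an array of run queues inside the scheduler. A minimal standalone sketch of how old callers map onto the new interface (to_priority() is a hypothetical helper for illustration only, not part of the patch):

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,	/* highest priority */
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY		/* sentinel, sizes the rq array */
};

/* Old callers passed "bool kernel"; the equivalent level is: */
static inline enum amd_sched_priority to_priority(int kernel)
{
	return kernel ? AMD_SCHED_PRIORITY_KERNEL
		      : AMD_SCHED_PRIORITY_NORMAL;
}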
struct idr ctx_handles;
};
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
struct amdgpu_ctx *ctx);
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
#include <drm/drmP.h>
#include "amdgpu.h"
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
- if (kernel)
- rq = &adev->rings[i]->sched.kernel_rq;
- else
- rq = &adev->rings[i]->sched.sched_rq;
+ if (pri >= AMD_SCHED_MAX_PRIORITY)
+ return -EINVAL;
+ rq = &adev->rings[i]->sched.sched_rq[pri];
r = amd_sched_entity_init(&adev->rings[i]->sched,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
return r;
}
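
Worth noting about the hunk above: the range check on pri sits inside the per-ring loop, but pri is loop-invariant, so it can only trip on the first iteration; hoisting it before the loop would be equivalent. A standalone model of the lookup (the struct layouts below are simplified stand-ins for the driver's real types):

#include <stddef.h>

#define AMD_SCHED_MAX_PRIORITY 2	/* matches the enum sentinel */

struct amd_sched_rq { int dummy; /* run queue state elided */ };

struct amd_gpu_scheduler {
	struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY];
};

/* Mirror the bounds check and array indexing from amdgpu_ctx_init(). */
static struct amd_sched_rq *pick_rq(struct amd_gpu_scheduler *sched,
				    unsigned int pri)
{
	if (pri >= AMD_SCHED_MAX_PRIORITY)
		return NULL;	/* caller maps this to -EINVAL */
	return &sched->sched_rq[pri];
}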
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, false, ctx);
+ r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
mutex_unlock(&mgr->lock);
return r;
return r;
}
- r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+ r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
if (r) {
dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
return r;
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
struct amd_sched_entity *entity;
+ int i;
if (!amd_sched_ready(sched))
return NULL;
/* Kernel run queue has higher priority than normal run queue*/
- entity = amd_sched_rq_select_entity(&sched->kernel_rq);
- if (entity == NULL)
- entity = amd_sched_rq_select_entity(&sched->sched_rq);
+ for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
+ entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+ if (entity)
+ break;
+ }
return entity;
}
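
The two-branch lookup is now an ascending scan, so a lower enum value means a higher priority: index 0 (AMD_SCHED_PRIORITY_KERNEL) is tried before index 1 (AMD_SCHED_PRIORITY_NORMAL), preserving the old behavior while allowing any number of levels. A small, compilable demo of the same scan order (the rq stand-in and job strings are invented for illustration):

#include <stdio.h>

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY
};

/* Stand-in run queue: a non-NULL head means an entity is ready. */
struct rq { const char *head; };

static const char *select_entity(const struct rq rqs[AMD_SCHED_MAX_PRIORITY])
{
	int i;

	/* Highest priority (lowest index) wins, as in amd_sched_select_entity(). */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		if (rqs[i].head)
			return rqs[i].head;
	return NULL;
}

int main(void)
{
	struct rq rqs[AMD_SCHED_MAX_PRIORITY] = {
		[AMD_SCHED_PRIORITY_NORMAL] = { "user job" },
	};

	printf("%s\n", select_entity(rqs));	/* user job */
	rqs[AMD_SCHED_PRIORITY_KERNEL].head = "kernel job";
	printf("%s\n", select_entity(rqs));	/* kernel job */
	return 0;
}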
struct amd_sched_backend_ops *ops,
unsigned hw_submission, long timeout, const char *name)
{
+ int i;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
sched->timeout = timeout;
- amd_sched_rq_init(&sched->sched_rq);
- amd_sched_rq_init(&sched->kernel_rq);
+ for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
+ amd_sched_rq_init(&sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
};
+enum amd_sched_priority {
+ AMD_SCHED_PRIORITY_KERNEL = 0,
+ AMD_SCHED_PRIORITY_NORMAL,
+ AMD_SCHED_MAX_PRIORITY
+};
+
/**
* One scheduler is implemented for each hardware ring
*/
uint32_t hw_submission_limit;
long timeout;
const char *name;
- struct amd_sched_rq sched_rq;
- struct amd_sched_rq kernel_rq;
+ struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY];
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
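
A nice property of sizing everything with the AMD_SCHED_MAX_PRIORITY sentinel: the sched_rq array above, the init loop in amd_sched_init() and the scan in amd_sched_select_entity() all key off it, so introducing another level is a one-line enum change. A hypothetical extension, not part of this patch:

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_HIGH,	/* hypothetical new level */
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY		/* grows automatically */
};

Existing contexts created with AMD_SCHED_PRIORITY_NORMAL would keep working unchanged; only code that stores or compares raw priority values numerically would need auditing.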