ring->fence_drv.ring = ring;
if (amdgpu_enable_scheduler) {
- ring->scheduler = amd_sched_create((void *)ring->adev,
- &amdgpu_sched_ops,
- ring->idx, 5, 0,
+ ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+ ring->idx,
amdgpu_sched_hw_submission);
if (!ring->scheduler)
DRM_ERROR("Failed to create scheduler on ring %d.\n",
/**
* Create a gpu scheduler
*
- * @device The device context for this scheduler
- * @ops The backend operations for this scheduler.
- * @id The scheduler is per ring, here is ring id.
- * @granularity The minumum ms unit the scheduler will scheduled.
- * @preemption Indicate whether this ring support preemption, 0 is no.
+ * @ops The backend operations for this scheduler.
+ * @ring The ring id for the scheduler.
+ * @hw_submission Number of hw submissions that can be in flight.
*
- * return the pointer to scheduler for success, otherwise return NULL
+ * Return the pointer to the scheduler on success, otherwise return NULL
*/
-struct amd_gpu_scheduler *amd_sched_create(void *device,
- struct amd_sched_backend_ops *ops,
- unsigned ring,
- unsigned granularity,
- unsigned preemption,
- unsigned hw_submission)
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+ unsigned ring, unsigned hw_submission)
{
struct amd_gpu_scheduler *sched;
char name[20];
if (!sched)
return NULL;
- sched->device = device;
sched->ops = ops;
- sched->granularity = granularity;
sched->ring_id = ring;
- sched->preemption = preemption;
sched->hw_submission_limit = hw_submission;
snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
amd_sched_rq_init(&sched->sched_rq);
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
- void *device;
struct task_struct *thread;
struct amd_sched_rq sched_rq;
struct amd_sched_rq kernel_rq;
atomic_t hw_rq_count;
struct amd_sched_backend_ops *ops;
uint32_t ring_id;
- uint32_t granularity; /* in ms unit */
- uint32_t preemption;
wait_queue_head_t wait_queue;
uint32_t hw_submission_limit;
};
-struct amd_gpu_scheduler *amd_sched_create(void *device,
- struct amd_sched_backend_ops *ops,
- uint32_t ring,
- uint32_t granularity,
- uint32_t preemption,
- uint32_t hw_submission);
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+ uint32_t ring, uint32_t hw_submission);
int amd_sched_destroy(struct amd_gpu_scheduler *sched);
int amd_sched_push_job(struct amd_sched_job *sched_job);
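
For reference, a minimal sketch of how a per-ring init path would call the trimmed-down interface declared above. This is not part of the patch: the ops variable, the helper name and the error handling are placeholders, and the backend callbacks are assumed to be filled in elsewhere by the driver.

/* Hedged sketch: exercises the new three-argument amd_sched_create().
 * my_sched_ops and my_ring_sched_init are illustrative names only.
 */
static struct amd_sched_backend_ops my_sched_ops; /* callbacks wired up elsewhere */

static int my_ring_sched_init(uint32_t ring_idx, uint32_t hw_submission)
{
	struct amd_gpu_scheduler *sched;

	/* device, granularity and preemption are gone; only the backend
	 * ops, the ring id and the hw submission limit remain. */
	sched = amd_sched_create(&my_sched_ops, ring_idx, hw_submission);
	if (!sched) {
		DRM_ERROR("Failed to create scheduler on ring %u.\n", ring_idx);
		return -ENOMEM;
	}

	/* ... later, on teardown ... */
	amd_sched_destroy(sched);
	return 0;
}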