/* job_finish is called after the hw fence is signaled, and
 * the job has already been deleted from the ring_mirror_list
 */
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct amd_sched_job *s_job)
{
struct amd_sched_job *next;
struct amd_gpu_scheduler *sched = s_job->sched;
}
}
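
The body of amd_sched_job_finish is elided here. As a rough sketch only, the timeout hand-off such a finish hook performs could look like the following; the members work_tdr, timeout_callback and sched->timeout, and the helper amd_sched_job_put(), are assumed names that do not appear in this excerpt:

	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		/* cancel the pending timeout work for the finished job */
		if (cancel_delayed_work(&s_job->work_tdr))
			amd_sched_job_put(s_job);

		/* re-arm the timeout for the next job still on the mirror list */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);
		if (next) {
			INIT_DELAYED_WORK(&next->work_tdr, next->timeout_callback);
			amd_sched_job_get(next);
			schedule_delayed_work(&next->work_tdr, sched->timeout);
		}
	}
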
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
/* remove job from ring_mirror_list */
spin_lock_irqsave(&sched->job_list_lock, flags);
list_del_init(&s_fence->s_job->node);
- sched->ops->finish_job(s_fence->s_job);
+ amd_sched_job_finish(s_fence->s_job);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
amd_sched_fence_signal(s_fence);
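
Note for this hunk: s_fence is not declared in the visible context; it is the scheduler fence recovered inside the hw-fence callback that this code lives in. A minimal sketch of that recovery, assuming the callback is named amd_sched_process_job and that the scheduler fence embeds its fence_cb as a member named cb and carries a back pointer named sched:

	static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
	{
		struct amd_sched_fence *s_fence =
			container_of(cb, struct amd_sched_fence, cb);
		struct amd_gpu_scheduler *sched = s_fence->sched;
		unsigned long flags;

		/* ... locked list_del_init() and amd_sched_job_finish()
		 * as shown in the hunk above ... */
	}
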
{
struct sched_param sparam = {.sched_priority = 1};
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+ unsigned long flags;
int r, count;
sched_setscheduler(current, SCHED_FIFO, &sparam);
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_pre_schedule(sched, sched_job);
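+ /* add the job to ring_mirror_list before it is pushed to the hw ring */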
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_add_tail(&sched_job->node, &sched->ring_mirror_list);
+ amd_sched_job_begin(sched_job);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*begin_job)(struct amd_sched_job *sched_job);
- void (*finish_job)(struct amd_sched_job *sched_job);
};
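
With begin_job/finish_job dropped from the backend interface, a driver now only fills the remaining hooks. A hypothetical table for illustration; my_dependency and my_run_job are placeholder names, not functions from this patch:

	static struct fence *my_dependency(struct amd_sched_job *sched_job)
	{
		return NULL; /* nothing to wait for */
	}

	static struct fence *my_run_job(struct amd_sched_job *sched_job)
	{
		/* push the job to the hardware ring and return its hw fence */
		return NULL;
	}

	static const struct amd_sched_backend_ops my_sched_ops = {
		.dependency	= my_dependency,
		.run_job	= my_run_job,
	};
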
enum amd_sched_priority {
void (*timeout_cb)(struct work_struct *work),
void (*free_cb)(struct kref* refcount),
void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
static inline void amd_sched_job_get(struct amd_sched_job *job)
{
if (job)
FENCE_TRACE(&fence->base, "was already signaled\n");
}
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job)
-{
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
- sched->ops->begin_job(s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-}
-
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
struct fence_cb *cur, *tmp;