#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_free_handler(struct work_struct *ws)
-{
- struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
- amd_sched_job_put(&job->base);
-}
-
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
job->base.sched->name,
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
-
- amd_sched_job_put(&job->base);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
- INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_free(&job->sync);
}
-void amdgpu_job_free_func(struct kref *refcount)
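+/* free_job backend callback: the scheduler calls this in process
+ * context once the job's fence signaled and it left the ring_mirror_list
+ */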
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+
kfree(job);
}
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_free_func, owner, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence);
if (r)
return r;
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb,
};
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
return added;
}
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
- cb_free_job);
-
- schedule_work(&job->work_free_job);
-}
-
-/* job_finish is called after hw fence signaled, and
- * the job had already been deleted from ring_mirror_list
- */
+/* job_finish runs in process context once the hw fence has signaled;
+ * it removes the job from the ring_mirror_list and then frees it
+ */
-static void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *next;
+ struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
unsigned long flags;
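+ /* remove job from ring_mirror_list */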
spin_lock_irqsave(&sched->job_list_lock, flags);
list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- if (cancel_delayed_work(&s_job->work_tdr))
- amd_sched_job_put(s_job);
+ struct amd_sched_job *next;
+
+ /* cancel_delayed_work_sync() can sleep, so it must not be
+ * called while holding job_list_lock with interrupts disabled
+ */
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ cancel_delayed_work_sync(&s_job->work_tdr);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (next) {
- amd_sched_job_get(next);
+ if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
}
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ sched->ops->free_job(s_job);
+}
+
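+/* fence callback, may run in interrupt context: don't free the job
+ * here, just hand it off to the finish_work worker
+ */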
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ finish_cb);
+ schedule_work(&job->finish_work);
}
static void amd_sched_job_begin(struct amd_sched_job *s_job)
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node) == s_job)
- {
- amd_sched_job_get(s_job);
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
- }
spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
- fence_add_callback(&sched_job->s_fence->base,
- &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
+ fence_add_callback(&sched_job->s_fence->base, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
int amd_sched_job_init(struct amd_sched_job *job,
struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
- void (*free_cb)(struct kref *refcount),
void *owner, struct fence **fence)
{
- INIT_LIST_HEAD(&job->node);
- kref_init(&job->refcount);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->s_fence->s_job = job;
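+ /* job cleanup runs from finish_work once the scheduler fence signals */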
+ INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_LIST_HEAD(&job->node);
INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
- job->free_callback = free_cb;
if (fence)
*fence = &job->s_fence->base;
struct amd_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
-
- amd_sched_job_finish(s_fence->s_job);
-
amd_sched_fence_signal(s_fence);
trace_amd_sched_process_job(s_fence);
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct amd_sched_job *s_job;
};
struct amd_sched_job {
- struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb cb_free_job;
- struct work_struct work_free_job;
- struct list_head node;
- struct delayed_work work_tdr;
- void (*free_callback)(struct kref *refcount);
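+ /* finish_cb fires when the scheduler fence signals and schedules
+ * finish_work, which unlinks and frees the job in process context
+ */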
+ struct fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
};
extern const struct fence_ops amd_sched_fence_ops;
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
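+ /* called in process context once the job is finished and unlinked */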
+ void (*free_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
int amd_sched_job_init(struct amd_sched_job *job,
struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
- void (*free_cb)(struct kref* refcount),
void *owner, struct fence **fence);
-static inline void amd_sched_job_get(struct amd_sched_job *job)
-{
- if (job)
- kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job)
-{
- if (job)
- kref_put(&job->refcount, job->free_callback);
-}
-
#endif