priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->sched);
+ ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
return 0;
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence;
ats_entries = 0;
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
- sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
+ sched);
amdgpu_ring_pad_ib(ring, params.ib);
addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq_list[0];
- entity->sched = rq_list[0]->sched;
entity->guilty = guilty;
entity->last_scheduled = NULL;
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
- return entity->sched == sched &&
- entity->rq != NULL;
+ return entity->rq != NULL &&
+ entity->rq->sched == sched;
}
/**
struct drm_gpu_scheduler *sched;
long ret = timeout;
- sched = entity->sched;
+ sched = entity->rq->sched;
if (!drm_sched_entity_is_initialized(sched, entity))
return ret;
/**
{
struct drm_gpu_scheduler *sched;
- sched = entity->sched;
+ sched = entity->rq->sched;
drm_sched_entity_set_rq(entity, NULL);
/* Consumption of existing IBs wasn't completed. Forcefully
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
struct drm_sched_entity *entity,
void *owner)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
job->sched = sched;
job->entity = entity;
* runqueue.
* @rq: runqueue to which this entity belongs.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @sched: the scheduler instance to which this entity is enqueued.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
* new &drm_sched_fence which is part of the entity.
struct list_head list;
struct drm_sched_rq *rq;
spinlock_t rq_lock;
- struct drm_gpu_scheduler *sched;
struct spsc_queue job_queue;
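
Every hunk above applies the same pattern: the cached `entity->sched` pointer is dropped and callers instead resolve the scheduler through the entity's current run queue (`entity->rq->sched`). A minimal standalone sketch of that indirection, using simplified stand-in structs (the `_sketch` names and helper are illustrative only, not the real DRM scheduler API):

```c
#include <stddef.h>

struct drm_gpu_scheduler_sketch;

struct drm_sched_rq_sketch {
	struct drm_gpu_scheduler_sketch *sched;	/* scheduler owning this run queue */
};

struct drm_sched_entity_sketch {
	struct drm_sched_rq_sketch *rq;		/* current run queue; may be NULL once detached */
	/* no separate "sched" pointer: it is derived from rq on each use */
};

/* Resolve the scheduler through the run queue, mirroring entity->rq->sched
 * in the patch; returns NULL once the entity has no run queue. */
static struct drm_gpu_scheduler_sketch *
entity_to_sched(struct drm_sched_entity_sketch *entity)
{
	return entity->rq ? entity->rq->sched : NULL;
}
```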