{
struct amdgpu_ring *ring = p->ring;
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
...
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(p->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
...

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
+ enum drm_sched_priority priority;
+ struct amdgpu_ring *ring;
int r;
if (!f)
	return -EINVAL;
...
job->owner = owner;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(ring, job->base.s_priority);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
return 0;
}
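
For reference, a minimal self-contained sketch of the ordering both hunks establish, written with hypothetical stand-in types (gpu_ring, sched_entity, sched_job, entity_push_job, ring_priority_get) rather than the real amdgpu/drm structures: copy the priority out of the job before drm_sched_entity_push_job(), since the scheduler owns the job once it is pushed and may free it at any time afterwards, and only resolve the ring through the entity after the push, so the reference lands on whichever ring the entity actually ends up on.

/* sketch.c -- hypothetical stand-ins, not the real amdgpu/drm types */
#include <stdio.h>
#include <stdlib.h>

enum sched_priority { PRIO_NORMAL = 0, PRIO_HIGH = 1, PRIO_COUNT };

struct gpu_ring {
	const char *name;
	int prio_refs[PRIO_COUNT];
};

struct sched_entity {
	struct gpu_ring *ring;		/* stands in for entity->sched -> ring */
};

struct sched_job {
	enum sched_priority priority;	/* stands in for job->base.s_priority */
};

/* The "scheduler" owns the job after this call; it may free it immediately. */
static void entity_push_job(struct sched_entity *entity, struct sched_job *job)
{
	(void)entity;
	free(job);
}

static void ring_priority_get(struct gpu_ring *ring, enum sched_priority prio)
{
	ring->prio_refs[prio]++;
}

static void submit(struct sched_entity *entity, struct sched_job *job)
{
	enum sched_priority priority;
	struct gpu_ring *ring;

	priority = job->priority;	/* read the job BEFORE pushing it     */
	entity_push_job(entity, job);	/* job must not be touched after this */

	ring = entity->ring;		/* resolve the ring AFTER the push    */
	ring_priority_get(ring, priority);
}

int main(void)
{
	struct gpu_ring ring = { "gfx", { 0 } };
	struct sched_entity entity = { &ring };
	struct sched_job *job = malloc(sizeof(*job));

	if (!job)
		return 1;
	job->priority = PRIO_HIGH;
	submit(&entity, job);
	printf("%s refs[HIGH] = %d\n", ring.name, ring.prio_refs[PRIO_HIGH]);
	return 0;
}

The stand-ins only make the before/after ordering around the push call compilable; they do not model the real reference counting or scheduler behaviour.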