drm/amdgpu: remove job->ring
Author:    Christian König <christian.koenig@amd.com>
           Fri, 13 Jul 2018 13:08:44 +0000 (15:08 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
           Mon, 16 Jul 2018 21:11:53 +0000 (16:11 -0500)
We can easily get that from the scheduler.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

index e0cc9f878e803aff3f11ec242df7b80364da4350..90780b0f0ce5706eb33569cf2ec7cc26e4aa1362 100644 (file)
@@ -1027,6 +1027,7 @@ struct amdgpu_cs_parser {
 
        /* scheduler job object */
        struct amdgpu_job       *job;
+       struct amdgpu_ring      *ring;
 
        /* buffer objects */
        struct ww_acquire_ctx           ticket;
index 6eb7ee859ffd1a131bf7f15c699ea1634704529f..72dc9b36b937f9a9f29d40b891a1a9643ecce177 100644 (file)
@@ -912,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
-       struct amdgpu_ring *ring = p->job->ring;
+       struct amdgpu_ring *ring = p->ring;
        int r;
 
        /* Only for UVD/VCE VM emulation */
-       if (p->job->ring->funcs->parse_cs) {
+       if (p->ring->funcs->parse_cs) {
                unsigned i, j;
 
                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -1030,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                        }
                }
 
-               if (parser->job->ring && parser->job->ring != ring)
+               if (parser->ring && parser->ring != ring)
                        return -EINVAL;
 
-               parser->job->ring = ring;
+               parser->ring = ring;
 
                r =  amdgpu_ib_get(adev, vm,
                                        ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1052,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
        /* UVD & VCE fw doesn't support user fences */
        if (parser->job->uf_addr && (
-           parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-           parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+           parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+           parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
 
-       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1207,7 +1207,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
 {
-       struct amdgpu_ring *ring = p->job->ring;
+       struct amdgpu_ring *ring = p->ring;
        struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
        struct amdgpu_job *job;
        unsigned i;
@@ -1256,7 +1256,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        job->uf_sequence = seq;
 
        amdgpu_job_free_resources(job);
-       amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+       amdgpu_ring_priority_get(p->ring, job->base.s_priority);
 
        trace_amdgpu_cs_ioctl(job);
        drm_sched_entity_push_job(&job->base, entity);
index 2720444ff23a0b08c5200fb3dc04b6b49682fb5a..2b2de5f3e6e35185071f1cd40f7868d01520bea9 100644 (file)
@@ -3253,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                kthread_park(ring->sched.thread);
 
-               if (job && job->ring->idx != i)
+               if (job && job->base.sched != &ring->sched)
                        continue;
 
                drm_sched_hw_job_reset(&ring->sched, &job->base);
@@ -3277,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                 * or all rings (in the case @job is NULL)
                 * after above amdgpu_reset accomplished
                 */
-               if ((!job || job->ring->idx == i) && !r)
+               if ((!job || job->base.sched == &ring->sched) && !r)
                        drm_sched_job_recovery(&ring->sched);
 
                kthread_unpark(ring->sched.thread);
index 51ff751e093b9c7343a09979aa6ef47300486a1e..2496f2269bcb94acf65466cafb36728f90922946 100644 (file)
 
 static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+       struct amdgpu_job *job = to_amdgpu_job(s_job);
 
        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-                 job->base.sched->name,
-                 atomic_read(&job->ring->fence_drv.last_seq),
-                 job->ring->fence_drv.sync_seq);
+                 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+                 ring->fence_drv.sync_seq);
 
        amdgpu_device_gpu_recover(job->adev, job, false);
 }
@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+       struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-       amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+       amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
        int r;
 
        if (!f)
@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                return r;
 
        job->owner = owner;
-       job->ring = to_amdgpu_ring(entity->sched);
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
-       amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+       amdgpu_ring_priority_get(ring, job->base.s_priority);
        drm_sched_entity_push_job(&job->base, entity);
 
        return 0;
@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        bool explicit = false;
@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
        }
 
        while (fence == NULL && vm && !job->vmid) {
-               struct amdgpu_ring *ring = job->ring;
-
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_device *adev;
        struct amdgpu_job *job;
@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
-               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+               r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
index 39f4230e1d37f3accbcac7f2fa7a4b1741b0b5e4..c663c1925f91f954fbd52d70eceb0e167947e4e8 100644 (file)
@@ -37,7 +37,6 @@ struct amdgpu_job {
        struct drm_sched_job    base;
        struct amdgpu_device    *adev;
        struct amdgpu_vm        *vm;
-       struct amdgpu_ring      *ring;
        struct amdgpu_sync      sync;
        struct amdgpu_sync      sched_sync;
        struct amdgpu_ib        *ibs;
index e96e26d3f3b085f38986f5cd6112e0e7325bd56c..76920035eb22da59d1204795db3479ebb312b42d 100644 (file)
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
 
            TP_fast_assign(
                           __entry->bo_list = p->bo_list;
-                          __entry->ring = p->job->ring->idx;
+                          __entry->ring = p->ring->idx;
                           __entry->dw = p->job->ibs[i].length_dw;
                           __entry->fences = amdgpu_fence_count_emitted(
-                               p->job->ring);
+                               p->ring);
                           ),
            TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
                      __entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
                           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                           __entry->context = job->base.s_fence->finished.context;
                           __entry->seqno = job->base.s_fence->finished.seqno;
-                          __entry->ring_name = job->ring->name;
+                          __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
            TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
                           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                           __entry->context = job->base.s_fence->finished.context;
                           __entry->seqno = job->base.s_fence->finished.seqno;
-                          __entry->ring_name = job->ring->name;
+                          __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
            TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
index 848b2e89881852a89c3c64f2b7ae6964188ef2c9..7738d2b90dae3e8734e59774deeab9db772c7eb3 100644 (file)
@@ -692,11 +692,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                             struct amdgpu_bo *bo, unsigned offset)
 {
        struct amdgpu_device *adev = ctx->parser->adev;
+       uint32_t ip_instance = ctx->parser->ring->me;
        int32_t *msg, msg_type, handle;
        void *ptr;
        long r;
        int i;
-       uint32_t ip_instance = ctx->parser->job->ring->me;
 
        if (offset & 0x3F) {
                DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);