drm/amdgpu: remove job->adev (v2)
Author:     Christian König <christian.koenig@amd.com>
AuthorDate: Fri, 13 Jul 2018 15:15:54 +0000 (17:15 +0200)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Tue, 17 Jul 2018 19:18:28 +0000 (14:18 -0500)
The adev pointer can always be derived from the ring, so there is no need
to keep a separate copy in the job.
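
For reference, the accessor chain the patch switches to looks like this
(a minimal sketch mirroring the hunks below; to_amdgpu_ring() is the
existing amdgpu container_of() helper around the scheduler embedded in
each ring):

	/* every job carries a scheduler, every amdgpu scheduler is
	 * embedded in a ring, so adev is always reachable: */
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct amdgpu_device *adev = ring->adev;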

v2: squash in "drm/amdgpu: always initialize job->base.sched" (Alex)
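
The squash matters because amdgpu_job_free_resources() now derives adev
through job->base.sched, and a job can be freed on an error path before
it was ever submitted to an entity, which is what normally sets
base.sched. Seeding it with an arbitrary ring in amdgpu_job_alloc()
keeps the derivation safe. A minimal sketch of such an error path, using
the alloc/free helpers visible in this file (names as of this patch; the
failing setup step itself is hypothetical):

	struct amdgpu_job *job;
	int r = amdgpu_job_alloc(adev, 1, &job, NULL);

	if (r)
		return r;
	/* ... IB setup fails before the job reaches amdgpu_job_submit() ... */
	amdgpu_job_free(job); /* free_resources() already needs a valid base.sched */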

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0e2b18ccdf2e32d53873f9bcc662bd071d14a5d9..9b1c54ace5834772f0b4a0d4887780d899eb96f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
 
-       amdgpu_device_gpu_recover(job->adev, job, false);
+       amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        if (!*job)
                return -ENOMEM;
 
-       (*job)->adev = adev;
+       /*
+        * Initialize the scheduler to at least some ring so that we always
+        * have a pointer to adev.
+        */
+       (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;
 
@@ -93,7 +98,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
        for (i = 0; i < job->num_ibs; ++i)
-               amdgpu_ib_free(job->adev, &job->ibs[i], f);
+               amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -167,7 +172,8 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
-                       r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+                       r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+                                             fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence to sync (%d)\n", r);
                }
@@ -190,7 +196,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
-       struct amdgpu_device *adev;
        struct amdgpu_job *job;
        int r;
 
@@ -200,13 +205,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        }
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
-       adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
 
-       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+       if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
        if (finished->error < 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index d77fd232f7ce3e3510a95467e60f2cdd7a8d734e..57cfe78a262b11e70eb037afda5755769932fabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -37,7 +37,6 @@ struct amdgpu_fence;
 
 struct amdgpu_job {
        struct drm_sched_job    base;
-       struct amdgpu_device    *adev;
        struct amdgpu_vm        *vm;
        struct amdgpu_sync      sync;
        struct amdgpu_sync      sched_sync;