drm/amdgpu: unify AMDGPU_CTX_MAX_CS_PENDING and amdgpu_sched_jobs
author    Chunming Zhou <David1.Zhou@amd.com>
          Thu, 10 Dec 2015 07:45:11 +0000 (15:45 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 15 Dec 2015 00:45:24 +0000 (19:45 -0500)
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3b5d3706f0cb122bd052621512cf83dda429d396..c3996e0e2e7e52ce25db5600ade58d62d6d120fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1023,11 +1023,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
  * context related structures
  */
 
-#define AMDGPU_CTX_MAX_CS_PENDING      16
-
 struct amdgpu_ctx_ring {
        uint64_t                sequence;
-       struct fence            *fences[AMDGPU_CTX_MAX_CS_PENDING];
+       struct fence            **fences;
        struct amd_sched_entity entity;
 };
 
@@ -1036,6 +1034,7 @@ struct amdgpu_ctx {
        struct amdgpu_device    *adev;
        unsigned                reset_counter;
        spinlock_t              ring_lock;
+       struct fence            **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
 };
 
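The header-side change replaces the fixed AMDGPU_CTX_MAX_CS_PENDING-sized array with a pointer into a single per-context allocation. A minimal sketch of the intended layout, using illustrative names njobs (for amdgpu_sched_jobs) and nrings (for AMDGPU_MAX_RINGS) that are not part of the patch:

    /* Sketch only: one flat allocation of fence pointers, carved into
     * per-ring windows of njobs slots each.
     *
     *   ctx->fences: [ ring 0 | ring 1 | ... | ring nrings-1 ]
     *                  njobs    njobs          njobs slots
     */
    struct fence **fences = kcalloc((size_t)njobs * nrings, sizeof(*fences),
                                    GFP_KERNEL);
    if (!fences)
            return -ENOMEM;
    for (i = 0; i < nrings; ++i)
            rings[i].fences = fences + (size_t)njobs * i;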
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 15e341634536f202442205f93e663e08b2dae0df..ee121ec2917bb560dbd67e38e53021b5607aafb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,15 +35,24 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-               ctx->rings[i].sequence = 1;
+       ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+                        AMDGPU_MAX_RINGS, GFP_KERNEL);
+       if (!ctx->fences)
+               return -ENOMEM;
 
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               ctx->rings[i].sequence = 1;
+               ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+                       amdgpu_sched_jobs * i;
+       }
        if (amdgpu_enable_scheduler) {
                /* create context entity for each ring */
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_sched_rq *rq;
-                       if (pri >= AMD_SCHED_MAX_PRIORITY)
+                       if (pri >= AMD_SCHED_MAX_PRIORITY) {
+                               kfree(ctx->fences);
                                return -EINVAL;
+                       }
                        rq = &adev->rings[i]->sched.sched_rq[pri];
                        r = amd_sched_entity_init(&adev->rings[i]->sched,
                                                  &ctx->rings[i].entity,
@@ -56,6 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
                        for (j = 0; j < i; j++)
                                amd_sched_entity_fini(&adev->rings[j]->sched,
                                                      &ctx->rings[j].entity);
+                       kfree(ctx->fences);
                        return r;
                }
        }
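The per-ring slicing in the hunk above does byte arithmetic on a void pointer; since ctx->fences is already a struct fence **, an equivalent formulation (a sketch, not part of the patch) would index the pointer array directly:

    /* Equivalent to the (void *) arithmetic above: ring i gets a window
     * of amdgpu_sched_jobs consecutive fence-pointer slots. */
    ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];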
@@ -71,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
                return;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-               for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+               for (j = 0; j < amdgpu_sched_jobs; ++j)
                        fence_put(ctx->rings[i].fences[j]);
+       kfree(ctx->fences);
 
        if (amdgpu_enable_scheduler) {
                for (i = 0; i < adev->num_rings; i++)
@@ -241,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
        unsigned idx = 0;
        struct fence *other = NULL;
 
-       idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+       idx = seq % amdgpu_sched_jobs;
        other = cring->fences[idx];
        if (other) {
                signed long r;
@@ -276,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
        }
 
 
-       if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+       if (seq + amdgpu_sched_jobs < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }
 
-       fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+       fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
        spin_unlock(&ctx->ring_lock);
 
        return fence;
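Taken together, the two hunks above keep each ring's fences in a ring buffer of amdgpu_sched_jobs entries indexed by seq % amdgpu_sched_jobs, so only the last amdgpu_sched_jobs fences of a ring remain retrievable; the seq + amdgpu_sched_jobs < cring->sequence test rejects sequence numbers whose slot has already been recycled. A hedged sketch of that lookup invariant, with a hypothetical helper name and locking omitted:

    /* Sketch only (hypothetical helper, no locking): the ring buffer holds
     * the last njobs fences of the ring, nothing older. */
    static struct fence *ctx_ring_lookup(struct amdgpu_ctx_ring *cring,
                                         uint64_t seq, unsigned int njobs)
    {
            if (seq >= cring->sequence)             /* not submitted yet */
                    return NULL;
            if (seq + njobs < cring->sequence)      /* slot already reused */
                    return NULL;
            return fence_get(cring->fences[seq % njobs]);
    }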