	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ctx->rings[i].sequence = 1;
+	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+			      AMDGPU_MAX_RINGS, GFP_KERNEL);
+	if (!ctx->fences)
+		return -ENOMEM;
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		ctx->rings[i].sequence = 1;
+		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+			amdgpu_sched_jobs * i;
+	}
	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY)
+			if (pri >= AMD_SCHED_MAX_PRIORITY) {
+				kfree(ctx->fences);
				return -EINVAL;
+			}
			rq = &adev->rings[i]->sched.sched_rq[pri];
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
+			kfree(ctx->fences);
			return r;
		}
	}
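
For reference, a minimal user-space sketch of the layout the allocation above sets up: one flat kzalloc'd array of amdgpu_sched_jobs * AMDGPU_MAX_RINGS fence pointers, with ring i owning the amdgpu_sched_jobs slots that start at index i * amdgpu_sched_jobs. Everything below (NUM_RINGS, SCHED_JOBS, plain int slots instead of struct fence pointers) is invented for the sketch and is not part of the patch.

/* Illustrative only: models the flat fence array carved into per-ring windows. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_RINGS  4   /* stands in for AMDGPU_MAX_RINGS  */
#define SCHED_JOBS 8   /* stands in for amdgpu_sched_jobs */

struct ring_slice {
	int *fences;   /* points into the shared flat array */
};

int main(void)
{
	int *fences = calloc((size_t)NUM_RINGS * SCHED_JOBS, sizeof(*fences));
	struct ring_slice rings[NUM_RINGS];
	int i;

	if (!fences)
		return 1;

	/* same partitioning as ctx->rings[i].fences = ctx->fences + amdgpu_sched_jobs * i */
	for (i = 0; i < NUM_RINGS; i++)
		rings[i].fences = fences + (size_t)SCHED_JOBS * i;

	/* slot j of ring i is element i * SCHED_JOBS + j of the flat array */
	rings[2].fences[3] = 42;
	printf("%d\n", fences[2 * SCHED_JOBS + 3]);   /* prints 42 */

	free(fences);   /* one free releases every ring's window */
	return 0;
}

The point of the layout is that ctx->rings[i].fences[j] and ctx->fences[i * amdgpu_sched_jobs + j] name the same slot, so a single kfree(ctx->fences) releases every ring's window on both the error paths and in teardown.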

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
+	kfree(ctx->fences);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(&adev->rings[i]->sched,
					      &ctx->rings[i].entity);
	}

	unsigned idx = 0;
	struct fence *other = NULL;

-	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	idx = seq % amdgpu_sched_jobs;
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

-	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
	spin_unlock(&ctx->ring_lock);

	return fence;
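
To show how the two hunks above interact, here is a small user-space model of the per-ring fence window, assuming the same seq % amdgpu_sched_jobs slot choice and the same seq + amdgpu_sched_jobs staleness test. The names (SCHED_JOBS, slots, add_fence, get_fence) are invented for the sketch and only stand in for the driver's cring bookkeeping; there is no locking and no real fence handling here.

/* Illustrative only: models the per-ring fence window indexed by sequence number. */
#include <stdio.h>

#define SCHED_JOBS 8    /* stands in for amdgpu_sched_jobs */

static int slots[SCHED_JOBS];             /* stands in for cring->fences[] */
static unsigned long long sequence = 1;   /* next sequence number to hand out */

/* store a "fence" (here just its own seq number) and advance the sequence */
static unsigned long long add_fence(void)
{
	unsigned long long seq = sequence++;

	slots[seq % SCHED_JOBS] = (int)seq;   /* reuses the slot that seq - SCHED_JOBS used */
	return seq;
}

/* returns 0 on success, -1 if seq is so old that its slot has been recycled */
static int get_fence(unsigned long long seq, int *out)
{
	if (seq + SCHED_JOBS < sequence)      /* same test as the hunk above */
		return -1;
	*out = slots[seq % SCHED_JOBS];
	return 0;
}

int main(void)
{
	unsigned long long first = add_fence();
	int v = 0, i;

	for (i = 0; i < SCHED_JOBS; i++)      /* push the window past 'first' */
		add_fence();

	printf("lookup of first submission: %d\n", get_fence(first, &v));  /* prints -1 */
	return 0;
}

Once amdgpu_sched_jobs newer submissions have gone through, the slot an old sequence number used has been reused, which is exactly the case the seq + amdgpu_sched_jobs < cring->sequence check reports by returning NULL.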