@@ ... @@ static void amdgpu_ctx_do_release(struct kref *ref)
	ctx = container_of(ref, struct amdgpu_ctx, refcount);
- for (i = 0; i < ctx->adev->num_rings; i++)
+ for (i = 0; i < ctx->adev->num_rings; i++) {
+
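+		/* KIQ is driver-internal only; no context entity was created for it */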
+ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+ continue;
+
drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
+ }
amdgpu_ctx_fini(ref);
}
@@ ... @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
		if (!ctx->adev)
return;
- for (i = 0; i < ctx->adev->num_rings; i++)
+ for (i = 0; i < ctx->adev->num_rings; i++) {
+
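+			/* Skip KIQ: it has no context entity, so there is nothing to release */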
+ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+ continue;
+
if (kref_read(&ctx->refcount) == 1)
drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
}
}
@@ ... @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
		if (!ctx->adev)
return;
- for (i = 0; i < ctx->adev->num_rings; i++)
+ for (i = 0; i < ctx->adev->num_rings; i++) {
+
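+			/* Likewise skip KIQ here; it has no context entity to clean up */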
+ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+ continue;
+
if (kref_read(&ctx->refcount) == 1)
drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
}
}