failed:
for (j = 0; j < i; j++)
- drm_sched_entity_fini(&adev->rings[j]->sched,
+ drm_sched_entity_destroy(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+ drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
}
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+ max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity, max_wait);
}
}
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+ drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- drm_sched_entity_fini(adev->mman.entity.sched,
+ drm_sched_entity_destroy(adev->mman.entity.sched,
&adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
kfree(adev->uvd.inst[j].saved_bo);
- drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+ drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&ring->sched, &vm->entity);
return r;
}
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
return r;
if (uvd_v6_0_enc_support(adev)) {
- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+ drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
+ drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_fini(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&gpu->sched,
+ &ctx->sched_entity[i]);
}
}
/**
- * drm_sched_entity_do_release - Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
*
* @sched: scheduler instance
* @entity: scheduler entity
*
* Returns the remaining time in jiffies left from the input timeout
*/
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout)
{
long ret = timeout;
return ret;
}
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
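/*
 * Illustrative sketch, not part of the patch: with the rename in place, a
 * driver tearing down several entities can flush them all against one shared
 * timeout budget, as the amdgpu context-manager hunk above does with
 * max_wait. The helper name "example_flush_entities" and its parameters are
 * hypothetical.
 */
#include <drm/gpu_scheduler.h>

static long example_flush_entities(struct drm_gpu_scheduler *sched,
				   struct drm_sched_entity **entities,
				   unsigned int count, long timeout)
{
	unsigned int i;

	/* Each call consumes part of the budget and returns what is left. */
	for (i = 0; i < count; i++)
		timeout = drm_sched_entity_flush(sched, entities[i], timeout);

	return timeout;
}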
/**
- * drm_sched_entity_cleanup - Destroy a context entity
+ * drm_sched_entity_fini - Destroy a context entity
* entity and signals all jobs with an error code if the process was killed.
*
*/
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
}
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
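/*
 * Illustrative sketch, not part of the patch: under the new names,
 * drm_sched_entity_fini() is only the second half of the old teardown. A
 * caller is expected to flush first and invoke it once no more jobs will be
 * pushed, much like the kref_read() == 1 path in the amdgpu hunk above.
 * "example_finish_entity" is a hypothetical helper.
 */
#include <drm/gpu_scheduler.h>

static void example_finish_entity(struct drm_gpu_scheduler *sched,
				  struct drm_sched_entity *entity)
{
	/* Assumes drm_sched_entity_flush() already drained this entity. */
	drm_sched_entity_fini(sched, entity);
}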
/**
- * drm_sched_entity_fini - Destroy a context entity
+ * drm_sched_entity_destroy - Destroy a context entity
*
- * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
*/
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
- drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
- drm_sched_entity_cleanup(sched, entity);
+ drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(sched, entity);
}
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
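/*
 * Illustrative sketch, not part of the patch: drm_sched_entity_destroy() is
 * the one-call convenience for the common case; per the body above it is
 * simply a flush with the default MAX_WAIT_SCHED_ENTITY_Q_EMPTY budget
 * followed by drm_sched_entity_fini(), which is why most hunks in this patch
 * are a pure rename. "example_destroy_entity" is a hypothetical helper.
 */
#include <drm/gpu_scheduler.h>

static void example_destroy_entity(struct drm_gpu_scheduler *sched,
				   struct drm_sched_entity *entity)
{
	/* Equivalent to drm_sched_entity_flush() + drm_sched_entity_fini(). */
	drm_sched_entity_destroy(sched, entity);
}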
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_fini(&v3d->queue[q].sched,
+ drm_sched_entity_destroy(&v3d->queue[q].sched,
&v3d_priv->sched_entity[q]);
}
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
atomic_t *guilty);
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,