		return ERR_CAST(ce);
	reserve_gt(i915);
+	mutex_lock(&ce->ring->timeline->mutex);
	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
+	mutex_unlock(&ce->ring->timeline->mutex);
	unreserve_gt(i915);
	intel_context_unpin(ce);
	return ERR_PTR(ret);
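For readers following the error path: the new unlock is deliberately placed first in err_unreserve so the unwind mirrors the setup order. The sketch below makes that pairing explicit; the pin call and the numbering comments are illustrative additions inferred from the surrounding hunks, not lines taken from the driver.

	ce = intel_context_pin(ctx, engine);		/* 1: pin the context (assumed pair of the unpin below) */
	reserve_gt(i915);				/* 2: reserve the GT */
	mutex_lock(&ce->ring->timeline->mutex);		/* 3: guard the request flow */
	...
err_unreserve:
	mutex_unlock(&ce->ring->timeline->mutex);	/* undo 3 */
	unreserve_gt(i915);				/* undo 2 */
	intel_context_unpin(ce);			/* undo 1 */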
	GEM_TRACE("%s fence %llx:%lld\n",
		  engine->name, request->fence.context, request->fence.seqno);
-	lockdep_assert_held(&request->i915->drm.struct_mutex);
+	lockdep_assert_held(&request->timeline->mutex);
	trace_i915_request_add(request);
	/*
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
+
+	mutex_unlock(&request->timeline->mutex);
}

static unsigned long local_clock_us(unsigned int *cpu)
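Taken together, the hunks above change the lifetime of the lock: it is acquired when the request is allocated and only dropped at the end of i915_request_add(). A minimal sketch of the caller-visible contract, assuming the allocation path patched above is i915_request_alloc(); the wrapper name example_submit() is hypothetical.

static int example_submit(struct intel_engine_cs *engine,
			  struct i915_gem_context *ctx)
{
	struct i915_request *rq;

	/* Returns with rq->timeline->mutex held on success... */
	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* ...and already released on error */

	/* Emit commands into the request while the timeline mutex is held. */

	/* Asserts rq->timeline->mutex and unlocks it before returning. */
	i915_request_add(rq);
	return 0;
}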
	timeline->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&timeline->lock);
+	mutex_init(&timeline->mutex);
	INIT_ACTIVE_REQUEST(&timeline->barrier);
	INIT_ACTIVE_REQUEST(&timeline->last_request);
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
+	struct mutex mutex; /* protects the flow of requests */
+
	unsigned int pin_count;
	const u32 *hwsp_seqno;
	struct i915_vma *hwsp_ggtt;
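The new field's comment ("protects the flow of requests") is terse; the point is that code adding to or retiring from a timeline's request flow can now assert the per-timeline mutex rather than the device-wide struct_mutex, mirroring the lockdep_assert_held() conversion earlier in this patch. The helper below is purely illustrative and not part of the patch.

static void example_retire_flow(struct i915_timeline *tl)
{
	/* The request flow is guarded by the per-timeline mutex now. */
	lockdep_assert_held(&tl->mutex);

	/* ... walk or retire the requests associated with this timeline ... */
}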
		err = -ENOMEM;
		goto out_locked;
	}
-	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
-		goto out_device;
+		goto out_locked;
	}
-	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);
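A hedged reading of this selftest change: assuming mock_request() funnels through the patched allocation path, the request now carries its timeline mutex from allocation until i915_request_add(), so the test can no longer drop struct_mutex after allocating and re-take it before adding without inverting the outer-to-inner ordering; instead it keeps struct_mutex held across the wait and sends the early failure to out_locked. A simplified sketch of the assumed nesting, with placeholder arguments rather than the test's exact ones:

	mutex_lock(&i915->drm.struct_mutex);	/* outer, device-wide lock */
	request = mock_request(engine, ctx, T);	/* timeline mutex taken inside (assumed) */
	if (!request)
		goto out_locked;		/* struct_mutex is still held here */

	/* the fence wait now runs with both locks held in the mock environment */

	i915_request_add(request);		/* drops the timeline mutex */
	mutex_unlock(&i915->drm.struct_mutex);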
	timeline->fence_context = context;
	spin_lock_init(&timeline->lock);
+	mutex_init(&timeline->mutex);
	INIT_ACTIVE_REQUEST(&timeline->barrier);
	INIT_ACTIVE_REQUEST(&timeline->last_request);