Since unifying ringbuffer/execlist submission to use
engine->context_pin, we ensure that the intel_ring is available before
we start constructing the request. We can therefore move the assignment
of request->ring to the central i915_gem_request_alloc() and not
require it in every engine->request_alloc() callback. Another small step
towards simplification (of the core, but at a cost of handling error
pointers in less important callers of engine->context_pin).
v2: Rearrange a few branches to reduce impact of PTR_ERR() on gcc's code
generation.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170504093308.4137-1-chris@chris-wilson.co.uk
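For context, below is a minimal, userspace-compilable sketch of the calling
convention this patch introduces: context_pin now hands back the pinned
struct intel_ring * (or an ERR_PTR-encoded errno) instead of an int, and the
single caller in i915_gem_request_alloc() propagates failures with ERR_CAST.
The ERR_PTR/IS_ERR/PTR_ERR/ERR_CAST helpers here are simplified stand-ins for
the kernel's <linux/err.h>, and fake_engine, fake_ring, fake_context_pin and
fake_request_alloc are hypothetical names used only for illustration, not
part of the patch.

/*
 * Sketch of the new "return the ring or an error pointer" contract.
 * Compile with: cc -Wall sketch.c && ./a.out
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's error-pointer helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct fake_ring { int size; };
struct fake_engine { struct fake_ring buffer; int fail; };

/* New-style pin: hand back the ring on success, ERR_PTR() on failure. */
static struct fake_ring *fake_context_pin(struct fake_engine *engine)
{
	if (engine->fail)
		return ERR_PTR(-ENOMEM);
	return &engine->buffer;
}

/* Caller pattern mirroring i915_gem_request_alloc() after the patch. */
static void *fake_request_alloc(struct fake_engine *engine)
{
	struct fake_ring *ring;

	ring = fake_context_pin(engine);
	if (IS_ERR(ring))
		return ERR_CAST(ring); /* propagate the encoded errno */

	/* request->ring is assigned here, once, for every backend */
	return ring;
}

int main(void)
{
	struct fake_engine ok = { .buffer = { .size = 4096 }, .fail = 0 };
	struct fake_engine bad = { .fail = 1 };

	printf("ok:  IS_ERR=%d\n", IS_ERR(fake_request_alloc(&ok)));
	printf("bad: err=%ld\n", PTR_ERR(fake_request_alloc(&bad)));
	return 0;
}

The same pattern repeats in each converted caller in the diff that follows:
check IS_ERR(), translate with PTR_ERR() (or ERR_CAST() when the function
itself returns a pointer), and otherwise use the returned ring directly.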
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
- ret = engine->context_pin(engine, shadow_ctx);
- if (ret) {
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ret = engine->context_pin(engine, ctx);
- if (ret)
- return ERR_PTR(ret);
+ ring = engine->context_pin(engine, ctx);
+ if (IS_ERR(ring))
+ return ERR_CAST(ring);
+ GEM_BUG_ON(!ring);
ret = reserve_seqno(engine);
if (ret)
req->i915 = dev_priv;
req->engine = engine;
req->ctx = ctx;
+ req->ring = ring;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_ring *ring;
int ret;
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
*
* NB: implied RCS engine...
*/
- ret = engine->context_pin(engine, stream->ctx);
- if (ret)
- goto unlock;
+ ring = engine->context_pin(engine, stream->ctx);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
/* Explicitly track the ID (instead of calling i915_ggtt_offset()
* on the fly) considering the difference with gen8+ and
dev_priv->perf.oa.specific_ctx_id =
i915_ggtt_offset(stream->ctx->engine[engine->id].state);
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- return ret;
+ return 0;
}
/**
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
+ struct intel_ring *ring;
int ret;
engine->set_default_submission(engine);
* be available. To avoid this we always pin the default
* context.
*/
- ret = engine->context_pin(engine, engine->i915->kernel_context);
- if (ret)
- return ret;
+ ring = engine->context_pin(engine, engine->i915->kernel_context);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
ret = intel_engine_init_breadcrumbs(engine);
if (ret)
/* XXX Do we need to preempt to make room for us and our deps? */
}
-static int execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
unsigned int flags;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- if (ce->pin_count++)
- return 0;
+ if (likely(ce->pin_count++))
+ goto out;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state) {
ce->state->obj->mm.dirty = true;
i915_gem_context_get(ctx);
- return 0;
+out:
+ return ce->ring;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
__i915_vma_unpin(ce->state);
err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void execlists_context_unpin(struct intel_engine_cs *engine,
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- GEM_BUG_ON(!ce->ring);
- request->ring = ce->ring;
-
if (i915.enable_guc_submission) {
/*
* Check that the GuC has space for the request before
return vma;
}
-static int intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- if (ce->pin_count++)
- return 0;
+ if (likely(ce->pin_count++))
+ goto out;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state && engine->context_size) {
vma = alloc_context_vma(engine);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto error;
+ goto err;
}
ce->state = vma;
if (ce->state) {
ret = context_pin(ctx);
if (ret)
- goto error;
+ goto err;
ce->state->obj->mm.dirty = true;
}
ce->initialised = true;
i915_gem_context_get(ctx);
- return 0;
-error:
+out:
+ /* One ringbuffer to rule them all */
+ return engine->buffer;
+
+err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- GEM_BUG_ON(!request->engine->buffer);
- request->ring = request->engine->buffer;
-
cs = intel_ring_begin(request, 0);
if (IS_ERR(cs))
return PTR_ERR(cs);
void (*set_default_submission)(struct intel_engine_cs *engine);
- int (*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
int (*request_alloc)(struct drm_i915_gem_request *req);
spin_unlock(&engine->hw_lock);
}
-static int mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
i915_gem_context_get(ctx);
- return 0;
+ return engine->buffer;
}
static void mock_context_unpin(struct intel_engine_cs *engine,
INIT_LIST_HEAD(&mock->link);
mock->delay = 0;
- request->ring = request->engine->buffer;
return 0;
}