drm/i915: Make context pinning part of intel_context_ops
author     Chris Wilson <chris@chris-wilson.co.uk>
Fri, 8 Mar 2019 13:25:20 +0000 (13:25 +0000)
committer  Chris Wilson <chris@chris-wilson.co.uk>
Fri, 8 Mar 2019 13:59:59 +0000 (13:59 +0000)
Push the intel_context pin callback down from intel_engine_cs onto the
context itself, now that the central intel_context_pin() caller is able
to look up the intel_context itself.
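
For illustration, a minimal standalone C sketch of the resulting shape
(simplified: the real intel_context_pin() below takes a gem context and
an engine, looks up the intel_context, and also manages the gem_context
reference and the ctx->active_engines list; mock_pin/mock_unpin here are
made-up stand-ins, not part of this patch):

	#include <assert.h>

	struct intel_context;

	struct intel_context_ops {
		int (*pin)(struct intel_context *ce);
		void (*unpin)(struct intel_context *ce);
	};

	struct intel_context {
		const struct intel_context_ops *ops;
		unsigned int pin_count;
	};

	/* Central caller owns the pin_count; the backend only pins its state. */
	static int intel_context_pin(struct intel_context *ce)
	{
		int err;

		if (!ce->pin_count++) {
			err = ce->ops->pin(ce);
			if (err) {
				ce->pin_count = 0;
				return err;
			}
		}
		assert(ce->pin_count); /* no overflow! */
		return 0;
	}

	static void intel_context_unpin(struct intel_context *ce)
	{
		if (!--ce->pin_count)
			ce->ops->unpin(ce);
	}

	static int mock_pin(struct intel_context *ce) { (void)ce; return 0; }
	static void mock_unpin(struct intel_context *ce) { (void)ce; }

	int main(void)
	{
		static const struct intel_context_ops ops = {
			.pin = mock_pin,
			.unpin = mock_unpin,
		};
		struct intel_context ce = { .ops = &ops, .pin_count = 0 };

		if (intel_context_pin(&ce) == 0)
			intel_context_unpin(&ce);
		return 0;
	}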

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308132522.21573-5-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/intel_context.c
drivers/gpu/drm/i915/intel_context.h
drivers/gpu/drm/i915/intel_context_types.h
drivers/gpu/drm/i915/intel_engine_types.h
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/selftests/mock_engine.c

drivers/gpu/drm/i915/intel_context.c
index d04a1f51a90cfe9cce1cb187488e66928bc4996f..4c1dacb69f5122816eb26b41f2a555a7ed59d3df 100644
@@ -126,6 +126,40 @@ intel_context_instance(struct i915_gem_context *ctx,
        return pos;
 }
 
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+                 struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+       int err;
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return ce;
+
+       if (unlikely(!ce->pin_count++)) {
+               err = ce->ops->pin(ce);
+               if (err)
+                       goto err_unpin;
+
+               mutex_lock(&ctx->mutex);
+               list_add(&ce->active_link, &ctx->active_engines);
+               mutex_unlock(&ctx->mutex);
+
+               i915_gem_context_get(ctx);
+               GEM_BUG_ON(ce->gem_context != ctx);
+       }
+       GEM_BUG_ON(!ce->pin_count); /* no overflow! */
+
+       return ce;
+
+err_unpin:
+       ce->pin_count = 0;
+       return ERR_PTR(err);
+}
+
 static void intel_context_retire(struct i915_active_request *active,
                                 struct i915_request *rq)
 {
drivers/gpu/drm/i915/intel_context.h
index 2c910628acafc1c5c39ef6d78cacbcd9f018def3..ea21473891f724d8909679bc270c24846c4aa73a 100644
@@ -48,11 +48,8 @@ __intel_context_insert(struct i915_gem_context *ctx,
 void
 __intel_context_remove(struct intel_context *ce);
 
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
-       return engine->context_pin(engine, ctx);
-}
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
 static inline void __intel_context_pin(struct intel_context *ce)
 {
drivers/gpu/drm/i915/intel_context_types.h
index 857f5c3353246ae24121f8b5b12738a9a4f59a9a..804fa93de853a36e546ca526bab31a06d7e22313 100644
@@ -19,6 +19,7 @@ struct intel_context;
 struct intel_ring;
 
 struct intel_context_ops {
+       int (*pin)(struct intel_context *ce);
        void (*unpin)(struct intel_context *ce);
        void (*destroy)(struct intel_context *ce);
 };
drivers/gpu/drm/i915/intel_engine_types.h
index 6a570e4e51c32c8b10534cf1e5cd66abcc493ef0..21f3275cc00c6a183a4263bc650e22e23a2ad162 100644
@@ -357,8 +357,6 @@ struct intel_engine_cs {
        void            (*set_default_submission)(struct intel_engine_cs *engine);
 
        const struct intel_context_ops *cops;
-       struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
-                                            struct i915_gem_context *ctx);
 
        int             (*request_alloc)(struct i915_request *rq);
        int             (*init_context)(struct i915_request *rq);
drivers/gpu/drm/i915/intel_lrc.c
index 7aeff600f12ea129a39a1c4b0f5536bf02eff855..d9bb744c3174443861ce22e3f44620e7f461a71f 100644
 
 #define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine,
-                                           struct intel_context *ce);
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+                                           struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_context *ce,
                                     struct intel_engine_cs *engine,
@@ -330,11 +329,10 @@ assert_priority_queue(const struct i915_request *prev,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-                                  struct intel_engine_cs *engine,
-                                  struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
+       struct i915_gem_context *ctx = ce->gem_context;
        u64 desc;
 
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -352,7 +350,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
         * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
         * anything below.
         */
-       if (INTEL_GEN(ctx->i915) >= 11) {
+       if (INTEL_GEN(engine->i915) >= 11) {
                GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
                desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
                                                                /* bits 37-47 */
@@ -369,7 +367,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
                desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;   /* bits 32-52 */
        }
 
-       ce->lrc_desc = desc;
+       return desc;
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -1290,7 +1288,7 @@ static void execlists_context_unpin(struct intel_context *ce)
        i915_gem_context_put(ce->gem_context);
 }
 
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+static int __context_pin(struct i915_vma *vma)
 {
        unsigned int flags;
        int err;
@@ -1313,11 +1311,14 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 }
 
 static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
-                            struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+                            struct intel_engine_cs *engine)
 {
-       u32 *regs = ce->lrc_reg_state;
        struct intel_ring *ring = ce->ring;
+       u32 *regs = ce->lrc_reg_state;
+
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
        regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
        regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1329,25 +1330,26 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
                        gen8_make_rpcs(engine->i915, &ce->sseu);
 }
 
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
-                       struct i915_gem_context *ctx,
-                       struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+                       struct intel_engine_cs *engine)
 {
        void *vaddr;
        int ret;
 
-       ret = execlists_context_deferred_alloc(ctx, engine, ce);
+       GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+       ret = execlists_context_deferred_alloc(ce, engine);
        if (ret)
                goto err;
        GEM_BUG_ON(!ce->state);
 
-       ret = __context_pin(ctx, ce->state);
+       ret = __context_pin(ce->state);
        if (ret)
                goto err;
 
        vaddr = i915_gem_object_pin_map(ce->state->obj,
-                                       i915_coherent_map_type(ctx->i915) |
+                                       i915_coherent_map_type(engine->i915) |
                                        I915_MAP_OVERRIDE);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
@@ -1358,26 +1360,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
        if (ret)
                goto unpin_map;
 
-       ret = i915_gem_context_pin_hw_id(ctx);
+       ret = i915_gem_context_pin_hw_id(ce->gem_context);
        if (ret)
                goto unpin_ring;
 
-       intel_lr_context_descriptor_update(ctx, engine, ce);
-
-       GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+       ce->lrc_desc = lrc_descriptor(ce, engine);
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-       __execlists_update_reg_state(engine, ce);
+       __execlists_update_reg_state(ce, engine);
 
        ce->state->obj->pin_global++;
-
-       mutex_lock(&ctx->mutex);
-       list_add(&ce->active_link, &ctx->active_engines);
-       mutex_unlock(&ctx->mutex);
-
-       i915_gem_context_get(ctx);
-       return ce;
+       return 0;
 
 unpin_ring:
        intel_ring_unpin(ce->ring);
@@ -1386,31 +1378,16 @@ unpin_map:
 unpin_vma:
        __i915_vma_unpin(ce->state);
 err:
-       ce->pin_count = 0;
-       return ERR_PTR(ret);
+       return ret;
 }
 
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
-                     struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
 {
-       struct intel_context *ce;
-
-       ce = intel_context_instance(ctx, engine);
-       if (IS_ERR(ce))
-               return ce;
-
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-       GEM_BUG_ON(!ctx->ppgtt);
-
-       if (likely(ce->pin_count++))
-               return ce;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-       return __execlists_context_pin(engine, ctx, ce);
+       return __execlists_context_pin(ce, ce->engine);
 }
 
 static const struct intel_context_ops execlists_context_ops = {
+       .pin = execlists_context_pin,
        .unpin = execlists_context_unpin,
        .destroy = execlists_context_destroy,
 };
@@ -2034,7 +2011,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
        intel_ring_update_space(rq->ring);
 
        execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
-       __execlists_update_reg_state(engine, rq->hw_context);
+       __execlists_update_reg_state(rq->hw_context, engine);
 
 out_unlock:
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -2359,7 +2336,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->reset.finish = execlists_reset_finish;
 
        engine->cops = &execlists_context_ops;
-       engine->context_pin = execlists_context_pin;
        engine->request_alloc = execlists_request_alloc;
 
        engine->emit_flush = gen8_emit_flush;
@@ -2836,9 +2812,13 @@ err_unpin_ctx:
        return ret;
 }
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine,
-                                           struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+       return i915_timeline_create(ctx->i915, ctx->name, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+                                           struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *ctx_obj;
        struct i915_vma *vma;
@@ -2858,23 +2838,25 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
         */
        context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
-       ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+       ctx_obj = i915_gem_object_create(engine->i915, context_size);
        if (IS_ERR(ctx_obj))
                return PTR_ERR(ctx_obj);
 
-       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+       vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto error_deref_obj;
        }
 
-       timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+       timeline = get_timeline(ce->gem_context);
        if (IS_ERR(timeline)) {
                ret = PTR_ERR(timeline);
                goto error_deref_obj;
        }
 
-       ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+       ring = intel_engine_create_ring(engine,
+                                       timeline,
+                                       ce->gem_context->ring_size);
        i915_timeline_put(timeline);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
@@ -2919,7 +2901,7 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
                list_for_each_entry(ce, &ctx->active_engines, active_link) {
                        GEM_BUG_ON(!ce->ring);
                        intel_ring_reset(ce->ring, 0);
-                       __execlists_update_reg_state(ce->engine, ce);
+                       __execlists_update_reg_state(ce, ce->engine);
                }
        }
 }
drivers/gpu/drm/i915/intel_ringbuffer.c
index 602babbd737fd45259d714545e5360e5333e1350..d06908c9584cd9d1cec8cfeb138a8ac962c3edf3 100644
@@ -1509,11 +1509,9 @@ err_obj:
        return ERR_PTR(err);
 }
 
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
-                  struct i915_gem_context *ctx,
-                  struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
 {
+       struct intel_engine_cs *engine = ce->engine;
        int err;
 
        /* One ringbuffer to rule them all */
@@ -1524,55 +1522,29 @@ __ring_context_pin(struct intel_engine_cs *engine,
                struct i915_vma *vma;
 
                vma = alloc_context_vma(engine);
-               if (IS_ERR(vma)) {
-                       err = PTR_ERR(vma);
-                       goto err;
-               }
+               if (IS_ERR(vma))
+                       return PTR_ERR(vma);
 
                ce->state = vma;
        }
 
        err = __context_pin(ce);
        if (err)
-               goto err;
+               return err;
 
        err = __context_pin_ppgtt(ce->gem_context);
        if (err)
                goto err_unpin;
 
-       mutex_lock(&ctx->mutex);
-       list_add(&ce->active_link, &ctx->active_engines);
-       mutex_unlock(&ctx->mutex);
-
-       i915_gem_context_get(ctx);
-       return ce;
+       return 0;
 
 err_unpin:
        __context_unpin(ce);
-err:
-       ce->pin_count = 0;
-       return ERR_PTR(err);
-}
-
-static struct intel_context *
-ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
-{
-       struct intel_context *ce;
-
-       ce = intel_context_instance(ctx, engine);
-       if (IS_ERR(ce))
-               return ce;
-
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-       if (likely(ce->pin_count++))
-               return ce;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-       return __ring_context_pin(engine, ctx, ce);
+       return err;
 }
 
 static const struct intel_context_ops ring_context_ops = {
+       .pin = ring_context_pin,
        .unpin = ring_context_unpin,
        .destroy = ring_context_destroy,
 };
@@ -2283,7 +2255,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        engine->reset.finish = reset_finish;
 
        engine->cops = &ring_context_ops;
-       engine->context_pin = ring_context_pin;
        engine->request_alloc = ring_request_alloc;
 
        /*
drivers/gpu/drm/i915/selftests/mock_engine.c
index d9bbb102677fbfde729b6cd78ca522fc2193f59f..e483329fd533dc877957a80c8821e69f940061cd 100644
@@ -137,41 +137,20 @@ static void mock_context_destroy(struct intel_context *ce)
                mock_ring_free(ce->ring);
 }
 
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
-                struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
 {
-       struct intel_context *ce;
-       int err = -ENOMEM;
-
-       ce = intel_context_instance(ctx, engine);
-       if (IS_ERR(ce))
-               return ce;
-
-       if (ce->pin_count++)
-               return ce;
-
        if (!ce->ring) {
-               ce->ring = mock_ring(engine);
+               ce->ring = mock_ring(ce->engine);
                if (!ce->ring)
-                       goto err;
+                       return -ENOMEM;
        }
 
        mock_timeline_pin(ce->ring->timeline);
-
-       mutex_lock(&ctx->mutex);
-       list_add(&ce->active_link, &ctx->active_engines);
-       mutex_unlock(&ctx->mutex);
-
-       i915_gem_context_get(ctx);
-       return ce;
-
-err:
-       ce->pin_count = 0;
-       return ERR_PTR(err);
+       return 0;
 }
 
 static const struct intel_context_ops mock_context_ops = {
+       .pin = mock_context_pin,
        .unpin = mock_context_unpin,
        .destroy = mock_context_destroy,
 };
@@ -235,7 +214,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.status_page.addr = (void *)(engine + 1);
 
        engine->base.cops = &mock_context_ops;
-       engine->base.context_pin = mock_context_pin;
        engine->base.request_alloc = mock_request_alloc;
        engine->base.emit_flush = mock_emit_flush;
        engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;