drm/i915: Move over to intel_context_lookup()
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 8 Mar 2019 13:25:19 +0000 (13:25 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 8 Mar 2019 13:59:52 +0000 (13:59 +0000)
In preparation for an ever growing number of engines and so ever
increasing static array of HW contexts within the GEM context, move the
array over to an rbtree, allocated upon first use.

Unfortunately, this imposes an rbtree lookup at a few frequent callsites,
but we should be able to mitigate those by moving over to using the HW
context as our primary type and so only incur the lookup on the boundary
with the user GEM context and engines.

v2: Check for no HW context in guc_stage_desc_init

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308132522.21573-4-chris@chris-wilson.co.uk
21 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_context_types.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_globals.h
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_context.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_context.h
drivers/gpu/drm/i915/intel_context_types.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_engine_types.h
drivers/gpu/drm/i915/intel_guc_ads.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/selftests/mock_context.c
drivers/gpu/drm/i915/selftests/mock_engine.c

index a230553367de5d8aa2b9c956fe90528d3fefa437..68fecf355471474999e9796baf110fd1dc321db4 100644 (file)
@@ -94,6 +94,7 @@ i915-y += \
          i915_trace_points.o \
          i915_vma.o \
          intel_breadcrumbs.o \
+         intel_context.o \
          intel_engine_cs.o \
          intel_hangcheck.o \
          intel_lrc.o \
index 0209d27fcaf0faf0f3bebb348cc7edc245b37b10..f64c76dd11d4f61038ac48bf02ceaa3caeaf02c3 100644 (file)
@@ -494,7 +494,8 @@ static void switch_mmio(struct intel_vgpu *pre,
                         * itself.
                         */
                        if (mmio->in_context &&
-                           !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+                           !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+                                                                    dev_priv->engine[ring_id])))
                                continue;
 
                        if (mmio->mask)
index 1f1849f906063b7678d4090ad72885ef5014b0b9..0f32ec12896c6fd6a00d31f2d07adcd3cd3558ff 100644 (file)
@@ -4655,15 +4655,20 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
        }
 
        for_each_engine(engine, i915, id) {
+               struct intel_context *ce;
                struct i915_vma *state;
                void *vaddr;
 
-               GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+               ce = intel_context_lookup(ctx, engine);
+               if (!ce)
+                       continue;
 
-               state = to_intel_context(ctx, engine)->state;
+               state = ce->state;
                if (!state)
                        continue;
 
+               GEM_BUG_ON(ce->pin_count);
+
                /*
                 * As we will hold a reference to the logical state, it will
                 * not be torn down with the context, and importantly the
index 0bb2bdd0a55ffb0742b787b2ed449633321caaf7..995bc28d53d6749037959d110ed9d5137010a33d 100644 (file)
@@ -95,7 +95,7 @@
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
-static struct i915_global_context {
+static struct i915_global_gem_context {
        struct i915_global base;
        struct kmem_cache *slab_luts;
 } global;
@@ -222,7 +222,7 @@ static void release_hw_id(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-       unsigned int n;
+       struct intel_context *it, *n;
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
@@ -231,12 +231,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        release_hw_id(ctx);
        i915_ppgtt_put(ctx->ppgtt);
 
-       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-               struct intel_context *ce = &ctx->__engine[n];
-
-               if (ce->ops)
-                       ce->ops->destroy(ce);
-       }
+       rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+               it->ops->destroy(it);
 
        kfree(ctx->name);
        put_pid(ctx->pid);
@@ -340,40 +336,11 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
        return desc;
 }
 
-static void intel_context_retire(struct i915_active_request *active,
-                                struct i915_request *rq)
-{
-       struct intel_context *ce =
-               container_of(active, typeof(*ce), active_tracker);
-
-       intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
-                  struct i915_gem_context *ctx,
-                  struct intel_engine_cs *engine)
-{
-       ce->gem_context = ctx;
-       ce->engine = engine;
-       ce->ops = engine->cops;
-
-       INIT_LIST_HEAD(&ce->signal_link);
-       INIT_LIST_HEAD(&ce->signals);
-
-       /* Use the whole device by default */
-       ce->sseu = intel_device_default_sseu(ctx->i915);
-
-       i915_active_request_init(&ce->active_tracker,
-                                NULL, intel_context_retire);
-}
-
 static struct i915_gem_context *
 __create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
 {
        struct i915_gem_context *ctx;
-       unsigned int n;
        int ret;
        int i;
 
@@ -388,8 +355,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        INIT_LIST_HEAD(&ctx->active_engines);
        mutex_init(&ctx->mutex);
 
-       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-               intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+       ctx->hw_contexts = RB_ROOT;
+       spin_lock_init(&ctx->hw_contexts_lock);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
@@ -728,8 +695,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
                struct intel_ring *ring;
                struct i915_request *rq;
 
-               GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-
                rq = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);
@@ -865,13 +830,15 @@ static int get_sseu(struct i915_gem_context *ctx,
        if (!engine)
                return -EINVAL;
 
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
        /* Only use for mutex here is to serialize get_param and set_param. */
        ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
        if (ret)
                return ret;
 
-       ce = to_intel_context(ctx, engine);
-
        user_sseu.slice_mask = ce->sseu.slice_mask;
        user_sseu.subslice_mask = ce->sseu.subslice_mask;
        user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
@@ -1037,12 +1004,16 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
                                    struct intel_engine_cs *engine,
                                    struct intel_sseu sseu)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
+       struct intel_context *ce;
        int ret = 0;
 
        GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
        GEM_BUG_ON(engine->id != RCS0);
 
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
        /* Nothing to do if unmodified. */
        if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
                return 0;
@@ -1375,22 +1346,22 @@ out_unlock:
 #include "selftests/i915_gem_context.c"
 #endif
 
-static void i915_global_context_shrink(void)
+static void i915_global_gem_context_shrink(void)
 {
        kmem_cache_shrink(global.slab_luts);
 }
 
-static void i915_global_context_exit(void)
+static void i915_global_gem_context_exit(void)
 {
        kmem_cache_destroy(global.slab_luts);
 }
 
-static struct i915_global_context global = { {
-       .shrink = i915_global_context_shrink,
-       .exit = i915_global_context_exit,
+static struct i915_global_gem_context global = { {
+       .shrink = i915_global_gem_context_shrink,
+       .exit = i915_global_gem_context_exit,
 } };
 
-int __init i915_global_context_init(void)
+int __init i915_global_gem_context_init(void)
 {
        global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
        if (!global.slab_luts)
index 00698944a0ee5f7ae2e4ea2d20b1014903ed233f..5a32c4b4816f87e144300beb14b06458c0445a35 100644 (file)
@@ -31,7 +31,6 @@
 #include "i915_scheduler.h"
 #include "intel_context.h"
 #include "intel_device_info.h"
-#include "intel_ringbuffer.h"
 
 struct drm_device;
 struct drm_file;
index 59800d749510dbc256084a5d85067a5a8c47a7db..2bf19730eaa97086fe718d6bcdbf6fa38e15b572 100644 (file)
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 #include <linux/rcupdate.h>
 #include <linux/types.h>
 
-#include "i915_gem.h" /* I915_NUM_ENGINES */
 #include "i915_scheduler.h"
 #include "intel_context_types.h"
 
@@ -140,8 +140,9 @@ struct i915_gem_context {
 
        struct i915_sched_attr sched;
 
-       /** engine: per-engine logical HW state */
-       struct intel_context __engine[I915_NUM_ENGINES];
+       /** hw_contexts: per-engine logical HW state */
+       struct rb_root hw_contexts;
+       spinlock_t hw_contexts_lock;
 
        /** ring_size: size for allocating the per-engine ring buffer */
        u32 ring_size;
index 943a221acb21a30e8d913fe458d6333e49b37f13..ee6d301a96277616788dbe94365149d50c5c7338 100644 (file)
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
         * keeping all of their resources pinned.
         */
 
-       ce = to_intel_context(eb->ctx, eb->engine);
-       if (!ce->ring) /* first use, assume empty! */
+       ce = intel_context_lookup(eb->ctx, eb->engine);
+       if (!ce || !ce->ring) /* first use, assume empty! */
                return 0;
 
        rq = __eb_wait_for_ring(ce->ring);
index 1cf4e8bc8ec6fd7f7f56964a8d2a73129f47079b..2f5c72e2a9d15f1281b113ce97e15fd439541f34 100644 (file)
@@ -36,6 +36,7 @@ static void __i915_globals_cleanup(void)
 static __initconst int (* const initfn[])(void) = {
        i915_global_active_init,
        i915_global_context_init,
+       i915_global_gem_context_init,
        i915_global_objects_init,
        i915_global_request_init,
        i915_global_scheduler_init,
index a45529022a42b4c43f525565075f51375600dc4d..04c1ce107fc066cde75c2e2cb9bd416006ac2121 100644 (file)
@@ -26,6 +26,7 @@ void i915_globals_exit(void);
 /* constructors */
 int i915_global_active_init(void);
 int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
 int i915_global_objects_init(void);
 int i915_global_request_init(void);
 int i915_global_scheduler_init(void);
index a0d145f976ec2fe4e055c3d489a0782a57ea1501..e19a89e4df6488e7807399c7db454f559bf83f4c 100644 (file)
@@ -1740,11 +1740,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 
        /* Update all contexts now that we've stalled the submission. */
        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-               struct intel_context *ce = to_intel_context(ctx, engine);
+               struct intel_context *ce = intel_context_lookup(ctx, engine);
                u32 *regs;
 
                /* OA settings will be set upon first use */
-               if (!ce->state)
+               if (!ce || !ce->state)
                        continue;
 
                regs = i915_gem_object_pin_map(ce->state->obj, map_type);
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
new file mode 100644 (file)
index 0000000..d04a1f5
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+       struct i915_global base;
+       struct kmem_cache *slab_ce;
+} global;
+
+/* Allocate a zeroed intel_context from the module-wide slab cache. */
+struct intel_context *intel_context_alloc(void)
+{
+	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+/* Return an intel_context to the slab cache; ce must already be unlinked. */
+void intel_context_free(struct intel_context *ce)
+{
+	kmem_cache_free(global.slab_ce, ce);
+}
+
+/*
+ * Find the HW context for (ctx, engine) in the GEM context's rbtree.
+ *
+ * The tree is keyed by the engine pointer value (see the < comparison
+ * below and in __intel_context_insert). Returns NULL if no HW context
+ * has been instantiated for this engine yet. The walk is performed
+ * under ctx->hw_contexts_lock.
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+		     struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = NULL;
+	struct rb_node *p;
+
+	spin_lock(&ctx->hw_contexts_lock);
+	p = ctx->hw_contexts.rb_node;
+	while (p) {
+		struct intel_context *this =
+			rb_entry(p, struct intel_context, node);
+
+		if (this->engine == engine) {
+			/* Sanity check the back-pointer set by intel_context_init() */
+			GEM_BUG_ON(this->gem_context != ctx);
+			ce = this;
+			break;
+		}
+
+		/* Ordered by raw engine pointer value */
+		if (this->engine < engine)
+			p = p->rb_right;
+		else
+			p = p->rb_left;
+	}
+	spin_unlock(&ctx->hw_contexts_lock);
+
+	return ce;
+}
+
+/*
+ * Insert ce into ctx->hw_contexts, keyed by ce's engine.
+ *
+ * If another HW context for the same engine is already present (a
+ * concurrent creator won the race), the existing node is returned
+ * instead of inserting ce — the caller must compare the return value
+ * against its own ce and free the loser (see intel_context_instance()).
+ */
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       struct intel_context *ce)
+{
+	struct rb_node **p, *parent;
+	int err = 0;
+
+	spin_lock(&ctx->hw_contexts_lock);
+
+	parent = NULL;
+	p = &ctx->hw_contexts.rb_node;
+	while (*p) {
+		struct intel_context *this;
+
+		parent = *p;
+		this = rb_entry(parent, struct intel_context, node);
+
+		if (this->engine == engine) {
+			/* Already populated; hand back the incumbent */
+			err = -EEXIST;
+			ce = this;
+			break;
+		}
+
+		/* Same pointer-value ordering as intel_context_lookup() */
+		if (this->engine < engine)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+	if (!err) {
+		rb_link_node(&ce->node, parent, p);
+		rb_insert_color(&ce->node, &ctx->hw_contexts);
+	}
+
+	spin_unlock(&ctx->hw_contexts_lock);
+
+	return ce;
+}
+
+/* Unlink ce from its GEM context's rbtree (does not free ce). */
+void __intel_context_remove(struct intel_context *ce)
+{
+	struct i915_gem_context *ctx = ce->gem_context;
+
+	spin_lock(&ctx->hw_contexts_lock);
+	rb_erase(&ce->node, &ctx->hw_contexts);
+	spin_unlock(&ctx->hw_contexts_lock);
+}
+
+/*
+ * Lookup-or-create the HW context for (ctx, engine).
+ *
+ * Returns the existing intel_context if one is already in the tree,
+ * otherwise allocates, initialises and inserts a fresh one. Returns
+ * ERR_PTR(-ENOMEM) on allocation failure. The lock is dropped between
+ * the lookup and the insert, so a concurrent caller may create the
+ * same node first; __intel_context_insert() resolves that race by
+ * returning the incumbent, and we free our redundant copy.
+ */
+struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine)
+{
+	struct intel_context *ce, *pos;
+
+	ce = intel_context_lookup(ctx, engine);
+	if (likely(ce))
+		return ce;
+
+	ce = intel_context_alloc();
+	if (!ce)
+		return ERR_PTR(-ENOMEM);
+
+	intel_context_init(ce, ctx, engine);
+
+	pos = __intel_context_insert(ctx, engine, ce);
+	if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+		intel_context_free(ce);
+
+	GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+	return pos;
+}
+
+/*
+ * Retirement callback for ce->active_tracker (installed by
+ * intel_context_init()): drop the pin taken while the context had
+ * requests in flight.
+ */
+static void intel_context_retire(struct i915_active_request *active,
+				 struct i915_request *rq)
+{
+	struct intel_context *ce =
+		container_of(active, typeof(*ce), active_tracker);
+
+	intel_context_unpin(ce);
+}
+
+/*
+ * One-time initialisation of a freshly allocated intel_context:
+ * wire up the (ctx, engine) back-pointers, adopt the engine's context
+ * ops, prepare the breadcrumb signal lists, and arm the active tracker
+ * whose retirement unpins the context (see intel_context_retire()).
+ */
+void
+intel_context_init(struct intel_context *ce,
+		   struct i915_gem_context *ctx,
+		   struct intel_engine_cs *engine)
+{
+	ce->gem_context = ctx;
+	ce->engine = engine;
+	ce->ops = engine->cops;
+
+	INIT_LIST_HEAD(&ce->signal_link);
+	INIT_LIST_HEAD(&ce->signals);
+
+	/* Use the whole device by default */
+	ce->sseu = intel_device_default_sseu(ctx->i915);
+
+	i915_active_request_init(&ce->active_tracker,
+				 NULL, intel_context_retire);
+}
+
+/* i915_global shrink hook: release unused slab pages under mempressure. */
+static void i915_global_context_shrink(void)
+{
+	kmem_cache_shrink(global.slab_ce);
+}
+
+/* i915_global exit hook: destroy the slab cache on module unload. */
+static void i915_global_context_exit(void)
+{
+	kmem_cache_destroy(global.slab_ce);
+}
+
+/* Definition matching the tentative declaration at the top of the file. */
+static struct i915_global_context global = { {
+	.shrink = i915_global_context_shrink,
+	.exit = i915_global_context_exit,
+} };
+
+/*
+ * Module init: create the intel_context slab cache and register the
+ * shrink/exit hooks with the i915_globals framework (called from the
+ * initfn[] table in i915_globals.c). Returns -ENOMEM on failure.
+ */
+int __init i915_global_context_init(void)
+{
+	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+	if (!global.slab_ce)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
index dd947692bb0bd7e83d5c6747046dc24693cf97ca..2c910628acafc1c5c39ef6d78cacbcd9f018def3 100644 (file)
@@ -7,20 +7,46 @@
 #ifndef __INTEL_CONTEXT_H__
 #define __INTEL_CONTEXT_H__
 
-#include "i915_gem_context_types.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
 
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
 void intel_context_init(struct intel_context *ce,
                        struct i915_gem_context *ctx,
                        struct intel_engine_cs *engine);
 
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
-                const struct intel_engine_cs *engine)
-{
-       return &ctx->__engine[engine->id];
-}
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+                    struct intel_engine_cs *engine);
+
+/**
+ * intel_context_instance - Lookup or allocate the HW context for (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * Returns the existing HW context for this pair of (GEM context, engine), or
+ * allocates and initialises a fresh context. Once allocated, the HW context
+ * remains resident until the GEM context is destroyed.
+ */
+struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+                      struct intel_engine_cs *engine);
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+                      struct intel_engine_cs *engine,
+                      struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
 
 static inline struct intel_context *
 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
index 16e1306e9595b6048a1f6c9db1e5f62c5a496e67..857f5c3353246ae24121f8b5b12738a9a4f59a9a 100644 (file)
@@ -8,6 +8,7 @@
 #define __INTEL_CONTEXT_TYPES__
 
 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/types.h>
 
 #include "i915_active_types.h"
@@ -52,6 +53,7 @@ struct intel_context {
        struct i915_active_request active_tracker;
 
        const struct intel_context_ops *ops;
+       struct rb_node node;
 
        /** sseu: Control eu/slice partitioning */
        struct intel_sseu sseu;
index 8e326556499eebb04312121b63694d3e43ff5df3..1937128ea267f7ca441e4974a98a26546c5ddaeb 100644 (file)
@@ -644,7 +644,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
 static void __intel_context_unpin(struct i915_gem_context *ctx,
                                  struct intel_engine_cs *engine)
 {
-       intel_context_unpin(to_intel_context(ctx, engine));
+       intel_context_unpin(intel_context_lookup(ctx, engine));
 }
 
 struct measure_breadcrumb {
index 4d2c0ba58e6e6c97bac406ffa8f87991c9078f77..6a570e4e51c32c8b10534cf1e5cd66abcc493ef0 100644 (file)
@@ -231,6 +231,11 @@ struct intel_engine_execlists {
         */
        u32 *csb_status;
 
+       /**
+        * @preempt_context: the HW context for injecting preempt-to-idle
+        */
+       struct intel_context *preempt_context;
+
        /**
         * @preempt_complete_status: expected CSB upon completing preemption
         */
index c51d558fd43185b934b6194f1dc21b8387e555c8..b4ff0045a605545ee0adce0647289732395f09d2 100644 (file)
@@ -121,8 +121,8 @@ int intel_guc_ads_create(struct intel_guc *guc)
         * to find it. Note that we have to skip our header (1 page),
         * because our GuC shared data is there.
         */
-       kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
-                                         dev_priv->engine[RCS0])->state;
+       kernel_ctx_vma = intel_context_lookup(dev_priv->kernel_context,
+                                             dev_priv->engine[RCS0])->state;
        blob->ads.golden_context_lrca =
                intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
 
index e3afc91f0e7baf8342eec4b89b6ef91ab0a29832..4a57272334190c8a82513928b8ec14373792f786 100644 (file)
@@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
        desc->db_id = client->doorbell_id;
 
        for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-               struct intel_context *ce = to_intel_context(ctx, engine);
+               struct intel_context *ce = intel_context_lookup(ctx, engine);
                u32 guc_engine_id = engine->guc_id;
                struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
@@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
                 * for now who owns a GuC client. But for future owner of GuC
                 * client, need to make sure lrc is pinned prior to enter here.
                 */
-               if (!ce->state)
+               if (!ce || !ce->state)
                        break;  /* XXX: continue? */
 
                /*
@@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
                                             preempt_work[engine->id]);
        struct intel_guc_client *client = guc->preempt_client;
        struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-       struct intel_context *ce = to_intel_context(client->owner, engine);
+       struct intel_context *ce = intel_context_lookup(client->owner, engine);
        u32 data[7];
 
        if (!ce->ring->emit) { /* recreate upon load/resume */
index b210a27646132c89eaaaed672624e4104ee44898..7aeff600f12ea129a39a1c4b0f5536bf02eff855 100644 (file)
@@ -628,8 +628,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 static void inject_preempt_context(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists *execlists = &engine->execlists;
-       struct intel_context *ce =
-               to_intel_context(engine->i915->preempt_context, engine);
+       struct intel_context *ce = execlists->preempt_context;
        unsigned int n;
 
        GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -1237,19 +1236,24 @@ static void execlists_submit_request(struct i915_request *request)
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
 {
-       GEM_BUG_ON(ce->pin_count);
-
-       if (!ce->state)
-               return;
-
        intel_ring_free(ce->ring);
 
        GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
        i915_gem_object_put(ce->state->obj);
 }
 
+static void execlists_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(ce->pin_count);
+
+       if (ce->state)
+               __execlists_context_fini(ce);
+
+       intel_context_free(ce);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
        struct intel_engine_cs *engine;
@@ -1390,7 +1394,11 @@ static struct intel_context *
 execlists_context_pin(struct intel_engine_cs *engine,
                      struct i915_gem_context *ctx)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
+       struct intel_context *ce;
+
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return ce;
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!ctx->ppgtt);
@@ -2441,8 +2449,9 @@ static int logical_ring_init(struct intel_engine_cs *engine)
        execlists->preempt_complete_status = ~0u;
        if (i915->preempt_context) {
                struct intel_context *ce =
-                       to_intel_context(i915->preempt_context, engine);
+                       intel_context_lookup(i915->preempt_context, engine);
 
+               execlists->preempt_context = ce;
                execlists->preempt_complete_status =
                        upper_32_bits(ce->lrc_desc);
        }
index 78e3c03aab4eb5c11ae95c298d1857e7e063088a..602babbd737fd45259d714545e5360e5333e1350 100644 (file)
@@ -1349,15 +1349,20 @@ intel_ring_free(struct intel_ring *ring)
        kfree(ring);
 }
 
+static void __ring_context_fini(struct intel_context *ce)
+{
+       GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+       i915_gem_object_put(ce->state->obj);
+}
+
 static void ring_context_destroy(struct intel_context *ce)
 {
        GEM_BUG_ON(ce->pin_count);
 
-       if (!ce->state)
-               return;
+       if (ce->state)
+               __ring_context_fini(ce);
 
-       GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
-       i915_gem_object_put(ce->state->obj);
+       intel_context_free(ce);
 }
 
 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
@@ -1552,7 +1557,11 @@ err:
 static struct intel_context *
 ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
+       struct intel_context *ce;
+
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return ce;
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 
@@ -1754,8 +1763,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                 * placeholder we use to flush other contexts.
                 */
                *cs++ = MI_SET_CONTEXT;
-               *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
-                                                         engine)->state) |
+               *cs++ = i915_ggtt_offset(intel_context_lookup(i915->kernel_context,
+                                                             engine)->state) |
                        MI_MM_SPACE_GTT |
                        MI_RESTORE_INHIBIT;
        }
index 353b37b9f78e31786c94237e6527b297b92f84b4..8efa6892c6cd96481d31effb5296e4e41d320962 100644 (file)
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
             const char *name)
 {
        struct i915_gem_context *ctx;
-       unsigned int n;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,15 +40,15 @@ mock_context(struct drm_i915_private *i915,
        INIT_LIST_HEAD(&ctx->link);
        ctx->i915 = i915;
 
+       ctx->hw_contexts = RB_ROOT;
+       spin_lock_init(&ctx->hw_contexts_lock);
+
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
        INIT_LIST_HEAD(&ctx->hw_id_link);
        INIT_LIST_HEAD(&ctx->active_engines);
        mutex_init(&ctx->mutex);
 
-       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-               intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
-
        ret = i915_gem_context_pin_hw_id(ctx);
        if (ret < 0)
                goto err_handles;
index 856ec2706f3e7f7091e9510a5643552b11f4aa20..d9bbb102677fbfde729b6cd78ca522fc2193f59f 100644 (file)
@@ -141,9 +141,13 @@ static struct intel_context *
 mock_context_pin(struct intel_engine_cs *engine,
                 struct i915_gem_context *ctx)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
+       struct intel_context *ce;
        int err = -ENOMEM;
 
+       ce = intel_context_instance(ctx, engine);
+       if (IS_ERR(ce))
+               return ce;
+
        if (ce->pin_count++)
                return ce;