i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
+ intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
}
for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
struct i915_vma *state;
void *vaddr;
- GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+ ce = intel_context_lookup(ctx, engine);
+ if (!ce)
+ continue;
- state = to_intel_context(ctx, engine)->state;
+ state = ce->state;
if (!state)
continue;
+ GEM_BUG_ON(ce->pin_count);
+
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
-static struct i915_global_context {
+static struct i915_global_gem_context {
struct i915_global base;
struct kmem_cache *slab_luts;
} global;
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- unsigned int n;
+ struct intel_context *it, *n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
-
- if (ce->ops)
- ce->ops->destroy(ce);
- }
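+ /*
+ * A postorder walk is required here: ce->ops->destroy() frees the
+ * node itself, so the next entry must be fetched before each
+ * destructor runs.
+ */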
+ rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+ it->ops->destroy(it);
kfree(ctx->name);
put_pid(ctx->pid);
return desc;
}
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
int i;
INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
struct intel_ring *ring;
struct i915_request *rq;
- GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-
rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
if (!engine)
return -EINVAL;
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
/* The only use for the mutex here is to serialize get_param and set_param. */
ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (ret)
return ret;
- ce = to_intel_context(ctx, engine);
-
user_sseu.slice_mask = ce->sseu.slice_mask;
user_sseu.subslice_mask = ce->sseu.subslice_mask;
user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
struct intel_engine_cs *engine,
struct intel_sseu sseu)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
int ret = 0;
GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
GEM_BUG_ON(engine->id != RCS0);
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
return 0;
#include "selftests/i915_gem_context.c"
#endif
-static void i915_global_context_shrink(void)
+static void i915_global_gem_context_shrink(void)
{
kmem_cache_shrink(global.slab_luts);
}
-static void i915_global_context_exit(void)
+static void i915_global_gem_context_exit(void)
{
kmem_cache_destroy(global.slab_luts);
}
-static struct i915_global_context global = { {
- .shrink = i915_global_context_shrink,
- .exit = i915_global_context_exit,
+static struct i915_global_gem_context global = { {
+ .shrink = i915_global_gem_context_shrink,
+ .exit = i915_global_gem_context_exit,
} };
-int __init i915_global_context_init(void)
+int __init i915_global_gem_context_init(void)
{
global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
if (!global.slab_luts)
#include "i915_scheduler.h"
#include "intel_context.h"
#include "intel_device_info.h"
-#include "intel_ringbuffer.h"
struct drm_device;
struct drm_file;
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
-#include "i915_gem.h" /* I915_NUM_ENGINES */
#include "i915_scheduler.h"
#include "intel_context_types.h"
struct i915_sched_attr sched;
- /** engine: per-engine logical HW state */
- struct intel_context __engine[I915_NUM_ENGINES];
+ /** hw_contexts: per-engine logical HW state */
+ struct rb_root hw_contexts;
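+ /** hw_contexts_lock: protects hw_contexts */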
+ spinlock_t hw_contexts_lock;
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
* keeping all of their resources pinned.
*/
- ce = to_intel_context(eb->ctx, eb->engine);
- if (!ce->ring) /* first use, assume empty! */
+ ce = intel_context_lookup(eb->ctx, eb->engine);
+ if (!ce || !ce->ring) /* first use, assume empty! */
return 0;
rq = __eb_wait_for_ring(ce->ring);
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
i915_global_context_init,
+ i915_global_gem_context_init,
i915_global_objects_init,
i915_global_request_init,
i915_global_scheduler_init,
/* constructors */
int i915_global_active_init(void);
int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
int i915_global_request_init(void);
int i915_global_scheduler_init(void);
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
- if (!ce->state)
+ if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.c
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = NULL;
+ struct rb_node *p;
+
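+ /*
+ * The tree is keyed by the engine pointer itself; any stable total
+ * order over the engines is sufficient for lookup.
+ */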
+ spin_lock(&ctx->hw_contexts_lock);
+ p = ctx->hw_contexts.rb_node;
+ while (p) {
+ struct intel_context *this =
+ rb_entry(p, struct intel_context, node);
+
+ if (this->engine == engine) {
+ GEM_BUG_ON(this->gem_context != ctx);
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct rb_node **p, *parent;
+ int err = 0;
+
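+ /*
+ * Descend to the insertion point; if another thread already added a
+ * context for this engine, hand back theirs (-EEXIST) so the caller
+ * can discard its speculative allocation.
+ */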
+ spin_lock(&ctx->hw_contexts_lock);
+
+ parent = NULL;
+ p = &ctx->hw_contexts.rb_node;
+ while (*p) {
+ struct intel_context *this;
+
+ parent = *p;
+ this = rb_entry(parent, struct intel_context, node);
+
+ if (this->engine == engine) {
+ err = -EEXIST;
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ if (!err) {
+ rb_link_node(&ce->node, parent, p);
+ rb_insert_color(&ce->node, &ctx->hw_contexts);
+ }
+
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+void __intel_context_remove(struct intel_context *ce)
+{
+ struct i915_gem_context *ctx = ce->gem_context;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ rb_erase(&ce->node, &ctx->hw_contexts);
+ spin_unlock(&ctx->hw_contexts_lock);
+}
+
+struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce, *pos;
+
+ ce = intel_context_lookup(ctx, engine);
+ if (likely(ce))
+ return ce;
+
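+ /* Allocate speculatively outside the lock, then race to insert */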
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+
+ pos = __intel_context_insert(ctx, engine, ce);
+ if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+ intel_context_free(ce);
+
+ GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+ return pos;
+}
+
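+/*
+ * Called when the last request along ce->active_tracker is retired:
+ * drop the pin taken for request submission.
+ */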
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ /* Use the whole device by default */
+ ce->sseu = intel_device_default_sseu(ctx->i915);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
-#include "i915_gem_context_types.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
-{
- return &ctx->__engine[engine->id];
-}
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_instance - Lookup or allocate the HW context for (ctx, engine)
+ * @ctx: the parent GEM context
+ * @engine: the target HW engine
+ *
+ * Returns the existing HW context for this pair of (GEM context, engine), or
+ * allocates and initialises a fresh context. Once allocated, the HW context
+ * remains resident until the GEM context is destroyed.
+ */
+struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
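+
+/*
+ * Typical usage: pinning paths that may need to create the HW context
+ * use intel_context_instance() and check for an error pointer:
+ *
+ *	ce = intel_context_instance(ctx, engine);
+ *	if (IS_ERR(ce))
+ *		return PTR_ERR(ce);
+ *
+ * whereas read-only paths use intel_context_lookup() and must tolerate
+ * a NULL return.
+ */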
static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
#define __INTEL_CONTEXT_TYPES__
#include <linux/list.h>
+#include <linux/rbtree.h>
#include <linux/types.h>
#include "i915_active_types.h"
struct i915_active_request active_tracker;
const struct intel_context_ops *ops;
+ struct rb_node node;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
static void __intel_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- intel_context_unpin(to_intel_context(ctx, engine));
+ intel_context_unpin(intel_context_lookup(ctx, engine));
}
struct measure_breadcrumb {
*/
u32 *csb_status;
+ /**
+ * @preempt_context: the HW context for injecting preempt-to-idle
+ */
+ struct intel_context *preempt_context;
+
/**
* @preempt_complete_status: expected CSB upon completing preemption
*/
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
- kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
- dev_priv->engine[RCS0])->state;
+ kernel_ctx_vma = intel_context_lookup(dev_priv->kernel_context,
+ dev_priv->engine[RCS0])->state;
blob->ads.golden_context_lrca =
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
* for now who owns a GuC client. But future owners of a GuC client
* need to make sure the lrc is pinned prior to entering here.
*/
- if (!ce->state)
+ if (!ce || !ce->state)
break; /* XXX: continue? */
/*
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = to_intel_context(client->owner, engine);
+ struct intel_context *ce = intel_context_lookup(client->owner, engine);
u32 data[7];
if (!ce->ring->emit) { /* recreate upon load/resume */
static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ struct intel_context *ce = execlists->preempt_context;
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
intel_ring_free(ce->ring);
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void execlists_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
+
+ if (ce->state)
+ __execlists_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!ctx->ppgtt);
execlists->preempt_complete_status = ~0u;
if (i915->preempt_context) {
struct intel_context *ce =
- to_intel_context(i915->preempt_context, engine);
+ intel_context_lookup(i915->preempt_context, engine);
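+ /* Cache the preempt context for inject_preempt_context() */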
+ execlists->preempt_context = ce;
execlists->preempt_complete_status =
upper_32_bits(ce->lrc_desc);
}
kfree(ring);
}
+static void __ring_context_fini(struct intel_context *ce)
+{
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
static void ring_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
- if (!ce->state)
- return;
+ if (ce->state)
+ __ring_context_fini(ce);
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ intel_context_free(ce);
}
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
static struct intel_context *
ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
* placeholder we use to flush other contexts.
*/
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
- engine)->state) |
+ *cs++ = i915_ggtt_offset(intel_context_lookup(i915->kernel_context,
+ engine)->state) |
MI_MM_SPACE_GTT |
MI_RESTORE_INHIBIT;
}
const char *name)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
-
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
mock_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
int err = -ENOMEM;
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
if (ce->pin_count++)
return ce;