i915_vm_put(vm);
}
+static void __set_timeline(struct intel_timeline **dst,
+ struct intel_timeline *src)
+{
+ struct intel_timeline *old = *dst;
+
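+ /* Take the new reference before dropping the old so src == *dst is safe */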
+ *dst = src ? intel_timeline_get(src) : NULL;
+
+ if (old)
+ intel_timeline_put(old);
+}
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+ __set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+ struct intel_timeline *timeline)
+{
+ __set_timeline(&ctx->timeline, timeline);
+ context_apply_all(ctx, __apply_timeline, timeline);
+}
+
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
return ERR_CAST(timeline);
}
- ctx->timeline = timeline;
+ __assign_timeline(ctx, timeline);
+ intel_timeline_put(timeline);
}
trace_i915_context_create(ctx);
static int clone_timeline(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- if (src->timeline) {
- GEM_BUG_ON(src->timeline == dst->timeline);
-
- if (dst->timeline)
- intel_timeline_put(dst->timeline);
- dst->timeline = intel_timeline_get(src->timeline);
- }
+ if (src->timeline)
+ __assign_timeline(dst, src->timeline);
return 0;
}
static void eb_unpin_context(struct i915_execbuffer *eb)
{
struct intel_context *ce = eb->context;
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
mutex_lock(&tl->mutex);
intel_context_exit(ce);
goto err;
GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
- ce->engine->name, ce->ring->timeline->fence_context,
+ ce->engine->name, ce->timeline->fence_context,
ce->ring->head, ce->ring->tail);
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
if (likely(atomic_dec_and_test(&ce->pin_count))) {
GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->ring->timeline->fence_context);
+ ce->engine->name, ce->timeline->fence_context);
ce->ops->unpin(ce);
struct intel_context *ce = container_of(active, typeof(*ce), active);
GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->ring->timeline->fence_context);
+ ce->engine->name, ce->timeline->fence_context);
if (ce->state)
__context_unpin_state(ce->state);
+ intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
if (err)
goto err_put;
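+ /* The timeline (and its HWSP) stays pinned while the context is active */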
+ err = intel_timeline_pin(ce->timeline);
+ if (err)
+ goto err_ring;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state);
if (err)
- goto err_ring;
+ goto err_timeline;
return 0;
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
err_ring:
intel_ring_unpin(ce->ring);
err_put:
ce->gem_context = ctx;
ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
ce->engine = engine;
ce->ops = engine->cops;
void intel_context_fini(struct intel_context *ce)
{
+ if (ce->timeline)
+ intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);
mutex_destroy(&ce->pin_mutex);
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq)
{
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
int err;
/* Only suitable for use in remotely modifying this context */
static inline int __must_check
intel_context_timeline_lock(struct intel_context *ce)
- __acquires(&ce->ring->timeline->mutex)
+ __acquires(&ce->timeline->mutex)
{
- return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+ return mutex_lock_interruptible(&ce->timeline->mutex);
}
static inline void intel_context_timeline_unlock(struct intel_context *ce)
- __releases(&ce->ring->timeline->mutex)
+ __releases(&ce->timeline->mutex)
{
- mutex_unlock(&ce->ring->timeline->mutex);
+ mutex_unlock(&ce->timeline->mutex);
}
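
A condensed usage sketch (illustrative only, not part of the patch): callers such as the request-creation path seen elsewhere in this patch take the context's timeline lock, emit work against ce under ce->timeline->mutex, and drop it again:

	err = intel_context_timeline_lock(ce);
	if (err)
		return err;

	/* ... build and queue requests on ce under ce->timeline->mutex ... */

	intel_context_timeline_unlock(ce);
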
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_vma *state;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
unsigned long flags;
#define CONTEXT_ALLOC_BIT 0
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct intel_timeline *timeline,
- int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
goto out_frame;
INIT_LIST_HEAD(&frame->ring.request_list);
- frame->ring.timeline = &frame->timeline;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
struct i915_vma *vma;
void *vaddr;
- struct intel_timeline *timeline;
struct list_head request_list;
struct list_head active_link;
struct intel_sseu sseu;
- struct intel_ring *buffer;
-
struct {
spinlock_t lock;
struct list_head requests;
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
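+ /* Ring and timeline shared by all contexts on the legacy submission backend */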
+ struct {
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+ } legacy;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
return ret;
}
-static struct intel_timeline *
-get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
-{
- if (ctx->timeline)
- return intel_timeline_get(ctx->timeline);
- else
- return intel_timeline_create(gt, NULL);
-}
-
static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
+ struct intel_ring *ring;
struct i915_vma *vma;
u32 context_size;
- struct intel_ring *ring;
- struct intel_timeline *timeline;
int ret;
GEM_BUG_ON(ce->state);
goto error_deref_obj;
}
- timeline = get_timeline(ce->gem_context, engine->gt);
- if (IS_ERR(timeline)) {
- ret = PTR_ERR(timeline);
- goto error_deref_obj;
+ if (!ce->timeline) {
+ struct intel_timeline *tl;
+
+ tl = intel_timeline_create(engine->gt, NULL);
+ if (IS_ERR(tl)) {
+ ret = PTR_ERR(tl);
+ goto error_deref_obj;
+ }
+
+ ce->timeline = tl;
}
- ring = intel_engine_create_ring(engine, timeline,
- (unsigned long)ce->ring);
- intel_timeline_put(timeline);
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ring *ring = engine->buffer;
+ struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
*/
__i915_request_reset(rq, stalled);
- GEM_BUG_ON(rq->ring != engine->buffer);
+ GEM_BUG_ON(rq->ring != engine->legacy.ring);
head = rq->head;
} else {
- head = engine->buffer->tail;
+ head = engine->legacy.ring->tail;
}
- engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+ engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- ret = intel_timeline_pin(ring->timeline);
- if (ret)
- goto err_unpin;
-
flags = PIN_GLOBAL;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto err_timeline;
+ goto err_unpin;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
- GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
return 0;
err_ring:
i915_vma_unpin(vma);
-err_timeline:
- intel_timeline_unpin(ring->timeline);
err_unpin:
atomic_dec(&ring->pin_count);
return ret;
if (!atomic_dec_and_test(&ring->pin_count))
return;
- GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
-
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
i915_vma_unpin(vma);
i915_vma_make_purgeable(vma);
-
- intel_timeline_unpin(ring->timeline);
}
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
}
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct intel_timeline *timeline,
- int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_ring *ring;
kref_init(&ring->ref);
INIT_LIST_HEAD(&ring->request_list);
- ring->timeline = intel_timeline_get(timeline);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
i915_vma_close(ring->vma);
i915_vma_put(ring->vma);
- intel_timeline_put(ring->timeline);
kfree(ring);
}
struct intel_engine_cs *engine = ce->engine;
/* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
+ GEM_BUG_ON(!engine->legacy.ring);
+ ce->ring = engine->legacy.ring;
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
GEM_BUG_ON(ce->state);
if (engine->context_size) {
intel_engine_cleanup_common(engine);
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
+
+ intel_timeline_unpin(engine->legacy.timeline);
+ intel_timeline_put(engine->legacy.timeline);
kfree(engine);
}
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- ring = intel_engine_create_ring(engine, timeline, SZ_16K);
- intel_timeline_put(timeline);
+ err = intel_timeline_pin(timeline);
+ if (err)
+ goto err_timeline;
+
+ ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err;
+ goto err_timeline_unpin;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+ engine->legacy.timeline = timeline;
err = intel_engine_init_common(engine);
if (err)
- goto err_unpin;
+ goto err_ring_unpin;
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
-err_unpin:
+err_ring_unpin:
intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
+err_timeline_unpin:
+ intel_timeline_unpin(timeline);
+err_timeline:
+ intel_timeline_put(timeline);
err:
intel_engine_cleanup_common(engine);
return err;
#include "mock_engine.h"
#include "selftests/mock_request.h"
-struct mock_ring {
- struct intel_ring base;
- struct intel_timeline timeline;
-};
-
static void mock_timeline_pin(struct intel_timeline *tl)
{
tl->pin_count++;
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
+ struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
- if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
- kfree(ring);
- return NULL;
- }
-
- kref_init(&ring->base.ref);
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
- atomic_set(&ring->base.pin_count, 1);
-
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
-
- return &ring->base;
-}
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
-static void mock_ring_free(struct intel_ring *base)
-{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
+ INIT_LIST_HEAD(&ring->request_list);
+ intel_ring_update_space(ring);
- intel_timeline_fini(&ring->timeline);
- kfree(ring);
+ return ring;
}
static struct i915_request *first_request(struct mock_engine *engine)
static void mock_context_unpin(struct intel_context *ce)
{
- mock_timeline_unpin(ce->ring->timeline);
}
static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
- mock_ring_free(ce->ring);
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ kfree(ce->ring);
+ mock_timeline_unpin(ce->timeline);
+ }
intel_context_fini(ce);
intel_context_free(ce);
if (!ce->ring)
return -ENOMEM;
+ GEM_BUG_ON(ce->timeline);
+ ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+ if (IS_ERR(ce->timeline)) {
+ int err = PTR_ERR(ce->timeline);
+
+ ce->timeline = NULL;
+ kfree(ce->ring);
+ return err;
+ }
+
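+ /* Mock contexts keep their timeline pinned for their whole lifetime */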
+ mock_timeline_pin(ce->timeline);
+
return 0;
}
static int mock_context_pin(struct intel_context *ce)
{
- int ret;
-
- ret = intel_context_active_acquire(ce);
- if (ret)
- return ret;
-
- mock_timeline_pin(ce->ring->timeline);
- return 0;
+ return intel_context_active_acquire(ce);
}
static const struct intel_context_ops mock_context_ops = {
static int context_sync(struct intel_context *ce)
{
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
int err = 0;
do {
struct llist_node *head = NULL, *tail = NULL;
struct llist_node *pos, *next;
- GEM_BUG_ON(node->timeline != engine->kernel_context->ring->timeline->fence_context);
+ GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
/*
* Rebuild the llist excluding our node. We may perform this
* i915_active_acquire_barrier()
*/
for_each_engine_masked(engine, i915, mask, tmp) {
- u64 idx = engine->kernel_context->ring->timeline->fence_context;
+ u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
node = reuse_idle_barrier(ref, idx);
struct llist_node *node, *next;
GEM_BUG_ON(intel_engine_is_virtual(engine));
- GEM_BUG_ON(rq->timeline != engine->kernel_context->ring->timeline);
+ GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
/*
* Attach the list of proto-fences to the in-flight request such
local_irq_enable();
- intel_context_exit(rq->hw_context);
- intel_context_unpin(rq->hw_context);
-
i915_request_remove_from_client(rq);
list_del(&rq->link);
+ intel_context_exit(rq->hw_context);
+ intel_context_unpin(rq->hw_context);
+
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
goto err_unlock;
/* Check that we do not interrupt ourselves with a new request */
- rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+ rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
return rq;
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
- u64 size;
count = 0;
- for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- size += I915_GTT_PAGE_SIZE) {
+ do {
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- quirk_add(obj, objects);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
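+ /* A full GGTT (-ENOSPC) ends the fill; any other error is fatal */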
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
return PTR_ERR(vma);
+ }
+ quirk_add(obj, objects);
count++;
- }
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, i915->ggtt.vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;