err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x %02x",
+ err_printf(m, " %08x_%08x %8u %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain,
- err->wseqno);
+ err->write_domain);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, err->engine != -1 ? " " : "");
- err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
return dst;
}
-/* The error capture is special as tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_active_request lookup.
- */
-static inline u32
-__active_get_seqno(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->global_seqno : 0;
-}
-
-static inline int
-__active_get_engine_id(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->engine->id : -1;
-}
-
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
err->size = obj->base.size;
err->name = obj->base.name;
- err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
- err->engine = __active_get_engine_id(&obj->frontbuffer_write);
-
err->gtt_offset = vma->node.start;
err->read_domains = obj->read_domains;
err->write_domain = obj->write_domain;
struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
- erq->context = ctx->hw_id;
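+ /* without a global seqno, record the request's fence context and seqno */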
+ erq->context = request->fence.context;
+ erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
- s32 engine:4;
u32 cache_level:3;
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
spin_unlock(&request->timeline->lock);
}
-static u32 next_global_seqno(struct i915_timeline *tl)
-{
- if (!++tl->seqno)
- ++tl->seqno;
- return tl->seqno;
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- u32 seqno;
- GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- engine->timeline.seqno + 1,
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
- GEM_BUG_ON(request->global_seqno);
-
- seqno = next_global_seqno(&engine->timeline);
- GEM_BUG_ON(!seqno);
-
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
- request->global_seqno = seqno;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
- GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = 0;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
- rq->global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
*/
const u32 *hwsp_seqno;
- /**
- * GEM sequence number associated with this request on the
- * global execution timeline. It is zero when the request is not
- * on the HW queue (i.e. not on the engine timeline list).
- * Its value is guarded by the timeline spinlock.
- */
- u32 global_seqno;
-
/** Position in the ring of the start of the request */
u32 head;
dma_fence_put(&rq->fence);
}
-/**
- * i915_request_global_seqno - report the current global seqno
- * @request - the request
- *
- * A request is assigned a global seqno only when it is on the hardware
- * execution queue. The global seqno can be used to maintain a list of
- * requests on the same engine in retirement order, for example for
- * constructing a priority queue for waiting. Prior to its execution, or
- * if it is subsequently removed in the event of preemption, its global
- * seqno is zero. As both insertion and removal from the execution queue
- * may operate in IRQ context, it is not guarded by the usual struct_mutex
- * BKL. Instead those relying on the global seqno must be prepared for its
- * value to change between reads. Only when the request is complete can
- * the global seqno be stable (due to the memory barriers on submitting
- * the commands to the hardware to write the breadcrumb, if the HWS shows
- * that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
- */
-static inline u32
-i915_request_global_seqno(const struct i915_request *request)
-{
- return READ_ONCE(request->global_seqno);
-}
-
int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
),
TP_fast_assign(
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global)
+ __entry->hw_id, __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, port)
__field(u32, prio)
),
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->prio, __entry->global_seqno, __entry->port)
+ __entry->prio, __entry->port)
);
TRACE_EVENT(i915_request_out,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, completed)
),
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global_seqno, __entry->completed)
+ __entry->completed)
);
#else
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
__field(unsigned int, flags)
),
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
+ drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
- rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
- rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
spin_lock(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring_tail, rq->global_seqno);
+ ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
struct intel_engine_cs *engine,
struct intel_ring *ring);
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
static inline u32 intel_hws_hangcheck_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
rq_prio(rq));
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
+ GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
- GEM_BUG_ON(!rq->global_seqno);
-
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
rq ? hwsp_seqno(rq) : 0,
/* Following the reset, we need to reload the CSB read/write pointers */
reset_csb_pointers(&engine->execlists);
- GEM_TRACE("%s seqno=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- yesno(stalled));
+ GEM_TRACE("%s stalled? %s\n", engine->name, yesno(stalled));
if (!rq)
goto out_unlock;
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
-
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
request->timeline->hwsp_offset);
intel_engine_next_hangcheck_seqno(request->engine),
intel_hws_hangcheck_address(request->engine));
- cs = gen8_emit_ggtt_write(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine));
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
intel_hws_hangcheck_address(request->engine),
0);
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine),
- PIPE_CONTROL_CS_STALL);
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
I915_GEM_HWS_HANGCHECK_ADDR);
}
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
unsigned int space;
*cs++ = hws_hangcheck_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->global_seqno;
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
*cs++ = hws_hangcheck_address(rq->engine);
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = intel_hws_seqno_address(rq->engine);
- *cs++ = rq->global_seqno;
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
-
*cs++ = MI_USER_INTERRUPT;
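+ /* pad to an even number of dwords; the ring tail must stay qword aligned */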
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
-
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
}
}
- GEM_TRACE("%s seqno=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- yesno(stalled));
+ GEM_TRACE("%s stalled? %s\n", engine->name, yesno(stalled));
+
/*
* The guilty request will get skipped on a hung engine.
*
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
- GEM_BUG_ON(!request->global_seqno);
-
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
-
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
*cs++ = MI_FLUSH;
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
-
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
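+ /* pad the breadcrumb out to a qword boundary */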
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
*
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32))
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_HANGCHECK 0x34
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
- rq->fence.seqno,
- i915_request_global_seqno(rq));
+ rq->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
unsigned long flags;
i915_request_submit(request);
- GEM_BUG_ON(!request->global_seqno);
spin_lock_irqsave(&engine->hw_lock, flags);
list_add_tail(&mock->link, &engine->hw_queue);