if (err)
goto err;
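+ /*
+  * GEM_TRACE is compiled out on non-debug builds; logging the ring's
+  * head/tail at pin time lets this pin be matched against later
+  * unpin/retire events in the trace.
+  */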
+ GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
+ ce->engine->name, ce->ring->timeline->fence_context,
+ ce->ring->head, ce->ring->tail);
+
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
smp_mb__before_atomic(); /* flush pin before it is visible */
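+ /* unpin may run under another context's pin_mutex; annotate for lockdep */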
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
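+ /* pin_count just hit zero; trace the final unpin before teardown */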
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->ring->timeline->fence_context);
+
ce->ops->unpin(ce);
i915_gem_context_put(ce->gem_context);
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
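+ /* Retire callback for ce->active: the last request using the context is done */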
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->ring->timeline->fence_context);
+
if (ce->state)
__context_unpin_state(ce->state);
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
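+ /* Ring mapped into ring->vaddr; record the pin in the trace */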
+ GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
return 0;
err_ring:
if (!atomic_dec_and_test(&ring->pin_count))
return;
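+ /* pin_count hit zero; trace before the ring is reset below */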
+ GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
+
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);