int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int total = 0;
int ret, j;
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
return ret;
any = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
const u32 *hws;
int i;
- engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+ engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
intel_runtime_pm_get(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
- intel_engine_get_instdone(&dev_priv->engine[RCS], &instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *rb;
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i;
if (!ppgtt)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int count = 0;
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
struct rb_node *rb;
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
}
seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < num_rings; j++)
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
static void disable_engines_irq(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
/* Ensure irq handler finishes, and not run again. */
disable_irq(dev_priv->drm.irq);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
tasklet_kill(&engine->irq_tasklet);
}
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
- struct intel_engine_cs engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
u32 next_seqno;
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
- for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((id__) = (engine__)->id, \
- intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
i915_gem_restore_fences(&dev_priv->drm);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
if (dev_priv->gt.active_engines)
goto out_unlock;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
unsigned int flags)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context == NULL)
continue;
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Double layer security blanket, see i915_gem_init() */
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
- INIT_LIST_HEAD(&engine->request_list);
-}
-
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
dev_priv->objects =
kmem_cache_create("i915_gem_object",
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_engine_lists(&dev_priv->engine[i]);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
if (!i915_gem_context_is_default(ctx))
continue;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
gpu_is_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_is_active(engine))
return false;
}
return NULL;
}
- engine = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
- engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(engine)) {
+ if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
+ enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+ /* Engine specific init may not have been done till this point. */
+ if (dev_priv->engine[RCS])
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = intel_engine_idle(engine,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
}
/* Finally reset hw state */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
intel_engine_init_seqno(engine, seqno);
return 0;
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j].name);
+ dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i].name);
+ err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, &dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ", &ee->requests[j]);
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i].name);
+ dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
}
}
- print_error_obj(m, &dev_priv->engine[i],
+ print_error_obj(m, dev_priv->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, &dev_priv->engine[i],
+ print_error_obj(m, dev_priv->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, &dev_priv->engine[i],
+ print_error_obj(m, dev_priv->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, &dev_priv->engine[i],
+ print_error_obj(m, dev_priv->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, &dev_priv->engine[i],
+ print_error_obj(m, dev_priv->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
}
if (!error->semaphore)
return;
- for_each_engine_id(to, dev_priv, id) {
+ for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
- if (!intel_engine_initialized(engine))
+ if (!engine)
continue;
ee->engine_id = i;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct page *page;
u32 size;
* so its address won't change after we've told the GuC where
* to find it.
*/
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
pr_err("render error detected, EIR: 0x%08x\n", eir);
- intel_engine_get_instdone(&dev_priv->engine[RCS], &instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
engine->hangcheck.deadlock = 0;
}
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
#define BUSY 1
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
/* To avoid the task_struct disappearing beneath us as we wake up
 * the process, we must be holding the RCU lock, i.e. as we
 * call wake_up_process() we must be holding the rcu_read_lock().
 */
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
if (unlikely(intel_engine_wakeup(engine)))
mask |= intel_engine_flag(engine);
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
- for_each_engine(engine, i915) {
+ for_each_engine(engine, i915, id) {
if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
wake_up_process(engine->breadcrumbs.signaler);
mask |= intel_engine_flag(engine);
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
if (fb->modifier[0] != old_fb->modifier[0])
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine == NULL || engine->id != RCS)
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else {
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
}
mmio_flip = use_mmio_flip(engine, obj);
},
};
-static struct intel_engine_cs *
+static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
+ struct intel_engine_cs *engine;
+
+ GEM_BUG_ON(dev_priv->engine[id]);
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
- return engine;
+ dev_priv->engine[id] = engine;
+ return 0;
}
/**
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int i;
int ret;
if (!init)
continue;
- ret = init(intel_engine_setup(dev_priv, i));
+ ret = intel_engine_setup(dev_priv, i);
+ if (ret)
+ goto cleanup;
+
+ ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
return 0;
cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ intel_logical_ring_cleanup(engine);
else
- intel_engine_cleanup(&dev_priv->engine[i]);
+ intel_engine_cleanup(engine);
}
return ret;
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
/*
 * Tasklet cannot be active at this point due to intel_mark_active/idle
* so this is just for documentation.
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
engine->submit_request = execlists_submit_request;
}
{
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
/* Because we emit WA_TAIL_DWORDS there may be a disparity
 * between our bookkeeping in ce->ring->head and ce->ring->tail and
 * what is stored in the context image, so the images are reset on
 * resume. For simplicity, we just zero everything out.
 */
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg;
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
- rcs = &dev_priv->engine[RCS];
+ rcs = dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
if (ret)
return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
if (ret)
return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
dev_priv = engine->i915;
if (engine->buffer) {
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
- return engine->i915 != NULL;
-}
-
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
- idx = (other - engine) - 1;
+ idx = (other->id - engine->id) - 1;
if (idx < 0)
idx += I915_NUM_ENGINES;
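
Note (illustrative, not part of the patch): after this change dev_priv->engine[] holds pointers that are NULL for engines the platform does not expose, and for_each_engine() takes an explicit enum intel_engine_id. A minimal caller sketch, assuming a hypothetical helper do_something_with(), looks like this:

/* Illustrative sketch only -- not part of the patch above.
 * It shows the new iterator signature: the caller supplies an
 * enum intel_engine_id, and for_each_engine() skips the NULL slots
 * left in dev_priv->engine[] for engines that were never allocated.
 * do_something_with() is a hypothetical helper used for demonstration.
 */
static void walk_engines_example(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		do_something_with(engine, id);

	/* Direct indexing must now tolerate NULL, since only enabled
	 * engines are allocated (see eb_select_engine() and
	 * i915_check_and_clear_faults() above).
	 */
	if (dev_priv->engine[VCS2])
		do_something_with(dev_priv->engine[VCS2], VCS2);
}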