* write.
*/
if (mmio->in_context &&
- ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
- i915_modparams.enable_execlists)
+ (ctx_ctrl & inhibit_mask) != inhibit_mask)
continue;
if (mmio->mask)
return 0;
}
-static void i915_dump_lrc_obj(struct seq_file *m,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct i915_vma *vma = ctx->engine[engine->id].state;
- struct page *page;
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-
- if (!vma) {
- seq_puts(m, "\tFake context\n");
- return;
- }
-
- if (vma->flags & I915_VMA_GLOBAL_BIND)
- seq_printf(m, "\tBound in GGTT at 0x%08x\n",
- i915_ggtt_offset(vma));
-
- if (i915_gem_object_pin_pages(vma->obj)) {
- seq_puts(m, "\tFailed to get pages for context object\n\n");
- return;
- }
-
- page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
- if (page) {
- u32 *reg_state = kmap_atomic(page);
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m,
- "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- j * 4,
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
- }
-
- i915_gem_object_unpin_pages(vma->obj);
- seq_putc(m, '\n');
-}
-
-static int i915_dump_lrc(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
- int ret;
-
- if (!i915_modparams.enable_execlists) {
- seq_printf(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- for_each_engine(engine, dev_priv, id)
- i915_dump_lrc_obj(m, ctx, engine);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
value |= I915_SCHEDULER_CAP_ENABLED;
value |= I915_SCHEDULER_CAP_PRIORITY;
-
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
- i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
value |= I915_SCHEDULER_CAP_PREEMPTION;
}
break;
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- i915_modparams.enable_execlists =
- intel_sanitize_enable_execlists(dev_priv,
- i915_modparams.enable_execlists);
-
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
((dev_priv)->info.has_logical_ring_contexts)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
((dev_priv)->info.has_logical_ring_preemption)
+
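+/*
+ * With the enable_execlists module parameter gone, execlists support is
+ * implied by logical ring context support: any platform that has LRCs
+ * runs in execlists mode.
+ */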
+#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
return false;
/* TODO: make semaphores and Execlists play nicely together */
- if (i915_modparams.enable_execlists)
+ if (HAS_EXECLISTS(dev_priv))
return false;
if (value >= 0)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915_modparams.enable_execlists) {
- dev_priv->gt.resume = intel_legacy_submission_resume;
- dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- } else {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+ } else {
+ dev_priv->gt.resume = intel_legacy_submission_resume;
+ dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
/* This is just a security blanket to placate dragons.
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
init_llist_head(&dev_priv->contexts.free_list);
- if (intel_vgpu_active(dev_priv) &&
- HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (!i915_modparams.enable_execlists) {
- DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
- return -EINVAL;
- }
- }
-
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
struct intel_engine_cs *engine = req->engine;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(i915_modparams.enable_execlists);
+ GEM_BUG_ON(HAS_EXECLISTS(req->i915));
if (!req->ctx->engine[engine->id].state) {
struct i915_gem_context *to = req->ctx;
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (has_full_48bit_ppgtt)
return 3;
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
return 0;
if (!USES_PPGTT(dev_priv))
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-i915_param_named_unsafe(enable_execlists, int, 0400,
- "Override execlists usage. "
- "(-1=auto [default], 0=disabled, 1=enabled)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
param(int, enable_ppgtt, -1) \
- param(int, enable_execlists, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- else {
+ } else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_ring *ring;
int ret;
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
gen7_oa_hw_tail_read;
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (i915_modparams.enable_execlists) {
+ } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
- return i915_modparams.enable_execlists ?
- GEN8_LR_CONTEXT_RENDER_SIZE :
- GEN8_CXT_TOTAL_SIZE;
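+ /*
+  * Execlists are now mandatory on gen8+, so the legacy ringbuffer
+  * context size (GEN8_CXT_TOTAL_SIZE) is no longer a possible result.
+  */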
+ return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
if (IS_HASWELL(dev_priv))
return HSW_CXT_TOTAL_SIZE;
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
- if (i915_modparams.enable_execlists)
+ if (HAS_EXECLISTS(dev_priv))
init = class_info->init_execlists;
else
init = class_info->init_legacy;
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (i915_modparams.enable_execlists) {
+ if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
u32 ptr, read, write;
unsigned int idx;
return 0;
}
- if (!i915_modparams.enable_execlists) {
- DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
- return -EIO;
- }
-
if (i915_modparams.enable_guc_submission) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
struct intel_engine_cs *engine,
struct intel_ring *ring);
-/**
- * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev_priv: i915 device private
- * @enable_execlists: value of i915.enable_execlists module parameter.
- *
- * Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better).
- *
- * Return: 1 if Execlists is supported and has to be enabled.
- */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
-{
- /* On platforms with execlist available, vGPU will only
- * support execlist mode, no ring buffer mode.
- */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
- return 1;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return 1;
-
- if (enable_execlists == 0)
- return 0;
-
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv))
- return 1;
-
- return 0;
-}
-
/**
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* descriptor for a pinned context
return ctx->engine[engine->id].lrc_desc;
}
-/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
- int enable_execlists);
-
#endif /* _INTEL_LRC_H_ */