#include <drm/i915_drm.h>
#include "i915_drv.h"
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
static int
node->minor = minor;
node->dent = ent;
- node->info_ent = (void *) key;
+ node->info_ent = (void *)key;
mutex_lock(&minor->debugfs_lock);
list_add(&node->list, &minor->debugfs_list);
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- const struct intel_device_info *info = INTEL_INFO(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
- seq_printf(m, "gen: %d\n", info->gen);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+ seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
seq_printf(m, "] %x %s%s%s",
i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex),
- i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+ i915_cache_level_str(dev_priv, obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
}
engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ &dev_priv->drm.struct_mutex);
if (engine)
seq_printf(m, " (%s)", engine->name);
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
struct file_stats stats;
struct drm_file *file;
memset(&stats, 0, sizeof(stats));
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev->struct_mutex);
if (dev_priv->kernel_context)
per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
- list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+ list_for_each_entry(file, &dev->filelist, lhead) {
struct drm_i915_file_private *fpriv = file->driver_priv;
idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
print_file_stats(m, "[k]contexts", stats);
}
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mapped_count, purgeable_count, dpy_count;
u64 size, mapped_size, purgeable_size, dpy_size;
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(node);
+ struct drm_device *dev = &dev_priv->drm;
bool show_pin_display_only = !!node->info_ent->data;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
int ret;
intel_crtc_get_vblank_counter(crtc));
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
else
addr = I915_READ(DSPADDR(crtc->plane));
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
int total = 0;
static int i915_gem_request_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any;
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
int ret;
static int i915_interrupt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
int ret, i, pipe;
return ret;
intel_runtime_pm_get(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
- } else if (INTEL_INFO(dev)->gen >= 8) {
+ } else if (INTEL_GEN(dev_priv) >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
I915_READ(GTIMR));
}
for_each_engine(engine, dev_priv) {
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(node);
struct intel_engine_cs *engine;
const u32 *hws;
int i;
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
struct i915_error_state_file_priv *error_priv;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
return -ENOMEM;
- error_priv->dev = dev;
+ error_priv->dev = &dev_priv->drm;
- i915_error_state_get(dev, error_priv);
+ i915_error_state_get(&dev_priv->drm, error_priv);
file->private_data = error_priv;
ssize_t ret_count = 0;
int ret;
- ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+ ret = i915_error_state_buf_init(&error_str,
+ to_i915(error_priv->dev), count, *pos);
if (ret)
return ret;
static int
i915_next_seqno_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
if (ret)
return ret;
*val = dev_priv->next_seqno;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
- struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int ret = 0;
intel_runtime_pm_get(dev_priv);
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
mutex_unlock(&dev_priv->rps.hw_lock);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
u32 rp_state_cap;
int max_freq;
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
} else {
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
reqf >>= 24;
else
reqf >>= 25;
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
pm_ier = I915_READ(GEN6_PMIER);
pm_imr = I915_READ(GEN6_PMIMR);
pm_isr = I915_READ(GEN6_PMISR);
seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+ max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+ max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore_forcewake_domain *fw_domain;
spin_lock_irq(&dev_priv->uncore.lock);
static int vlv_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 rpmodectl1, rcctl1, pw_status;
intel_runtime_pm_get(dev_priv);
static int gen6_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
seq_printf(m, "Render Well Gating Enabled: %s\n",
yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
seq_printf(m, "Media Well Gating Enabled: %s\n",
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
static int i915_drpc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_drpc_info(m);
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return gen6_drpc_info(m);
else
return ironlake_drpc_info(m);
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
dev_priv->fb_tracking.busy_bits);
static int i915_fbc_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_FBC(dev)) {
+ if (!HAS_FBC(dev_priv)) {
seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) &
FBC_COMPRESSION_MASK));
static int i915_fbc_fc_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
- if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
*val = dev_priv->fbc.false_color;
static int i915_fbc_fc_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 reg;
- if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
mutex_lock(&dev_priv->fbc.lock);
static int i915_ips_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev)) {
+ if (!HAS_IPS(dev_priv)) {
seq_puts(m, "not supported\n");
return 0;
}
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915.enable_ips));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
if (I915_READ(IPS_CTL) & IPS_ENABLE)
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
- IS_I945G(dev) || IS_I945GM(dev))
+ else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev))
+ else if (IS_I915GM(dev_priv))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_runtime_pm_put(dev_priv);
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
unsigned long temp, chipset, gfx;
int ret;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_CORE_RING_FREQ(dev)) {
+ if (!HAS_CORE_RING_FREQ(dev_priv)) {
seq_puts(m, "unsupported on this chipset\n");
return 0;
}
if (ret)
goto out;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_opregion *opregion = &dev_priv->opregion;
int ret;
static int i915_vbt(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
int ret;
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (to_i915(dev)->fbdev) {
- fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+ if (dev_priv->fbdev) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
int ret;
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
int ret;
static int i915_execlists(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
static int i915_swizzle_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_y));
- if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
I915_READ16(C1DRB3));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
I915_READ(MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else
return 0;
}
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
}
}
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
if (IS_GEN6(dev_priv))
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_file *file;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
return ret;
intel_runtime_pm_get(dev_priv);
- if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_info(m, dev);
- else if (INTEL_INFO(dev)->gen >= 6)
- gen6_ppgtt_info(m, dev);
+ if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_info(m, dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_ppgtt_info(m, dev_priv);
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
static int i915_llc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
const bool edram = INTEL_GEN(dev_priv) > 8;
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
static int i915_guc_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_guc guc;
struct i915_guc_client client = {};
struct intel_engine_cs *engine;
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_i915_gem_object *obj;
int i = 0, pg;
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
u32 stat[3];
enum pipe pipe;
bool enabled = false;
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
seq_puts(m, "PSR not supported\n");
return 0;
}
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
for_each_pipe(dev_priv, pipe) {
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
* VLV/CHV PSR has no kind of performance counter
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
EDP_PSR_PERF_CNT_MASK;
static int i915_sink_crc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_connector *connector;
struct intel_dp *intel_dp = NULL;
int ret;
static int i915_energy_uJ(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u64 power;
u32 units;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
intel_runtime_pm_get(dev_priv);
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct pci_dev *pdev = dev_priv->drm.pdev;
if (!HAS_RUNTIME_PM(dev_priv))
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
seq_printf(m, "Usage count: %d\n",
- atomic_read(&dev->dev->power.usage_count));
+ atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
static int i915_dmc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev)) {
+ if (!HAS_CSR(dev_priv)) {
seq_puts(m, "not supported\n");
return 0;
}
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+ if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(SKL_CSR_DC3_DC5_COUNT));
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
- } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+ } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(BXT_CSR_DC3_DC5_COUNT));
}
struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_connector *intel_connector;
struct drm_encoder *encoder;
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
struct drm_plane_state *plane_state = crtc->primary->state;
intel_seq_print_mode(m, 2, mode);
}
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
return state;
}
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+ int pipe, int *x, int *y)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 pos;
pos = I915_READ(CURPOS(pipe));
if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
*y = -*y;
- return cursor_active(dev, pipe);
+ return cursor_active(dev_priv, pipe);
}
static const char *plane_type(enum drm_plane_type type)
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_plane *intel_plane;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
struct drm_connector *connector;
if (pipe_config->base.active) {
intel_crtc_info(m, crtc);
- active = cursor_position(dev, crtc->pipe, &x, &y);
+ active = cursor_position(dev_priv, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
x, y, crtc->base.cursor->state->crtc_w,
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
- int num_rings = INTEL_INFO(dev)->num_rings;
+ int num_rings = INTEL_INFO(dev_priv)->num_rings;
enum intel_engine_id id;
int j, ret;
return ret;
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
struct page *page;
uint64_t *seqno;
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int i;
drm_modeset_lock_all(dev);
int i;
int ret;
struct intel_engine_cs *engine;
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
enum intel_engine_id id;
static int i915_ddb_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
enum pipe pipe;
int plane;
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
drm_modeset_lock_all(dev);
}
static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev, struct intel_crtc *intel_crtc)
+ struct drm_device *dev,
+ struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
static int i915_drrs_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *intel_crtc;
int active_crtc_cnt = 0;
struct pipe_crc_info {
const char *name;
- struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
enum pipe pipe;
};
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct drm_connector *connector;
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(info->dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+ if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
return -ENODEV;
spin_lock_irq(&pipe_crc->lock);
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(info->dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
spin_lock_irq(&pipe_crc->lock);
loff_t *pos)
{
struct pipe_crc_info *info = filep->private_data;
- struct drm_device *dev = info->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
int n_entries;
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
enum pipe pipe)
{
- struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = to_i915(minor->dev);
struct dentry *ent;
struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
- info->dev = dev;
+ info->dev_priv = dev_priv;
ent = debugfs_create_file(info->name, S_IRUGO, root, info,
&i915_pipe_crc_fops);
if (!ent)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
int i;
for (i = 0; i < I915_MAX_PIPES; i++)
static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
-
- return single_open(file, display_crc_ctl_show, dev);
+ return single_open(file, display_crc_ctl_show, inode->i_private);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
enum intel_pipe_crc_source *source)
{
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
return ret;
}
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
return ret;
}
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev))
+ if (!IS_CHERRYVIEW(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
return ret;
}
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev))
+ if (!SUPPORTS_TV(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
need_stable_symbols = true;
if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- WARN_ON(!IS_G4X(dev));
+ WARN_ON(!IS_G4X(dev_priv));
I915_WRITE(PORT_DFT_I9XX,
I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
}
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
struct intel_crtc_state *pipe_config;
drm_atomic_state_free(state);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev, true);
+ if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
return 0;
}
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
- pipe));
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
return -EIO;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
ret = i8xx_pipe_crc_ctl_reg(&source, &val);
- else if (INTEL_INFO(dev)->gen < 5)
- ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (INTEL_GEN(dev_priv) < 5)
+ ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
if (ret != 0)
goto out;
kfree(entries);
- if (IS_G4X(dev))
- g4x_undo_pipe_scramble_reset(dev, pipe);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_undo_pipe_scramble_reset(dev, pipe);
- else if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev, false);
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(crtc);
}
return -EINVAL;
}
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+ char *buf, size_t len)
{
#define N_WORDS 3
int n_words;
return -EINVAL;
}
- return pipe_crc_set_source(dev, pipe, source);
+ return pipe_crc_set_source(dev_priv, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
char *tmpbuf;
int ret;
}
tmpbuf[len] = '\0';
- ret = display_crc_ctl_parse(dev, tmpbuf, len);
+ ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
out:
kfree(tmpbuf);
};
static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
}
static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
+ struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_active_show, dev);
+ return single_open(file, i915_displayport_test_active_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_active_fops = {
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
- struct file *file)
+ struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_data_show, dev);
+ return single_open(file, i915_displayport_test_data_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_data_fops = {
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
static int i915_displayport_test_type_open(struct inode *inode,
struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_type_show, dev);
+ return single_open(file, i915_displayport_test_type_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_type_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
int level;
int num_levels;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
num_levels = 3;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
- IS_CHERRYVIEW(dev))
+ if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.pri_latency;
+ latencies = dev_priv->wm.pri_latency;
wm_latency_show(m, latencies);
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.spr_latency;
+ latencies = dev_priv->wm.spr_latency;
wm_latency_show(m, latencies);
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.cur_latency;
+ latencies = dev_priv->wm.cur_latency;
wm_latency_show(m, latencies);
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev);
+ return single_open(file, pri_wm_latency_show, dev_priv);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev);
+ return single_open(file, spr_wm_latency_show, dev_priv);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev);
+ return single_open(file, cur_wm_latency_show, dev_priv);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, uint16_t wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
uint16_t new[8] = { 0 };
int num_levels;
int level;
int ret;
char tmp[32];
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
num_levels = 3;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.pri_latency;
+ latencies = dev_priv->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.spr_latency;
+ latencies = dev_priv->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.cur_latency;
+ latencies = dev_priv->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
static int
i915_wedged_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = i915_terminally_wedged(&dev_priv->gpu_error);
static int
i915_wedged_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
/*
* There is no safeguard against this debugfs entry colliding
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = dev_priv->gpu_error.missed_irq_rings;
return 0;
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
/* Lock against concurrent debugfs callers */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = dev_priv->gpu_error.test_irq_rings;
static int
i915_ring_test_irq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
val &= INTEL_INFO(dev_priv)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
static int
i915_drop_caches_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
static int
i915_max_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
static int
i915_max_freq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 hw_max, hw_min;
int ret;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
static int
i915_min_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
static int
i915_min_freq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 hw_max, hw_min;
int ret;
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
- if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+ if (val < hw_min ||
+ val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
static int
i915_cache_sharing_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
static int
i915_cache_sharing_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 snpcr;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
if (val > 3)
unsigned int eu_per_subslice;
};
-static void cherryview_sseu_device_status(struct drm_device *dev,
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int ss_max = 2;
int ss;
u32 sig1[ss_max], sig2[ss_max];
stat->subslice_total = stat->subslice_per_slice;
}
-static void gen9_sseu_device_status(struct drm_device *dev,
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
/* BXT has a single slice and at most 3 subslices. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
s_max = 1;
ss_max = 3;
}
stat->slice_total++;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ ss_cnt = INTEL_INFO(dev_priv)->subslice_per_slice;
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
- if (IS_BROXTON(dev) &&
+ if (IS_BROXTON(dev_priv) &&
!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
ss_cnt++;
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
}
}
-static void broadwell_sseu_device_status(struct drm_device *dev,
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int s;
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+ int s;
stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
if (stat->slice_total) {
- stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
+ stat->subslice_per_slice = INTEL_INFO(dev_priv)->subslice_per_slice;
stat->subslice_total = stat->slice_total *
stat->subslice_per_slice;
- stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
+ stat->eu_per_subslice = INTEL_INFO(dev_priv)->eu_per_subslice;
stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < stat->slice_total; s++) {
- u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+ u8 subslice_7eu = INTEL_INFO(dev_priv)->subslice_7eu[s];
stat->eu_total -= hweight8(subslice_7eu);
}
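
(Worked example for the Broadwell EU accounting above, using hypothetical fuse values rather than anything from the patch.)

/*
 * Hypothetical values: slice_total = 2 and subslice_per_slice = 3 give
 * subslice_total = 6; with eu_per_subslice = 8, eu_total starts at 48.
 * If subslice_7eu[s] has one bit set for each slice, hweight8() removes
 * one EU per slice, leaving eu_total = 46.
 */
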
static int i915_sseu_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct sseu_dev_status stat;
- if (INTEL_INFO(dev)->gen < 8)
+ if (INTEL_GEN(dev_priv) < 8)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
seq_printf(m, " Available Slice Total: %u\n",
- INTEL_INFO(dev)->slice_total);
+ INTEL_INFO(dev_priv)->slice_total);
seq_printf(m, " Available Subslice Total: %u\n",
- INTEL_INFO(dev)->subslice_total);
+ INTEL_INFO(dev_priv)->subslice_total);
seq_printf(m, " Available Subslice Per Slice: %u\n",
- INTEL_INFO(dev)->subslice_per_slice);
+ INTEL_INFO(dev_priv)->subslice_per_slice);
seq_printf(m, " Available EU Total: %u\n",
- INTEL_INFO(dev)->eu_total);
+ INTEL_INFO(dev_priv)->eu_total);
seq_printf(m, " Available EU Per Subslice: %u\n",
- INTEL_INFO(dev)->eu_per_subslice);
- seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
- if (HAS_POOLED_EU(dev))
+ INTEL_INFO(dev_priv)->eu_per_subslice);
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+ if (HAS_POOLED_EU(dev_priv))
seq_printf(m, " Min EU in pool: %u\n",
- INTEL_INFO(dev)->min_eu_in_pool);
+ INTEL_INFO(dev_priv)->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_slice_pg));
+ yesno(INTEL_INFO(dev_priv)->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_subslice_pg));
+ yesno(INTEL_INFO(dev_priv)->has_subslice_pg));
seq_printf(m, " Has EU Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_eu_pg));
+ yesno(INTEL_INFO(dev_priv)->has_eu_pg));
seq_puts(m, "SSEU Device Status\n");
memset(&stat, 0, sizeof(stat));
intel_runtime_pm_get(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
- cherryview_sseu_device_status(dev, &stat);
- } else if (IS_BROADWELL(dev)) {
- broadwell_sseu_device_status(dev, &stat);
- } else if (INTEL_INFO(dev)->gen >= 9) {
- gen9_sseu_device_status(dev, &stat);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_sseu_device_status(dev_priv, &stat);
+ } else if (IS_BROADWELL(dev_priv)) {
+ broadwell_sseu_device_status(dev_priv, &stat);
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ gen9_sseu_device_status(dev_priv, &stat);
}
intel_runtime_pm_put(dev_priv);
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return 0;
intel_runtime_pm_get(dev_priv);
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return 0;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
struct dentry *ent;
ent = debugfs_create_file("i915_forcewake_user",
S_IRUSR,
- root, dev,
+ root, to_i915(minor->dev),
&i915_forcewake_fops);
if (!ent)
return -ENOMEM;
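
(Sketch, not part of the patch: the dev_priv handed to debugfs_create_file() above is stored in the new inode's i_private, which is exactly what i915_forcewake_open()/release() read; the fops block is assumed to look roughly like this.)

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,       /* reads dev_priv from inode->i_private */
	.release = i915_forcewake_release,
};
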
static int i915_debugfs_create(struct dentry *root,
struct drm_minor *minor,
const char *name,
const struct file_operations *fops)
{
- struct drm_device *dev = minor->dev;
struct dentry *ent;
ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
- root, dev,
+ root, to_i915(minor->dev),
fops);
if (!ent)
return -ENOMEM;
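
(Sketch, not part of the patch: since i915_debugfs_create() now registers to_i915(minor->dev) as the file's data, the simple-attribute callbacks earlier in the patch, i915_wedged_*, i915_max/min_freq_* and i915_cache_sharing_*, receive dev_priv directly as their void *data argument; the declaration is assumed to look roughly like this.)

/* simple_attr_open() forwards inode->i_private, i.e. dev_priv, to the
 * get/set callbacks as void *data */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
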
{"i915_dp_test_active", &i915_displayport_test_active_fops}
};
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+ drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
1, minor);
for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
- (struct drm_info_list *) i915_debugfs_files[i].fops;
+ (struct drm_info_list *)i915_debugfs_files[i].fops;
drm_debugfs_remove_files(info_list, 1, minor);
}