seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ for (gpu_freq = dev_priv->rps.min_delay;
+ gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * 50);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->max_delay = val / 50;
+ dev_priv->rps.max_delay = val / 50;
gen6_set_rps(dev, val / 50);
mutex_unlock(&dev->struct_mutex);
return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * 50);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->min_delay = val / 50;
+ dev_priv->rps.min_delay = val / 50;
gen6_set_rps(dev, val / 50);
mutex_unlock(&dev->struct_mutex);
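Both debugfs hooks convert between MHz and the hardware delay encoding: gen6 RP delay values count in steps of 50 MHz, hence the "* 50" on read and "/ 50" on write. A minimal sketch of the conversion, with helper names that are illustrative rather than from the patch:

	/* Illustrative only: gen6 RP delay units are 50 MHz per step. */
	static inline int gen6_delay_to_mhz(u8 delay) { return delay * 50; }
	static inline u8 gen6_mhz_to_delay(int mhz) { return mhz / 50; }
	/* e.g. writing 1125 MHz truncates to delay 22, i.e. 1100 MHz */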
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- spin_lock_init(&dev_priv->rps_lock);
+ spin_lock_init(&dev_priv->rps.lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
bool mchbar_need_disable;
- struct work_struct rps_work;
- spinlock_t rps_lock;
- u32 pm_iir;
+ /* gen6+ rps state */
+ struct {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The below variables and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ } rps;
+
- u8 cur_delay;
- u8 min_delay;
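The substruct makes the locking split explicit: rps.work and rps.pm_iir are shared with the interrupt handler and sit under the irqsave spinlock, while the delay fields and the rps hardware state may only be touched from process context under dev->struct_mutex. A minimal sketch of the two access patterns (the function name is illustrative, not part of the patch):

	/* Illustrative only -- not part of the patch. */
	static void example_rps_access(struct drm_i915_private *dev_priv,
				       u8 new_delay)
	{
		unsigned long flags;

		/* irq-shared state: rps.lock with interrupts disabled */
		spin_lock_irqsave(&dev_priv->rps.lock, flags);
		dev_priv->rps.pm_iir = 0;
		spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

		/* hw state: struct_mutex only, never from interrupt context */
		mutex_lock(&dev_priv->dev->struct_mutex);
		dev_priv->rps.cur_delay = new_delay;
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}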
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps_work);
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps_lock);
- pm_iir = dev_priv->pm_iir;
- dev_priv->pm_iir = 0;
+ spin_lock_irq(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
- new_delay = dev_priv->cur_delay + 1;
+ new_delay = dev_priv->rps.cur_delay + 1;
else
- new_delay = dev_priv->cur_delay - 1;
+ new_delay = dev_priv->rps.cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
- * The mask bit in IMR is cleared by rps_work.
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ WARN(dev_priv->rps.pm_iir & pm_iir, "Missed a PM interrupt\n");
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
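Note the two flavors of taking rps.lock: the interrupt path above uses spin_lock_irqsave() because it can be entered with interrupts already disabled and must restore the previous state, whereas gen6_pm_rps_work() runs in process context with interrupts known to be enabled and can use the cheaper spin_lock_irq(). A condensed model of the worker's fetch-and-clear half of the handshake, using the names from the patch:

	/* Illustrative only: the worker's half of the pm_iir handshake. */
	static u32 example_fetch_pm_iir(struct drm_i915_private *dev_priv)
	{
		u32 pm_iir;

		spin_lock_irq(&dev_priv->rps.lock);	/* process context */
		pm_iir = dev_priv->rps.pm_iir;	/* take ownership of the bits */
		dev_priv->rps.pm_iir = 0;
		spin_unlock_irq(&dev_priv->rps.lock);

		return pm_iir;
	}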
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
- cancel_work_sync(&dev_priv->rps_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
u32 limits;
limits = 0;
- if (*val >= dev_priv->max_delay)
- *val = dev_priv->max_delay;
- limits |= dev_priv->max_delay << 24;
+
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (*val <= dev_priv->min_delay) {
- *val = dev_priv->min_delay;
- limits |= dev_priv->min_delay << 16;
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
}
return limits;
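GEN6_RP_INTERRUPT_LIMITS packs both clamps into a single register: the up limit (max_delay) goes in bits 31:24 and the down limit (min_delay) in bits 23:16, with the down limit armed only once the request hits the floor, for the rc6-exit reason given in the comment. A worked packing with hypothetical delay values:

	/* Illustrative: max_delay = 0x16 (1100 MHz), min_delay = 0x06
	 * (300 MHz), request clamped at the floor: */
	u32 limits = 0x16 << 24 | 0x06 << 16;	/* == 0x16060000 */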
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (val == dev_priv->cur_delay)
+ if (val == dev_priv->rps.cur_delay)
return;
I915_WRITE(GEN6_RPNSWREQ,
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- dev_priv->cur_delay = val;
+ dev_priv->rps.cur_delay = val;
}
static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 100MHz */
- dev_priv->max_delay = rp_state_cap & 0xff;
- dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
- dev_priv->cur_delay = 0;
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->max_delay << 24 |
- dev_priv->min_delay << 16);
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
if (IS_HASWELL(dev)) {
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->max_delay = pcu_mbox & 0xff;
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
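When bit 31 of the PCODE reply is set, the PCU advertises overclocking headroom, and the low byte carries the raised limit, again in 50 MHz units; the patch simply repoints the assignment at rps.max_delay. A worked decode with a hypothetical mailbox value:

	/* Illustrative: the pcu_mbox value here is hypothetical. */
	u32 pcu_mbox = (1u << 31) | 0x19;	/* OC supported, limit 0x19 */
	if (pcu_mbox & (1 << 31))
		dev_priv->rps.max_delay = pcu_mbox & 0xff; /* 25 * 50 = 1250 MHz */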
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
+ spin_lock_irq(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ for (gpu_freq = dev_priv->rps.max_delay;
+ gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_delay - gpu_freq;
/*
* For GPU frequencies less than 750MHz, just use the lowest
assert_spin_locked(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
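The ironlake ips path now reads the current delay from the same substruct: PXVFREQ is a per-state table indexed by rps.cur_delay, and the shift-and-mask shows that only bits 30:24 of each entry carry the voltage id. A worked decode with a hypothetical register value:

	/* Illustrative: the raw PXVFREQ value here is hypothetical. */
	u32 pxvid = 0x2d000000;		/* table entry for rps.cur_delay */
	u8 vid = (pxvid >> 24) & 0x7f;	/* vid == 0x2d */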