u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
- unsigned long flags;
- u32 val = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
	   DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
- goto out_unlock;
+ return 0;
}
- val = I915_READ(DPIO_DATA);
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return val;
+ return I915_READ(DPIO_DATA);
}
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
- unsigned long flags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return;
}
I915_WRITE(DPIO_DATA, val);
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
	   DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static void vlv_init_dpio(struct drm_device *dev)
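With the spin_lock_irqsave()/spin_unlock_irqrestore() pair gone, intel_dpio_read() and intel_dpio_write() no longer serialize access themselves; the caller must hold dev_priv->dpio_lock, and the new WARN_ON(!mutex_is_locked(...)) only asserts that. A minimal sketch of the resulting caller-side pattern (the function name is illustrative; DPIO_DATA_CHANNEL2 and the bit are borrowed from the PLL hunk further down):

/* Illustrative only: a DPIO read-modify-write bracketed by the mutex that
 * the reworked helpers now expect their caller to hold.
 */
static void example_dpio_rmw(struct drm_i915_private *dev_priv)
{
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	temp = intel_dpio_read(dev_priv, DPIO_DATA_CHANNEL2);
	temp |= (1 << 21);
	intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);

	mutex_unlock(&dev_priv->dpio_lock);
}

The wait_for_atomic_us() polls inside the helpers stay busy-waiting; only the outer serialization changes from an irq-disabling spinlock to a mutex.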
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
- unsigned long flags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return;
}
I915_WRITE(SBI_ADDR,
	   (reg << 16));
I915_WRITE(SBI_DATA,
	   value);
I915_WRITE(SBI_CTL_STAT,
	   SBI_BUSY |
	   SBI_CTL_OP_CRWR);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
- goto out_unlock;
+ return;
}
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
- unsigned long flags;
- u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(SBI_ADDR,
	   (reg << 16));
I915_WRITE(SBI_CTL_STAT,
	   SBI_BUSY |
	   SBI_CTL_OP_CRRD);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
- goto out_unlock;
+ return 0;
}
- value = I915_READ(SBI_DATA);
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return value;
+ return I915_READ(SBI_DATA);
}
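The SBI helpers follow the same convention: the caller takes dev_priv->dpio_lock and the helpers merely assert it. Below is a sketch of the read-modify-write that the iCLKIP code performs under the lock; SBI_SSCCTL6 and SBI_SSCCTL_DISABLE are assumed from that path and do not appear in the hunks quoted here:

/* Illustrative sketch: an SBI read-modify-write under dev_priv->dpio_lock.
 * Register and bit names are assumptions, not part of this patch.
 */
static void example_sbi_rmw(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	mutex_lock(&dev_priv->dpio_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE);

	mutex_unlock(&dev_priv->dpio_lock);
}

Because the whole sequence sits under one mutex, the read and the dependent write can no longer interleave with another SBI transaction.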
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
udelay(24);
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
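The two lpt_program_iclkip() hunks above sit far apart in the function; condensed, the new lock scope looks roughly like this (the divisor computation and SBI programming in the middle are elided, and crtc->dev->dev_private is the conventional drm accessor, not something this patch touches):

/* Condensed, illustrative shape of lpt_program_iclkip() after the change:
 * dpio_lock is held across the entire iCLKIP/SBI sequence.
 */
static void lpt_program_iclkip_shape(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	mutex_lock(&dev_priv->dpio_lock);

	/* ... PIXCLK gating and iCLKIP divisor programming via
	 * intel_sbi_read()/intel_sbi_write() elided ...
	 */

	udelay(24);
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}

The hunk that follows applies the same bracketing around the intel_dpio_*() accesses in the PLL update path.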
bool is_sdvo;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void i9xx_update_pll(struct drm_crtc *crtc,