drm/i915: Move sandybridge pcode access to intel_sideband.c
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 26 Apr 2019 08:17:25 +0000 (09:17 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 26 Apr 2019 09:20:47 +0000 (10:20 +0100)
sandybridge_pcode is another sideband, so move it to its new home.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426081725.31217-8-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_hdcp.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sideband.h

index c18b28271bfde706031c5a66279f2a12d915113a..1cea98f8b85c596673433b2e2650e9098f059436 100644 (file)
@@ -3420,16 +3420,6 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_display_error_state *error);
 
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
-                                   u32 val, int fast_timeout_us,
-                                   int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val)   \
-       sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-                     u32 reply_mask, u32 reply, int timeout_base_ms);
-
 /* intel_dpio_phy.c */
 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
                             enum dpio_phy *phy, enum dpio_channel *ch);
index 2476e867981db0dbe71d7f31bf1ca232ffefcd7b..ca5982e45e3eb56fcc3778e5747e3ff4ebb5e43c 100644 (file)
@@ -16,6 +16,7 @@
 #include "i915_reg.h"
 #include "intel_drv.h"
 #include "intel_hdcp.h"
+#include "intel_sideband.h"
 
 #define KEY_LOAD_TRIES 5
 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS       50
index 4b1cd9041b335dcc5a5154f8f7c4c90799f5732d..3687e91659567e28246f8ff0beb0442fc541737e 100644 (file)
@@ -9704,201 +9704,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
        }
 }
 
-static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv,
-                                           u32 mbox)
-{
-       switch (mbox & GEN6_PCODE_ERROR_MASK) {
-       case GEN6_PCODE_SUCCESS:
-               return 0;
-       case GEN6_PCODE_UNIMPLEMENTED_CMD:
-               return -ENODEV;
-       case GEN6_PCODE_ILLEGAL_CMD:
-               return -ENXIO;
-       case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-               return -EOVERFLOW;
-       case GEN6_PCODE_TIMEOUT:
-               return -ETIMEDOUT;
-       default:
-               MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-               return 0;
-       }
-}
-
-static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv,
-                                           u32 mbox)
-{
-       switch (mbox & GEN6_PCODE_ERROR_MASK) {
-       case GEN6_PCODE_SUCCESS:
-               return 0;
-       case GEN6_PCODE_ILLEGAL_CMD:
-               return -ENXIO;
-       case GEN7_PCODE_TIMEOUT:
-               return -ETIMEDOUT;
-       case GEN7_PCODE_ILLEGAL_DATA:
-               return -EINVAL;
-       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-               return -EOVERFLOW;
-       default:
-               MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-               return 0;
-       }
-}
-
-static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
-                                 u32 mbox, u32 *val,
-                                 int fast_timeout_us,
-                                 int slow_timeout_ms,
-                                 bool is_read)
-{
-       lockdep_assert_held(&dev_priv->sb_lock);
-
-       /*
-        * GEN6_PCODE_* are outside of the forcewake domain, we can
-        * use te fw I915_READ variants to reduce the amount of work
-        * required when reading/writing.
-        */
-
-       if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
-               return -EAGAIN;
-
-       I915_WRITE_FW(GEN6_PCODE_DATA, *val);
-       I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
-       I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
-       if (__intel_wait_for_register_fw(&dev_priv->uncore,
-                                        GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-                                        fast_timeout_us,
-                                        slow_timeout_ms,
-                                        &mbox))
-               return -ETIMEDOUT;
-
-       if (is_read)
-               *val = I915_READ_FW(GEN6_PCODE_DATA);
-
-       if (INTEL_GEN(dev_priv) > 6)
-               return gen7_check_mailbox_status(dev_priv, mbox);
-       else
-               return gen6_check_mailbox_status(dev_priv, mbox);
-}
-
-int
-sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
-{
-       int err;
-
-       mutex_lock(&dev_priv->sb_lock);
-       err = __sandybridge_pcode_rw(dev_priv, mbox, val,
-                                    500, 0,
-                                    true);
-       mutex_unlock(&dev_priv->sb_lock);
-
-       if (err) {
-               DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
-                                mbox, __builtin_return_address(0), err);
-       }
-
-       return err;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
-                                   u32 mbox, u32 val,
-                                   int fast_timeout_us,
-                                   int slow_timeout_ms)
-{
-       int err;
-
-       mutex_lock(&dev_priv->sb_lock);
-       err = __sandybridge_pcode_rw(dev_priv, mbox, &val,
-                                    fast_timeout_us, slow_timeout_ms,
-                                    false);
-       mutex_unlock(&dev_priv->sb_lock);
-
-       if (err) {
-               DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
-                                val, mbox, __builtin_return_address(0), err);
-       }
-
-       return err;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
-                                 u32 request, u32 reply_mask, u32 reply,
-                                 u32 *status)
-{
-       *status = __sandybridge_pcode_rw(dev_priv, mbox, &request,
-                                        500, 0,
-                                        true);
-
-       return *status || ((request & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @dev_priv: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-                     u32 reply_mask, u32 reply, int timeout_base_ms)
-{
-       u32 status;
-       int ret;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
-                                  &status)
-
-       /*
-        * Prime the PCODE by doing a request first. Normally it guarantees
-        * that a subsequent request, at most @timeout_base_ms later, succeeds.
-        * _wait_for() doesn't guarantee when its passed condition is evaluated
-        * first, so send the first request explicitly.
-        */
-       if (COND) {
-               ret = 0;
-               goto out;
-       }
-       ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
-       if (!ret)
-               goto out;
-
-       /*
-        * The above can time out if the number of requests was low (2 in the
-        * worst case) _and_ PCODE was busy for some reason even after a
-        * (queued) request and @timeout_base_ms delay. As a workaround retry
-        * the poll with preemption disabled to maximize the number of
-        * requests. Increase the timeout from @timeout_base_ms to 50ms to
-        * account for interrupts that could reduce the number of these
-        * requests, and for any quirks of the PCODE firmware that delays
-        * the request completion.
-        */
-       DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
-       WARN_ON_ONCE(timeout_base_ms > 3);
-       preempt_disable();
-       ret = wait_for_atomic(COND, 50);
-       preempt_enable();
-
-out:
-       mutex_unlock(&dev_priv->sb_lock);
-       return ret ? ret : status;
-#undef COND
-}
-
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
index 7113fb8850d6d3fe7a17b6861129ee656a126e22..87b5a14c7ca887f262a0d101746bd60b4315dbbb 100644 (file)
@@ -333,3 +333,199 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
 {
        intel_sbi_rw(i915, reg, destination, &value, false);
 }
+
+static inline int gen6_check_mailbox_status(u32 mbox)
+{
+       switch (mbox & GEN6_PCODE_ERROR_MASK) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_UNIMPLEMENTED_CMD:
+               return -ENODEV;
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       case GEN6_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       default:
+               MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+               return 0;
+       }
+}
+
+static inline int gen7_check_mailbox_status(u32 mbox)
+{
+       switch (mbox & GEN6_PCODE_ERROR_MASK) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN7_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       case GEN7_PCODE_ILLEGAL_DATA:
+               return -EINVAL;
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       default:
+               MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+               return 0;
+       }
+}
+
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+                                 u32 mbox, u32 *val,
+                                 int fast_timeout_us,
+                                 int slow_timeout_ms,
+                                 bool is_read)
+{
+       struct intel_uncore *uncore = &i915->uncore;
+
+       lockdep_assert_held(&i915->sb_lock);
+
+       /*
+        * GEN6_PCODE_* are outside of the forcewake domain, we can
+        * use the fw I915_READ variants to reduce the amount of work
+        * required when reading/writing.
+        */
+
+       if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+               return -EAGAIN;
+
+       intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+       intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, 0);
+       intel_uncore_write_fw(uncore,
+                             GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+       if (__intel_wait_for_register_fw(uncore,
+                                        GEN6_PCODE_MAILBOX,
+                                        GEN6_PCODE_READY, 0,
+                                        fast_timeout_us,
+                                        slow_timeout_ms,
+                                        &mbox))
+               return -ETIMEDOUT;
+
+       if (is_read)
+               *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+
+       if (INTEL_GEN(i915) > 6)
+               return gen7_check_mailbox_status(mbox);
+       else
+               return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val)
+{
+       int err;
+
+       mutex_lock(&i915->sb_lock);
+       err = __sandybridge_pcode_rw(i915, mbox, val,
+                                    500, 0,
+                                    true);
+       mutex_unlock(&i915->sb_lock);
+
+       if (err) {
+               DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+                                mbox, __builtin_return_address(0), err);
+       }
+
+       return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+                                   u32 mbox, u32 val,
+                                   int fast_timeout_us,
+                                   int slow_timeout_ms)
+{
+       int err;
+
+       mutex_lock(&i915->sb_lock);
+       err = __sandybridge_pcode_rw(i915, mbox, &val,
+                                    fast_timeout_us, slow_timeout_ms,
+                                    false);
+       mutex_unlock(&i915->sb_lock);
+
+       if (err) {
+               DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+                                val, mbox, __builtin_return_address(0), err);
+       }
+
+       return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+                                 u32 request, u32 reply_mask, u32 reply,
+                                 u32 *status)
+{
+       *status = __sandybridge_pcode_rw(i915, mbox, &request,
+                                        500, 0,
+                                        true);
+
+       return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+       u32 status;
+       int ret;
+
+       mutex_lock(&i915->sb_lock);
+
+#define COND \
+       skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+       /*
+        * Prime the PCODE by doing a request first. Normally it guarantees
+        * that a subsequent request, at most @timeout_base_ms later, succeeds.
+        * _wait_for() doesn't guarantee when its passed condition is evaluated
+        * first, so send the first request explicitly.
+        */
+       if (COND) {
+               ret = 0;
+               goto out;
+       }
+       ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+       if (!ret)
+               goto out;
+
+       /*
+        * The above can time out if the number of requests was low (2 in the
+        * worst case) _and_ PCODE was busy for some reason even after a
+        * (queued) request and @timeout_base_ms delay. As a workaround retry
+        * the poll with preemption disabled to maximize the number of
+        * requests. Increase the timeout from @timeout_base_ms to 50ms to
+        * account for interrupts that could reduce the number of these
+        * requests, and for any quirks of the PCODE firmware that delays
+        * the request completion.
+        */
+       DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+       WARN_ON_ONCE(timeout_base_ms > 3);
+       preempt_disable();
+       ret = wait_for_atomic(COND, 50);
+       preempt_enable();
+
+out:
+       mutex_unlock(&i915->sb_lock);
+       return ret ? ret : status;
+#undef COND
+}
index 9d36bdc1795556b9f390870c3d322926689a31b6..a0907e2c499251443394f5a72172c22abd6b7b56 100644 (file)
@@ -127,4 +127,14 @@ u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
 void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
                     enum intel_sbi_destination destination);
 
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+                                   u32 val, int fast_timeout_us,
+                                   int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val)       \
+       sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms);
+
 #endif /* _INTEL_SIDEBAND_H */