drm/i915: move more generic utils to i915_utils.h
author		Jani Nikula <jani.nikula@intel.com>
		Thu, 2 May 2019 15:02:46 +0000 (18:02 +0300)
committer	Jani Nikula <jani.nikula@intel.com>
		Fri, 3 May 2019 07:06:51 +0000 (10:06 +0300)
Reduce clutter from i915_drv.h and intel_drv.h.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/8c197872384fc35442b738c21ba0da9336e02a85.1556809195.git.jani.nikula@intel.com
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/i915/intel_drv.h

drivers/gpu/drm/i915/i915_drv.h
index 5f65c75594ef1f7109d63f344fd3afcd6a6db39e..d167053d6fce3c011b937728a99ce29960cbd830 100644
@@ -3357,50 +3357,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
-       unsigned long j = msecs_to_jiffies(m);
-
-       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
-       /* nsecs_to_jiffies64() does not guard against overflow */
-       if (NSEC_PER_SEC % HZ &&
-           div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
-               return MAX_JIFFY_OFFSET;
-
-        return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
-       unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
-       /*
-        * Don't re-read the value of "jiffies" every time since it may change
-        * behind our back and break the math.
-        */
-       tmp_jiffies = jiffies;
-       target_jiffies = timestamp_jiffies +
-                        msecs_to_jiffies_timeout(to_wait_ms);
-
-       if (time_after(target_jiffies, tmp_jiffies)) {
-               remaining_jiffies = target_jiffies - tmp_jiffies;
-               while (remaining_jiffies)
-                       remaining_jiffies =
-                           schedule_timeout_uninterruptible(remaining_jiffies);
-       }
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
 
drivers/gpu/drm/i915/i915_utils.h
index 26117bf7da0c0380ab746898ed66dccf1d55f36b..c849cfa7cb282a76d0d4dadf8b05d1c2f4a05550 100644
@@ -26,6 +26,7 @@
 #define __I915_UTILS_H
 
 #include <linux/list.h>
+#include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
@@ -176,6 +177,158 @@ static inline void drain_delayed_work(struct delayed_work *dw)
        } while (delayed_work_pending(dw));
 }
 
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+       unsigned long j = msecs_to_jiffies(m);
+
+       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+       /* nsecs_to_jiffies64() does not guard against overflow */
+       if (NSEC_PER_SEC % HZ &&
+           div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+               return MAX_JIFFY_OFFSET;
+
+        return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+       unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+       /*
+        * Don't re-read the value of "jiffies" every time since it may change
+        * behind our back and break the math.
+        */
+       tmp_jiffies = jiffies;
+       target_jiffies = timestamp_jiffies +
+                        msecs_to_jiffies_timeout(to_wait_ms);
+
+       if (time_after(target_jiffies, tmp_jiffies)) {
+               remaining_jiffies = target_jiffies - tmp_jiffies;
+               while (remaining_jiffies)
+                       remaining_jiffies =
+                           schedule_timeout_uninterruptible(remaining_jiffies);
+       }
+}
+
+/**
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+       const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+       long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+       int ret__;                                                      \
+       might_sleep();                                                  \
+       for (;;) {                                                      \
+               const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+               OP;                                                     \
+               /* Guarantee COND check prior to timeout */             \
+               barrier();                                              \
+               if (COND) {                                             \
+                       ret__ = 0;                                      \
+                       break;                                          \
+               }                                                       \
+               if (expired__) {                                        \
+                       ret__ = -ETIMEDOUT;                             \
+                       break;                                          \
+               }                                                       \
+               usleep_range(wait__, wait__ * 2);                       \
+               if (wait__ < (Wmax))                                    \
+                       wait__ <<= 1;                                   \
+       }                                                               \
+       ret__;                                                          \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax)        __wait_for(, (COND), (US), (Wmin), \
+                                                  (Wmax))
+#define wait_for(COND, MS)             _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+       int cpu, ret, timeout = (US) * 1000; \
+       u64 base; \
+       _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+       if (!(ATOMIC)) { \
+               preempt_disable(); \
+               cpu = smp_processor_id(); \
+       } \
+       base = local_clock(); \
+       for (;;) { \
+               u64 now = local_clock(); \
+               if (!(ATOMIC)) \
+                       preempt_enable(); \
+               /* Guarantee COND check prior to timeout */ \
+               barrier(); \
+               if (COND) { \
+                       ret = 0; \
+                       break; \
+               } \
+               if (now - base >= timeout) { \
+                       ret = -ETIMEDOUT; \
+                       break; \
+               } \
+               cpu_relax(); \
+               if (!(ATOMIC)) { \
+                       preempt_disable(); \
+                       if (unlikely(cpu != smp_processor_id())) { \
+                               timeout -= now - base; \
+                               cpu = smp_processor_id(); \
+                               base = local_clock(); \
+                       } \
+               } \
+       } \
+       ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+       int ret__; \
+       BUILD_BUG_ON(!__builtin_constant_p(US)); \
+       if ((US) > 10) \
+               ret__ = _wait_for((COND), (US), 10, 10); \
+       else \
+               ret__ = _wait_for_atomic((COND), (US), 0); \
+       ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+       BUILD_BUG_ON(!__builtin_constant_p(US)); \
+       BUILD_BUG_ON((US) > 50000); \
+       _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
 static inline const char *yesno(bool v)
 {
        return v ? "yes" : "no";
drivers/gpu/drm/i915/intel_drv.h
index addf6f92a9db83240be6076fe0e9e289e1859ba3..4049e03d2c0d1306312454f9e31cdbdfb8ce0f4c 100644
 
 struct drm_printer;
 
-/**
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-       const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
-       long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
-       int ret__;                                                      \
-       might_sleep();                                                  \
-       for (;;) {                                                      \
-               const bool expired__ = ktime_after(ktime_get_raw(), end__); \
-               OP;                                                     \
-               /* Guarantee COND check prior to timeout */             \
-               barrier();                                              \
-               if (COND) {                                             \
-                       ret__ = 0;                                      \
-                       break;                                          \
-               }                                                       \
-               if (expired__) {                                        \
-                       ret__ = -ETIMEDOUT;                             \
-                       break;                                          \
-               }                                                       \
-               usleep_range(wait__, wait__ * 2);                       \
-               if (wait__ < (Wmax))                                    \
-                       wait__ <<= 1;                                   \
-       }                                                               \
-       ret__;                                                          \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax)        __wait_for(, (COND), (US), (Wmin), \
-                                                  (Wmax))
-#define wait_for(COND, MS)             _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
-       int cpu, ret, timeout = (US) * 1000; \
-       u64 base; \
-       _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
-       if (!(ATOMIC)) { \
-               preempt_disable(); \
-               cpu = smp_processor_id(); \
-       } \
-       base = local_clock(); \
-       for (;;) { \
-               u64 now = local_clock(); \
-               if (!(ATOMIC)) \
-                       preempt_enable(); \
-               /* Guarantee COND check prior to timeout */ \
-               barrier(); \
-               if (COND) { \
-                       ret = 0; \
-                       break; \
-               } \
-               if (now - base >= timeout) { \
-                       ret = -ETIMEDOUT; \
-                       break; \
-               } \
-               cpu_relax(); \
-               if (!(ATOMIC)) { \
-                       preempt_disable(); \
-                       if (unlikely(cpu != smp_processor_id())) { \
-                               timeout -= now - base; \
-                               cpu = smp_processor_id(); \
-                               base = local_clock(); \
-                       } \
-               } \
-       } \
-       ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
-       int ret__; \
-       BUILD_BUG_ON(!__builtin_constant_p(US)); \
-       if ((US) > 10) \
-               ret__ = _wait_for((COND), (US), 10, 10); \
-       else \
-               ret__ = _wait_for_atomic((COND), (US), 0); \
-       ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
-       BUILD_BUG_ON(!__builtin_constant_p(US)); \
-       BUILD_BUG_ON((US) > 50000); \
-       _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
 /*
  * Display related stuff
  */
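
Finally, a small hedged sketch of what the relocated unit macros expand to; the values below are arbitrary and only illustrate the arithmetic:

/* Hedged sketch with arbitrary values, only to show the macro expansions. */
static void example_units(void)
{
	u32 clock_hz = MHz(540);	/* 540 * 1000 * 1000 = 540,000,000 Hz */
	u64 bandwidth = GBps(1);	/* 1,000,000,000 bytes per second, as a u64 */

	(void)clock_hz;
	(void)bandwidth;
}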