cpuidle: Allow idle injection to apply exit latency limit
author    Daniel Lezcano <daniel.lezcano@linaro.org>	Sat, 16 Nov 2019 13:16:12 +0000 (14:16 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>	Wed, 20 Nov 2019 10:32:55 +0000 (11:32 +0100)
In some cases it may be useful to specify an exit latency limit for
the idle state to be used during CPU idle time injection.

Instead of duplicating the information in struct cpuidle_device
or propagating the latency limit in the call stack, replace the
use_deepest_state field with forced_idle_latency_limit_ns to represent
that limit, so that the deepest idle state with exit latency within
that limit is forced (i.e. no governors) when it is set.

A zero exit latency limit for forced idle means to use governors in
the usual way (analogous to use_deepest_state equal to "false" before
this change).
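
For illustration only, here is a minimal standalone C sketch of that
selection rule (not kernel code; the state names and exit latencies
below are made up): for a nonzero limit, the deepest state whose exit
latency still fits within the limit is chosen.

  /* Illustrative model only, not kernel code: pick the deepest idle
   * state whose exit latency is within a nonzero latency_limit_ns.
   * A zero limit means "no override", i.e. the governor decides and
   * this selection is skipped entirely. */
  #include <stdio.h>
  #include <stdint.h>

  struct model_state { const char *name; uint64_t exit_latency_ns; };

  /* Deeper states come later in the table, as in a cpuidle driver. */
  static const struct model_state states[] = {
          { "WFI",   1000 },
          { "C2",   50000 },
          { "C3",  200000 },
  };

  static int find_deepest_within(uint64_t latency_limit_ns)
  {
          unsigned int i;
          int deepest = 0;

          for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
                  if (states[i].exit_latency_ns <= latency_limit_ns)
                          deepest = i;

          return deepest;
  }

  int main(void)
  {
          /* A 100 us limit rules out C3 (200 us) but still allows C2. */
          printf("%s\n", states[find_deepest_within(100000)].name);
          return 0;
  }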

Additionally, add play_idle_precise() taking two arguments, the
duration of forced idle and the idle state exit latency limit, both
in nanoseconds, and redefine play_idle() as a wrapper around that
new function.
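
As a hypothetical caller sketch (not part of this patch), an idle
injection kthread could now bound the exit latency of the injected
idle period, while play_idle() itself keeps the old unconstrained
behaviour by passing U64_MAX:

  #include <linux/cpu.h>
  #include <linux/ktime.h>

  /* Hypothetical example: must run in a per-CPU kthread with
   * PF_NO_SETAFFINITY, as play_idle_precise() WARNs otherwise.
   * Force 5 ms of idle, but only allow idle states whose exit
   * latency is at most 500 us. */
  static void example_inject_idle(void)
  {
          play_idle_precise(5 * NSEC_PER_MSEC, 500 * NSEC_PER_USEC);
  }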

This change is preparatory; no functional impact is expected.

Suggested-by: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
[ rjw: Subject, changelog, cpuidle_use_deepest_state() kerneldoc, whitespace ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
drivers/cpuidle/cpuidle.c
include/linux/cpu.h
include/linux/cpuidle.h
kernel/sched/idle.c

diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index bf9b030cd7e12f3b8e35424a91b8c7dbf0040e8e..12077db1158e466928023cfd51ca1bb76c93590d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -99,20 +99,21 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 }
 
 /**
- * cpuidle_use_deepest_state - Set/clear governor override flag.
- * @enable: New value of the flag.
+ * cpuidle_use_deepest_state - Set/unset governor override mode.
+ * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
  *
- * Set/unset the current CPU to use the deepest idle state (override governors
- * going forward if set).
+ * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
+ * state with exit latency within @latency_limit_ns (override governors going
+ * forward), or do not override governors if it is zero.
  */
-void cpuidle_use_deepest_state(bool enable)
+void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
        struct cpuidle_device *dev;
 
        preempt_disable();
        dev = cpuidle_get_device();
        if (dev)
-               dev->use_deepest_state = enable;
+               dev->forced_idle_latency_limit_ns = latency_limit_ns;
        preempt_enable();
 }
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d0633ebdaa9c36ecdb3df623c978efcd32cf6877..cc03a7848b635b720daab8586450f7e88130581a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -179,7 +179,12 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_us);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+       play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
 
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index afb6a573b46df9ef7d1f73b88ff5ba037123a9f1..72b26ff1de4ba5ed8590d62ccce01d002bce73e6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -85,7 +85,6 @@ struct cpuidle_driver_kobj;
 struct cpuidle_device {
        unsigned int            registered:1;
        unsigned int            enabled:1;
-       unsigned int            use_deepest_state:1;
        unsigned int            poll_time_limit:1;
        unsigned int            cpu;
        ktime_t                 next_hrtimer;
@@ -93,6 +92,7 @@ struct cpuidle_device {
        int                     last_state_idx;
        u64                     last_residency_ns;
        u64                     poll_limit_ns;
+       u64                     forced_idle_latency_limit_ns;
        struct cpuidle_state_usage      states_usage[CPUIDLE_STATE_MAX];
        struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
        struct cpuidle_driver_kobj *kobj_driver;
@@ -216,7 +216,7 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                      struct cpuidle_device *dev);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                             struct cpuidle_device *dev)
@@ -224,7 +224,7 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                       struct cpuidle_device *dev)
 {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
 }
 #endif
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 1aa260702b3861af5de658514c2d5f2fe4a5e522..cd05ffa0abfe8409131bbd9825caa8529ab7ec3e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -165,7 +165,7 @@ static void cpuidle_idle_call(void)
         * until a proper wakeup interrupt happens.
         */
 
-       if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+       if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
                if (idle_should_enter_s2idle()) {
                        rcu_idle_enter();
 
@@ -311,7 +311,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void play_idle(unsigned long duration_us)
+void play_idle_precise(u64 duration_ns, u64 latency_ns)
 {
        struct idle_timer it;
 
@@ -323,29 +323,29 @@ void play_idle(unsigned long duration_us)
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
-       WARN_ON_ONCE(!duration_us);
+       WARN_ON_ONCE(!duration_ns);
 
        rcu_sleep_check();
        preempt_disable();
        current->flags |= PF_IDLE;
-       cpuidle_use_deepest_state(true);
+       cpuidle_use_deepest_state(latency_ns);
 
        it.done = 0;
        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        it.timer.function = idle_inject_timer_fn;
-       hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+       hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
                      HRTIMER_MODE_REL_PINNED);
 
        while (!READ_ONCE(it.done))
                do_idle();
 
-       cpuidle_use_deepest_state(false);
+       cpuidle_use_deepest_state(0);
        current->flags &= ~PF_IDLE;
 
        preempt_fold_need_resched();
        preempt_enable();
 }
-EXPORT_SYMBOL_GPL(play_idle);
+EXPORT_SYMBOL_GPL(play_idle_precise);
 
 void cpu_startup_entry(enum cpuhp_state state)
 {