sched/core: Fix cpu.max vs. cpuhotplug deadlock

Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Mon, 22 Jan 2018 21:53:28 +0000 (22:53 +0100)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 24 Jan 2018 09:03:44 +0000 (10:03 +0100)

Tejun reported the following cpu-hotplug lock (percpu-rwsem) read recursion:

  tg_set_cfs_bandwidth()
    get_online_cpus()
      cpus_read_lock()

    cfs_bandwidth_usage_inc()
      static_key_slow_inc()
        cpus_read_lock()

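The fix is to provide *_cpuslocked variants of the static key operations
and use them from paths that already hold the CPU hotplug read lock.  As
an illustrative sketch only (the key and function names below are made
up, not part of this patch):

  /* Hypothetical example; mirrors the cfs_bandwidth case above. */
  static struct static_key example_key = STATIC_KEY_INIT_FALSE;

  static void example_set_limit(void)
  {
          get_online_cpus();              /* takes cpus_read_lock() */

          /*
           * static_key_slow_inc() would call cpus_read_lock() again and
           * recurse on the percpu-rwsem; the _cpuslocked variant expects
           * the hotplug lock to be held and does not re-acquire it.
           */
          static_key_slow_inc_cpuslocked(&example_key);

          put_online_cpus();              /* cpus_read_unlock() */
  }
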
Reported-by: Tejun Heo <tj@kernel.org>
Tested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180122215328.GP3397@worktop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/jump_label.h
kernel/jump_label.c
kernel/sched/fair.c

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af342da6a3e397c995b6859691f032..e0340ca08d9897bd12ecdbb47454961c0237da82 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
 extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
        atomic_dec(&key->enabled);
 }
 
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
 static inline int jump_label_text_reserved(void *start, void *end)
 {
        return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
 
 #define static_branch_inc(x)           static_key_slow_inc(&(x)->key)
 #define static_branch_dec(x)           static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x)        static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x)        static_key_slow_dec_cpuslocked(&(x)->key)
 
 /*
  * Normal usage; boolean enable/disable.
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 8594d24e4adc2245e0f7cd0f6f48d9eef755d551..b4517095db6af2f5130e6c871a7a9e32884c2a66 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,7 +79,7 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-static void static_key_slow_inc_cpuslocked(struct static_key *key)
+void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
        int v, v1;
 
@@ -180,7 +180,7 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void static_key_slow_dec_cpuslocked(struct static_key *key,
+static void __static_key_slow_dec_cpuslocked(struct static_key *key,
                                           unsigned long rate_limit,
                                           struct delayed_work *work)
 {
@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct static_key *key,
                                  struct delayed_work *work)
 {
        cpus_read_lock();
-       static_key_slow_dec_cpuslocked(key, rate_limit, work);
+       __static_key_slow_dec_cpuslocked(key, rate_limit, work);
        cpus_read_unlock();
 }
 
@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
+void static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+       STATIC_KEY_CHECK_USE(key);
+       __static_key_slow_dec_cpuslocked(key, 0, NULL);
+}
+
 void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
        STATIC_KEY_CHECK_USE(key);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2fe3aa853e4dbacef363b70246390f94cc67932c..26a71ebcd3c2ed94941673a07408a8656ea766c3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4365,12 +4365,12 @@ static inline bool cfs_bandwidth_used(void)
 
 void cfs_bandwidth_usage_inc(void)
 {
-       static_key_slow_inc(&__cfs_bandwidth_used);
+       static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
 }
 
 void cfs_bandwidth_usage_dec(void)
 {
-       static_key_slow_dec(&__cfs_bandwidth_used);
+       static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)