smp/cfd: Convert core to hotplug state machine
authorRichard Weinberger <richard@nod.at>
Wed, 13 Jul 2016 17:17:01 +0000 (17:17 +0000)
committerIngo Molnar <mingo@kernel.org>
Fri, 15 Jul 2016 08:41:43 +0000 (10:41 +0200)
Install the callbacks via the state machine. They are installed at runtime so
smpcfd_prepare_cpu() needs to be invoked by the boot-CPU.

Signed-off-by: Richard Weinberger <richard@nod.at>
[ Added the dropped CPU dying case back in. ]
Signed-off-by: Richard Cochran <rcochran@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153337.818376366@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/cpuhotplug.h
include/linux/smp.h
kernel/cpu.c
kernel/smp.c

index 78170827a7762dac1d0ec636645e3d560e45bb0b..b5cf01ace71bc20c879b30fee236e0309061dd62 100644 (file)
@@ -18,6 +18,7 @@ enum cpuhp_state {
        CPUHP_HRTIMERS_PREPARE,
        CPUHP_PROFILE_PREPARE,
        CPUHP_X2APIC_PREPARE,
+       CPUHP_SMPCFD_PREPARE,
        CPUHP_TIMERS_DEAD,
        CPUHP_NOTIFY_PREPARE,
        CPUHP_BRINGUP_CPU,
@@ -57,6 +58,7 @@ enum cpuhp_state {
        CPUHP_AP_ARM_CORESIGHT4_STARTING,
        CPUHP_AP_ARM64_ISNDEP_STARTING,
        CPUHP_AP_LEDTRIG_STARTING,
+       CPUHP_AP_SMPCFD_DYING,
        CPUHP_AP_X86_TBOOT_DYING,
        CPUHP_AP_NOTIFY_STARTING,
        CPUHP_AP_ONLINE,
index c4414074bd88e2ae1b042cb477440f54b2f241dd..eccae4690f4153d60ad256f60ff4c969366211a8 100644 (file)
@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
 
 void smp_setup_processor_id(void);
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_SMP_H */
index e1017d92d3089cc7891658b23674623ca930bc64..008e2fd40cb18cafa7eeb11a1a75f1d3e73eef26 100644 (file)
@@ -1195,6 +1195,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .startup = hrtimers_prepare_cpu,
                .teardown = hrtimers_dead_cpu,
        },
+       [CPUHP_SMPCFD_PREPARE] = {
+               .name = "SMPCFD prepare",
+               .startup = smpcfd_prepare_cpu,
+               .teardown = smpcfd_dead_cpu,
+       },
        [CPUHP_TIMERS_DEAD] = {
                .name = "timers dead",
                .startup = NULL,
@@ -1218,6 +1223,10 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown               = NULL,
                .cant_stop              = true,
        },
+       [CPUHP_AP_SMPCFD_DYING] = {
+               .startup = NULL,
+               .teardown = smpcfd_dying_cpu,
+       },
        /*
         * Handled on controll processor until the plugged processor manages
         * this itself.
index 74165443c240147cd701489298d2b552c8f46adf..7180491c9678d775989e2090c15629fbb47af22a 100644 (file)
@@ -33,69 +33,54 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int smpcfd_prepare_cpu(unsigned int cpu)
 {
-       long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
-                               cpu_to_node(cpu)))
-                       return notifier_from_errno(-ENOMEM);
-               cfd->csd = alloc_percpu(struct call_single_data);
-               if (!cfd->csd) {
-                       free_cpumask_var(cfd->cpumask);
-                       return notifier_from_errno(-ENOMEM);
-               }
-               break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               /* Fall-through to the CPU_DEAD[_FROZEN] case. */
-
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+       if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+                                    cpu_to_node(cpu)))
+               return -ENOMEM;
+       cfd->csd = alloc_percpu(struct call_single_data);
+       if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
-               free_percpu(cfd->csd);
-               break;
+               return -ENOMEM;
+       }
 
-       case CPU_DYING:
-       case CPU_DYING_FROZEN:
-               /*
-                * The IPIs for the smp-call-function callbacks queued by other
-                * CPUs might arrive late, either due to hardware latencies or
-                * because this CPU disabled interrupts (inside stop-machine)
-                * before the IPIs were sent. So flush out any pending callbacks
-                * explicitly (without waiting for the IPIs to arrive), to
-                * ensure that the outgoing CPU doesn't go offline with work
-                * still pending.
-                */
-               flush_smp_call_function_queue(false);
-               break;
-#endif
-       };
+       return 0;
+}
+
+int smpcfd_dead_cpu(unsigned int cpu)
+{
+       struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-       return NOTIFY_OK;
+       free_cpumask_var(cfd->cpumask);
+       free_percpu(cfd->csd);
+       return 0;
 }
 
-static struct notifier_block hotplug_cfd_notifier = {
-       .notifier_call          = hotplug_cfd,
-};
+int smpcfd_dying_cpu(unsigned int cpu)
+{
+       /*
+        * The IPIs for the smp-call-function callbacks queued by other
+        * CPUs might arrive late, either due to hardware latencies or
+        * because this CPU disabled interrupts (inside stop-machine)
+        * before the IPIs were sent. So flush out any pending callbacks
+        * explicitly (without waiting for the IPIs to arrive), to
+        * ensure that the outgoing CPU doesn't go offline with work
+        * still pending.
+        */
+       flush_smp_call_function_queue(false);
+       return 0;
+}
 
 void __init call_function_init(void)
 {
-       void *cpu = (void *)(long)smp_processor_id();
        int i;
 
        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));
 
-       hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-       register_cpu_notifier(&hotplug_cfd_notifier);
+       smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*