x86/irq/32: Handle irq stack allocation failure properly
author Thomas Gleixner <tglx@linutronix.de>
Sun, 14 Apr 2019 16:00:04 +0000 (18:00 +0200)
committer Borislav Petkov <bp@suse.de>
Wed, 17 Apr 2019 13:31:42 +0000 (15:31 +0200)
irq_ctx_init() crashes hard on page allocation failures. While that's ok
during early boot, it's just wrong in the CPU hotplug bringup code.

Check the page allocation failure and return -ENOMEM and handle it at the
call sites. On early boot the only way out is to BUG(), but on CPU hotplug
there is no reason to crash, so just abort the operation.

Rename the function to something more sensible while at it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Nicolai Stange <nstange@suse.de>
Cc: Pu Wen <puwen@hygon.cn>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Shaokun Zhang <zhangshaokun@hisilicon.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Cc: Yazen Ghannam <yazen.ghannam@amd.com>
Cc: Yi Wang <wang.yi59@zte.com.cn>
Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Link: https://lkml.kernel.org/r/20190414160146.089060584@linutronix.de
arch/x86/include/asm/irq.h
arch/x86/include/asm/smp.h
arch/x86/kernel/irq_32.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp_pv.c

index fbb16e6b6c18b14de64bce525ab02e236b373eef..d751e8440a6bbf9776b0d1786a1ca5c743db48c9 100644 (file)
@@ -17,9 +17,9 @@ static inline int irq_canonicalize(int irq)
 }
 
 #ifdef CONFIG_X86_32
-extern void irq_ctx_init(int cpu);
+extern int irq_init_percpu_irqstack(unsigned int cpu);
 #else
-# define irq_ctx_init(cpu) do { } while (0)
+static inline int irq_init_percpu_irqstack(unsigned int cpu) { return 0; }
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
index 2e95b6c1bca3f517555e99c81b262d386bae5530..da545df207b2affed15a511ceb728bc999c35360 100644 (file)
@@ -131,7 +131,7 @@ void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void calculate_max_logical_packages(void);
 void native_smp_cpus_done(unsigned int max_cpus);
-void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 int common_cpu_die(unsigned int cpu);
index f37489c806faf8ed714dda52fc2f5d3e82cdae25..fc34816c6f044923ffdc82c0800f6efa68982e9a 100644 (file)
@@ -107,28 +107,28 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 }
 
 /*
- * allocate per-cpu stacks for hardirq and for softirq processing
+ * Allocate per-cpu stacks for hardirq and softirq processing
  */
-void irq_ctx_init(int cpu)
+int irq_init_percpu_irqstack(unsigned int cpu)
 {
-       struct irq_stack *irqstk;
+       int node = cpu_to_node(cpu);
+       struct page *ph, *ps;
 
        if (per_cpu(hardirq_stack_ptr, cpu))
-               return;
-
-       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-                                              THREADINFO_GFP,
-                                              THREAD_SIZE_ORDER));
-       per_cpu(hardirq_stack_ptr, cpu) = irqstk;
+               return 0;
 
-       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-                                              THREADINFO_GFP,
-                                              THREAD_SIZE_ORDER));
-       per_cpu(softirq_stack_ptr, cpu) = irqstk;
+       ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+       if (!ph)
+               return -ENOMEM;
+       ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+       if (!ps) {
+               __free_pages(ph, THREAD_SIZE_ORDER);
+               return -ENOMEM;
+       }
 
-       pr_debug("CPU %u irqstacks, hard=%p soft=%p\n",
-                cpu, per_cpu(hardirq_stack_ptr, cpu),
-                per_cpu(softirq_stack_ptr, cpu));
+       per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+       per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+       return 0;
 }
 
 void do_softirq_own_stack(void)
index 26b5cb5386b9bebb39f1ff3e8a70f4e745deeb5d..16919a9671fa93f89aac7ed279097898f6516b33 100644 (file)
@@ -91,7 +91,7 @@ void __init init_IRQ(void)
        for (i = 0; i < nr_legacy_irqs(); i++)
                per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
-       irq_ctx_init(smp_processor_id());
+       BUG_ON(irq_init_percpu_irqstack(smp_processor_id()));
 
        x86_init.irqs.intr_init();
 }
index ce1a67b70168e6b354f1916a200dd031d0e488b9..c92b21f9e9dc6e66587c32b6bbe3a18bb7492164 100644 (file)
@@ -935,20 +935,27 @@ out:
        return boot_error;
 }
 
-void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
+       int ret;
+
        /* Just in case we booted with a single CPU. */
        alternatives_enable_smp();
 
        per_cpu(current_task, cpu) = idle;
 
+       /* Initialize the interrupt stack(s) */
+       ret = irq_init_percpu_irqstack(cpu);
+       if (ret)
+               return ret;
+
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
-       irq_ctx_init(cpu);
        per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
 #else
        initial_gs = per_cpu_offset(cpu);
 #endif
+       return 0;
 }
 
 /*
@@ -1106,7 +1113,9 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
        /* the FPU context is blank, nobody can own it */
        per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 
-       common_cpu_up(cpu, tidle);
+       err = common_cpu_up(cpu, tidle);
+       if (err)
+               return err;
 
        err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
        if (err) {
index 145506f9fdbe19308290e51e5c9c28c15796b63e..590fcf863006046156192b85bcfa32c5288e31b7 100644 (file)
@@ -361,7 +361,9 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int rc;
 
-       common_cpu_up(cpu, idle);
+       rc = common_cpu_up(cpu, idle);
+       if (rc)
+               return rc;
 
        xen_setup_runstate_info(cpu);