x86/exceptions: Add structs for exception stacks
author	Thomas Gleixner <tglx@linutronix.de>
Sun, 14 Apr 2019 15:59:47 +0000 (17:59 +0200)
committer	Borislav Petkov <bp@suse.de>
Wed, 17 Apr 2019 10:55:18 +0000 (12:55 +0200)
At the moment everything assumes a full linear mapping of the various
exception stacks. Adding guard pages to the cpu entry area mapping of the
exception stacks will break that assumption.

As a preparatory step, convert both the real storage and the effective
mapping in the cpu entry area from character arrays to structures.

To ensure that both structures have the same ordering and the same sizes
for the individual stacks, fill in the members with a macro. The guard
size is the only difference between the two resulting structures. For now
both have guard size 0 until all usage sites are prepared.
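
As an illustration of where this is headed (a sketch only, not part of
this patch), the cea variant could eventually be built with page-sized
guards:

	/*
	 * Illustrative only: the eventual cpu entry area mapping with a
	 * page-sized guard below each stack and on top of the last one.
	 */
	struct cea_exception_stacks {
		ESTACKS_MEMBERS(PAGE_SIZE)
	};

Left unmapped in the cpu entry area, such a guard page turns a stack
overflow into an immediate fault instead of silent corruption of the
adjacent stack.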

Provide a couple of helper macros which are used in the following
conversions.
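
To make the intent of the helpers concrete, here is a minimal sketch of
how they could be used once the cpu entry area mapping is converted; the
function name is made up for this illustration:

	/*
	 * Sketch only: resolve the bounds of the #DF IST stack from the
	 * per-CPU cea_exception_stacks mapping.  in_df_stack() is a
	 * hypothetical helper, not part of this series.
	 */
	static bool in_df_stack(unsigned long sp,
				struct cea_exception_stacks *cs)
	{
		unsigned long bot = CEA_ESTACK_BOT(cs, DF);
		unsigned long top = CEA_ESTACK_TOP(cs, DF);

		return sp >= bot && sp < top;
	}

CEA_ESTACK_SIZE() and CEA_ESTACK_OFFS() cover the cases where only the
size or the offset within struct cea_exception_stacks is needed, and
CEA_ESTACK_PAGES gives the size of the whole structure in pages.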

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160144.506807893@linutronix.de
arch/x86/include/asm/cpu_entry_area.h
arch/x86/kernel/cpu/common.c
arch/x86/mm/cpu_entry_area.c

diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 29c70641544355ffb9b6947db066951311b868b3..af8c312673ded227fc4bacbfc3e5b6fc3237f285 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -7,6 +7,51 @@
 #include <asm/processor.h>
 #include <asm/intel_ds.h>
 
+#ifdef CONFIG_X86_64
+
+/* Macro to enforce the same ordering and stack sizes */
+#define ESTACKS_MEMBERS(guardsize)             \
+       char    DF_stack_guard[guardsize];      \
+       char    DF_stack[EXCEPTION_STKSZ];      \
+       char    NMI_stack_guard[guardsize];     \
+       char    NMI_stack[EXCEPTION_STKSZ];     \
+       char    DB_stack_guard[guardsize];      \
+       char    DB_stack[DEBUG_STKSZ];          \
+       char    MCE_stack_guard[guardsize];     \
+       char    MCE_stack[EXCEPTION_STKSZ];     \
+       char    IST_top_guard[guardsize];       \
+
+/* The exception stacks' physical storage. No guard pages required */
+struct exception_stacks {
+       ESTACKS_MEMBERS(0)
+};
+
+/*
+ * The effective cpu entry area mapping with guard pages. Guard size is
+ * zero until the code which makes assumptions about linear mappings is
+ * cleaned up.
+ */
+struct cea_exception_stacks {
+       ESTACKS_MEMBERS(0)
+};
+
+#define CEA_ESTACK_SIZE(st)                                    \
+       sizeof(((struct cea_exception_stacks *)0)->st## _stack)
+
+#define CEA_ESTACK_BOT(ceastp, st)                             \
+       ((unsigned long)&(ceastp)->st## _stack)
+
+#define CEA_ESTACK_TOP(ceastp, st)                             \
+       (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))
+
+#define CEA_ESTACK_OFFS(st)                                    \
+       offsetof(struct cea_exception_stacks, st## _stack)
+
+#define CEA_ESTACK_PAGES                                       \
+       (sizeof(struct cea_exception_stacks) / PAGE_SIZE)
+
+#endif
+
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
  * and early entry/exit code.  Real types aren't used for all fields here
@@ -32,12 +77,9 @@ struct cpu_entry_area {
 
 #ifdef CONFIG_X86_64
        /*
-        * Exception stacks used for IST entries.
-        *
-        * In the future, this should have a separate slot for each stack
-        * with guard pages between them.
+        * Exception stacks used for IST entries with guard pages.
         */
-       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+       struct cea_exception_stacks estacks;
 #endif
 #ifdef CONFIG_CPU_SUP_INTEL
        /*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0e4cb718fc4a471bd8cbb00cdae8b2c4fb50bd92..24b801ea75221ce907e86eacfa163cd389a18aed 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1754,7 +1754,7 @@ void cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
-               char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
+               char *estacks = (char *)&get_cpu_entry_area(cpu)->estacks;
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index c2a54f75d335facc1e191e6449a0e9807da96a76..6a09b84c13fea1619916ba205c15080527e680f8 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -13,8 +13,7 @@
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
 #ifdef CONFIG_X86_64
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -138,9 +137,8 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
 #ifdef CONFIG_X86_64
        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
        BUILD_BUG_ON(sizeof(exception_stacks) !=
-                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
-       cea_map_percpu_pages(&cea->exception_stacks,
-                            &per_cpu(exception_stacks, cpu),
+                    sizeof(((struct cpu_entry_area *)0)->estacks));
+       cea_map_percpu_pages(&cea->estacks, &per_cpu(exception_stacks, cpu),
                             sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
 #endif
        percpu_setup_debug_store(cpu);