x86/cpu_entry_area: Sync cpu_entry_area to initial_page_table
author    Thomas Gleixner <tglx@linutronix.de>
          Wed, 28 Feb 2018 20:14:26 +0000 (21:14 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 1 Mar 2018 08:48:27 +0000 (09:48 +0100)
The separation of the cpu_entry_area from the fixmap missed the fact that
on 32bit non-PAE kernels the cpu_entry_area mapping might not be covered in
initial_page_table by the previous synchronizations.

This results in suspend/resume failures because 32bit utilizes the
initial page table for resume. The absence of the cpu_entry_area mapping
results in a triple fault, i.e. an instant reboot.

With PAE enabled this works by chance because the PGD entry which covers
the fixmap and other parts incidentally provides the cpu_entry_area
mapping as well.
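For context: without PAE the 32bit kernel uses 2-level paging, where
each of the 1024 top-level entries maps 4MB; with PAE it uses 3-level
paging, where each of the 4 top-level entries maps 1GB. The fixmap and
the cpu_entry_area both sit near the top of the virtual address space,
so under PAE they share one already-synchronized top-level entry, while
without PAE they can land in different 4MB entries. A minimal user-space
sketch of that arithmetic (the shift values are the standard 32bit x86
PGDIR_SHIFT values, quoted here for illustration only):

  #include <stdio.h>

  int main(void)
  {
          /* 2-level (non-PAE) paging: PGDIR_SHIFT == 22 */
          unsigned long long nonpae_span = 1ULL << 22;  /* 4MB per entry */
          /* 3-level (PAE) paging: PGDIR_SHIFT == 30 */
          unsigned long long pae_span    = 1ULL << 30;  /* 1GB per entry */

          printf("non-PAE: %4llu MB per top-level entry\n", nonpae_span >> 20);
          printf("PAE:     %4llu MB per top-level entry\n", pae_span >> 20);
          return 0;
  }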

Synchronize the initial page table after setting up the cpu entry
area. Instead of adding yet another copy of the same code, move it to a
function and invoke it from the various places.

It needs to be investigated whether the existing calls in setup_arch()
and setup_per_cpu_areas() can be replaced by the later invocation from
setup_cpu_entry_areas(), but that's beyond the scope of this fix.
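
For reference, the resulting 32bit boot flow is sketched below
(simplified; function names and ordering as in the diff, the comments
paraphrase the ones added there):

  setup_arch()
          ...
          kasan_init();
          sync_initial_page_table();   /* replaces the open-coded clone_pgd_range() calls */
          ...
  setup_per_cpu_areas()
          ...
          sync_initial_page_table();   /* percpu mappings for the smpboot asm */
  setup_cpu_entry_areas()
          for_each_possible_cpu(cpu)
                  setup_cpu_entry_area(cpu);
          sync_initial_page_table();   /* new: last essential swapper_pg_dir update */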

Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Woody Suwalski <terraluna977@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Woody Suwalski <terraluna977@gmail.com>
Cc: William Grant <william.grant@canonical.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1802282137290.1392@nanos.tec.linutronix.de
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/cpu_entry_area.c
arch/x86/mm/init_32.c

diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e55466760ff8e031433132eab676535e614e224f..b3ec519e39827e58eaeb8a567303e37a6bc2e919 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
+void sync_initial_page_table(void);
 
 /*
  * Define this if things work differently on an i386 and an i486:
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 81462e9a34f6af49645a08f55c7d67e0144dbb77..1149d2112b2e17347e8f85c4cae355f4522cc40a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
 #define swapper_pg_dir init_top_pgt
 
 extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }
 
 #define pte_ERROR(e)                                   \
        pr_err("%s:%d: bad pte %p(%016lx)\n",           \
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1ae67e982af70b193498c8186f65ff27788acd9f..4c616be28506fe100f88092474e193521fa81f78 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1204,20 +1204,13 @@ void __init setup_arch(char **cmdline_p)
 
        kasan_init();
 
-#ifdef CONFIG_X86_32
-       /* sync back kernel address range */
-       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       KERNEL_PGD_PTRS);
-
        /*
-        * sync back low identity map too.  It is used for example
-        * in the 32-bit EFI stub.
+        * Sync back kernel address range.
+        *
+        * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+        * this call?
         */
-       clone_pgd_range(initial_page_table,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+       sync_initial_page_table();
 
        tboot_probe();
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 497aa766fab38e21e5d1c24048e65a1e9c5b1e22..ea554f812ee18e46289bb1fc9b65cc7408189a74 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
 
-#ifdef CONFIG_X86_32
        /*
         * Sync back kernel address range again.  We already did this in
         * setup_arch(), but percpu data also needs to be available in
         * the smpboot asm.  We can't reliably pick up percpu mappings
         * using vmalloc_fault(), because exception dispatch needs
         * percpu data.
+        *
+        * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+        * this call?
         */
-       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       KERNEL_PGD_PTRS);
-
-       /*
-        * sync back low identity map too.  It is used for example
-        * in the 32-bit EFI stub.
-        */
-       clone_pgd_range(initial_page_table,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+       sync_initial_page_table();
 }
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index b9283cc276220db667ab091a3358eb5741813f7f..476d810639a87a5ba5a24528b4de5dd34b6e59f6 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
 
        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);
+
+       /*
+        * This is the last essential update to swapper_pgdir which needs
+        * to be synchronized to initial_page_table on 32bit.
+        */
+       sync_initial_page_table();
 }
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 79cb066f40c0d4a4607a7b7aa4e5524a203b5e10..396e1f0151ac1973de4339a8946653f95aecb66d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */
 
+void __init sync_initial_page_table(void)
+{
+       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+
+       /*
+        * sync back low identity map too.  It is used for example
+        * in the 32-bit EFI stub.
+        */
+       clone_pgd_range(initial_page_table,
+                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
        unsigned long pfn, va;
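
For reference, clone_pgd_range(), which sync_initial_page_table() is
built on, simply copies a range of top-level entries. Its definition in
arch/x86/include/asm/pgtable.h at the time was, roughly:

  static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
  {
          /* Copy 'count' top-level (PGD) slots from src to dst. */
          memcpy(dst, src, count * sizeof(pgd_t));
  }

Each sync is therefore a plain memcpy of PGD slots: the first call
copies the kernel half of swapper_pg_dir into initial_page_table, the
second mirrors the kernel mappings into the low identity-map slots
used, for example, by the 32-bit EFI stub.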