From 39d668e04edad25abe184fb329ce35a131146ee5 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Wed, 18 Jul 2018 11:41:04 +0200
Subject: [PATCH] x86/mm/pti: Make pti_clone_kernel_text() compile on 32 bit

The pti_clone_kernel_text() function references __end_rodata_hpage_align,
which is only present on x86-64. This makes sense, as the end of the
rodata section is not huge-page aligned on 32 bit.

Nevertheless, the function requires a symbol that points at the right
address on both 32 and 64 bit. Introduce __end_rodata_aligned for that
purpose and use it in pti_clone_kernel_text().

Signed-off-by: Joerg Roedel
Signed-off-by: Thomas Gleixner
Tested-by: Pavel Machek
Cc: "H. Peter Anvin"
Cc: linux-mm@kvack.org
Cc: Linus Torvalds
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Josh Poimboeuf
Cc: Juergen Gross
Cc: Peter Zijlstra
Cc: Borislav Petkov
Cc: Jiri Kosina
Cc: Boris Ostrovsky
Cc: Brian Gerst
Cc: David Laight
Cc: Denys Vlasenko
Cc: Eduardo Valentin
Cc: Greg KH
Cc: Will Deacon
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli
Cc: Waiman Long
Cc: "David H. Gutteridge"
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-28-git-send-email-joro@8bytes.org
---
 arch/x86/include/asm/sections.h |  1 +
 arch/x86/kernel/vmlinux.lds.S   | 17 ++++++++++-------
 arch/x86/mm/pti.c               |  2 +-
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 5c019d23d06b..4a911a382ade 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -7,6 +7,7 @@
 
 extern char __brk_base[], __brk_limit[];
 extern struct exception_table_entry __stop___ex_table[];
+extern char __end_rodata_aligned[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 5e1458f609a1..8bde0a419f86 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -55,19 +55,22 @@ jiffies_64 = jiffies;
  * so we can enable protection checks as well as retain 2MB large page
  * mappings for kernel text.
  */
-#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);
+#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);
 
-#define X64_ALIGN_RODATA_END				\
+#define X86_ALIGN_RODATA_END				\
 		. = ALIGN(HPAGE_SIZE);			\
-		__end_rodata_hpage_align = .;
+		__end_rodata_hpage_align = .;		\
+		__end_rodata_aligned = .;
 
 #define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
 #define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);
 
 #else
 
-#define X64_ALIGN_RODATA_BEGIN
-#define X64_ALIGN_RODATA_END
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END			\
+		. = ALIGN(PAGE_SIZE);		\
+		__end_rodata_aligned = .;
 
 #define ALIGN_ENTRY_TEXT_BEGIN
 #define ALIGN_ENTRY_TEXT_END
@@ -141,9 +144,9 @@ SECTIONS
 
 	/* .text should occupy whole number of pages */
 	. = ALIGN(PAGE_SIZE);
-	X64_ALIGN_RODATA_BEGIN
+	X86_ALIGN_RODATA_BEGIN
 	RO_DATA(PAGE_SIZE)
-	X64_ALIGN_RODATA_END
+	X86_ALIGN_RODATA_END
 
 	/* Data */
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index a594e3b6401a..453d23760941 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -470,7 +470,7 @@ void pti_clone_kernel_text(void)
 	 * clone the areas past rodata, they might contain secrets.
 	 */
 	unsigned long start = PFN_ALIGN(_text);
-	unsigned long end = (unsigned long)__end_rodata_hpage_align;
+	unsigned long end = (unsigned long)__end_rodata_aligned;
 
 	if (!pti_kernel_image_global_ok())
 		return;
-- 
2.30.2
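
Usage illustration, not part of the patch itself: once __end_rodata_aligned
is declared in <asm/sections.h>, code can reference the end of the aligned
rodata region unconditionally, which is exactly what pti_clone_kernel_text()
now relies on. The initcall below is a hypothetical sketch (the function
name and the printout are invented for illustration):

	/*
	 * Hypothetical sketch, not part of this patch: shows that
	 * __end_rodata_aligned is usable on both 32- and 64-bit builds,
	 * unlike __end_rodata_hpage_align, which exists only on x86-64.
	 */
	#include <linux/init.h>
	#include <linux/printk.h>
	#include <asm/sections.h>

	static int __init rodata_end_report(void)
	{
		/* Page-aligned on 32-bit, 2MB huge-page aligned on 64-bit. */
		pr_info("rodata clone boundary: 0x%lx\n",
			(unsigned long)__end_rodata_aligned);
		return 0;
	}
	late_initcall(rodata_end_report);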