efi/x86: Simplify 64-bit EFI firmware call wrapper
author    Ard Biesheuvel <ardb@kernel.org>
Fri, 3 Jan 2020 11:39:42 +0000 (12:39 +0100)
committer Ingo Molnar <mingo@kernel.org>
Fri, 10 Jan 2020 17:55:02 +0000 (18:55 +0100)
The efi_call() wrapper used to invoke EFI runtime services serves
a number of purposes:
- realign the stack to 16 bytes
- preserve FP and CR0 register state
- translate from SysV to MS calling convention.
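
The last of these points deserves a note: the kernel is built for the SysV
AMD64 ABI, while EFI runtime services expect the Microsoft x64 calling
convention. As a hedged illustration only (the prototype below is written
from memory and the comment paraphrases what the stub does; it is not part
of this patch):

    /*
     * efi_call() takes the service pointer plus up to six arguments in
     * SysV registers and re-issues the call per the MS x64 convention:
     *
     *   SysV (kernel caller)         MS x64 (EFI service)
     *   %rdi:  function pointer  ->  call target
     *   %rsi:  arg1              ->  %rcx
     *   %rdx:  arg2              ->  %rdx
     *   %rcx:  arg3              ->  %r8
     *   %r8:   arg4              ->  %r9
     *   %r9:   arg5              ->  stack, above the 32-byte shadow area
     *   stack: arg6              ->  stack
     */
    extern asmlinkage u64 efi_call(void *fp, ...);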

Preserving CR0.TS is no longer necessary in Linux, and preserving the
FP register state is also redundant in most cases, since efi_call() is
almost always used from within the scope of a pair of kernel_fpu_begin()/
kernel_fpu_end() calls, with the exception of the early call to
SetVirtualAddressMap() and the SGI UV support code.

So let's add a pair of kernel_fpu_begin()/_end() calls there as well,
remove the unnecessary code from the assembly implementation of
efi_call(), and keep only the pieces that deal with stack alignment
and the ABI translation.
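
For the two remaining call sites, the resulting pattern looks roughly like
the sketch below; it mirrors the efi_64.c hunk further down, with the
surrounding function and error handling elided:

    kernel_fpu_begin();     /* firmware may clobber FPU/XMM state */

    /* Disable interrupts around EFI calls: */
    local_irq_save(flags);
    status = efi_call(efi.systab->runtime->set_virtual_address_map,
                      memory_map_size, descriptor_size,
                      descriptor_version, virtual_map);
    local_irq_restore(flags);

    kernel_fpu_end();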

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Arvind Sankar <nivedita@alum.mit.edu>
Cc: Matthew Garrett <mjg59@google.com>
Cc: linux-efi@vger.kernel.org
Link: https://lkml.kernel.org/r/20200103113953.9571-10-ardb@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

arch/x86/platform/efi/Makefile
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/uv/bios_uv.c

diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index fe29f3f5d384f286b03e735350dac2ac95f61ccc..7ec3a8b31f8bf90e3b89f40461277bd66bc1da72 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
 
 obj-$(CONFIG_EFI)              += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_EFI_MIXED)                += efi_thunk_$(BITS).o
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a7f11d1ff7c4cac7206e87053b4e1a1830624c9d..03565dad0c4b45385b191c670df447e2b0928490 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -1019,6 +1019,8 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
                efi_switch_mm(&efi_mm);
        }
 
+       kernel_fpu_begin();
+
        /* Disable interrupts around EFI calls: */
        local_irq_save(flags);
        status = efi_call(efi.systab->runtime->set_virtual_address_map,
@@ -1026,6 +1028,7 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
                          descriptor_version, virtual_map);
        local_irq_restore(flags);
 
+       kernel_fpu_end();
 
        if (save_pgd)
                efi_old_memmap_phys_epilog(save_pgd);
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index b1d2313fe3bfb4dc1c75b268c5e9bbe9e323a346..e7e1020f4ccb6708527442b12c86f48bead16b0b 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -8,41 +8,12 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/msr.h>
-#include <asm/processor-flags.h>
-#include <asm/page_types.h>
-
-#define SAVE_XMM                       \
-       mov %rsp, %rax;                 \
-       subq $0x70, %rsp;               \
-       and $~0xf, %rsp;                \
-       mov %rax, (%rsp);               \
-       mov %cr0, %rax;                 \
-       clts;                           \
-       mov %rax, 0x8(%rsp);            \
-       movaps %xmm0, 0x60(%rsp);       \
-       movaps %xmm1, 0x50(%rsp);       \
-       movaps %xmm2, 0x40(%rsp);       \
-       movaps %xmm3, 0x30(%rsp);       \
-       movaps %xmm4, 0x20(%rsp);       \
-       movaps %xmm5, 0x10(%rsp)
-
-#define RESTORE_XMM                    \
-       movaps 0x60(%rsp), %xmm0;       \
-       movaps 0x50(%rsp), %xmm1;       \
-       movaps 0x40(%rsp), %xmm2;       \
-       movaps 0x30(%rsp), %xmm3;       \
-       movaps 0x20(%rsp), %xmm4;       \
-       movaps 0x10(%rsp), %xmm5;       \
-       mov 0x8(%rsp), %rsi;            \
-       mov %rsi, %cr0;                 \
-       mov (%rsp), %rsp
+#include <asm/nospec-branch.h>
 
 SYM_FUNC_START(efi_call)
        pushq %rbp
        movq %rsp, %rbp
-       SAVE_XMM
+       and $~0xf, %rsp
        mov 16(%rbp), %rax
        subq $48, %rsp
        mov %r9, 32(%rsp)
@@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call)
        mov %r8, %r9
        mov %rcx, %r8
        mov %rsi, %rcx
-       call *%rdi
-       addq $48, %rsp
-       RESTORE_XMM
-       popq %rbp
+       CALL_NOSPEC %rdi
+       leave
        ret
 SYM_FUNC_END(efi_call)
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index ece9cb9c1189bdaf135e1a07375c0334d2f3fc47..5c0e2eb5d87cf6ec951c7b6ebb9532b7df9c2cdd 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -34,10 +34,13 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
         * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
         * callback method, which uses efi_call() directly, with the kernel page tables:
         */
-       if (unlikely(efi_enabled(EFI_OLD_MEMMAP)))
+       if (unlikely(efi_enabled(EFI_OLD_MEMMAP))) {
+               kernel_fpu_begin();
                ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
-       else
+               kernel_fpu_end();
+       } else {
                ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+       }
 
        return ret;
 }