x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
Author:     Andy Lutomirski <luto@amacapital.net>
AuthorDate: Mon, 5 May 2014 19:19:36 +0000 (12:19 -0700)
Commit:     H. Peter Anvin <hpa@linux.intel.com>
CommitDate: Mon, 5 May 2014 20:19:01 +0000 (13:19 -0700)
This makes the 64-bit and x32 vdsos use the same mechanism as the
32-bit vdso.  Most of the churn is deleting all the old fixmap code.
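For illustration only (not part of this patch; the helper name below is
made up): with the shared mechanism, vDSO code reaches a vvar through a
hidden symbol that vdso-layout.lds.S places in the vvar page mapped next
to the vDSO text, so the access compiles to a RIP-relative load and no
fixed VVAR_ADDRESS or fixmap slot is needed:

    /* Declared in vvar.h (offset = byte offset within the vvar page): */
    DECLARE_VVAR(0, volatile unsigned long, jiffies)

    /* Used from vDSO code; VVAR(jiffies) expands to the hidden symbol
     * vvar_jiffies, which the linker resolves against the adjacent vvar
     * page, wherever the vDSO happens to be mapped. */
    notrace static unsigned long vdso_read_jiffies(void)
    {
            return VVAR(jiffies);
    }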

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/8af87023f57f6bb96ec8d17fce3f88018195b49b.1399317206.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/vvar.h
arch/x86/include/uapi/asm/vsyscall.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/hpet.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/vdso/vclock_gettime.c
arch/x86/vdso/vdso-layout.lds.S
arch/x86/xen/mmu.c

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 43f482a0db370628d2692de546c731015ae1203d..b0910f97a3eaaae432728c2d48f2cc09b3cc0b94 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -24,7 +24,7 @@
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
 #else
-#include <asm/vsyscall.h>
+#include <uapi/asm/vsyscall.h>
 #endif
 
 /*
@@ -41,7 +41,8 @@
 extern unsigned long __FIXADDR_TOP;
 #define FIXADDR_TOP    ((unsigned long)__FIXADDR_TOP)
 #else
-#define FIXADDR_TOP    (VSYSCALL_END-PAGE_SIZE)
+#define FIXADDR_TOP    (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
+                        PAGE_SIZE)
 #endif
 
 
@@ -68,11 +69,7 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_32
        FIX_HOLE,
 #else
-       VSYSCALL_LAST_PAGE,
-       VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
-                           + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-       VVAR_PAGE,
-       VSYSCALL_HPET,
+       VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
 #ifdef CONFIG_PARAVIRT_CLOCK
        PVCLOCK_FIXMAP_BEGIN,
        PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 081d909bc495426e576b5f07924db12590c7756d..5d2b9ad2c6d2953cbc803206a7347d31422ebd6c 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
 
 #else
 
-#ifdef BUILD_VDSO32
+extern char __vvar_page;
 
 #define DECLARE_VVAR(offset, type, name)                               \
        extern type vvar_ ## name __attribute__((visibility("hidden")));
 
 #define VVAR(name) (vvar_ ## name)
 
-#else
-
-extern char __vvar_page;
-
-/* Base address of vvars.  This is not ABI. */
-#ifdef CONFIG_X86_64
-#define VVAR_ADDRESS (-10*1024*1024 - 4096)
-#else
-#define VVAR_ADDRESS (&__vvar_page)
-#endif
-
-#define DECLARE_VVAR(offset, type, name)                               \
-       static type const * const vvaraddr_ ## name =                   \
-               (void *)(VVAR_ADDRESS + (offset));
-
-#define VVAR(name) (*vvaraddr_ ## name)
-#endif
-
 #define DEFINE_VVAR(type, name)                                                \
        type name                                                       \
        __attribute__((section(".vvar_" #name), aligned(16))) __visible
diff --git a/arch/x86/include/uapi/asm/vsyscall.h b/arch/x86/include/uapi/asm/vsyscall.h
index 85dc1b3825abc76b8ba24b4b21e15d9164d0adb0..b97dd6e263d293aade784e0c9e238a65a3d9cbd4 100644
--- a/arch/x86/include/uapi/asm/vsyscall.h
+++ b/arch/x86/include/uapi/asm/vsyscall.h
@@ -7,11 +7,6 @@ enum vsyscall_num {
        __NR_vgetcpu,
 };
 
-#define VSYSCALL_START (-10UL << 20)
-#define VSYSCALL_SIZE 1024
-#define VSYSCALL_END (-2UL << 20)
-#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
-
+#define VSYSCALL_ADDR (-10UL << 20)
 
 #endif /* _UAPI_ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7c65b4666c243300309130b6be9ef5da1f9f0b7b..2cbbf88d8f2cb1084d25dcecb776793b8f715bc7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
+#include <asm/vsyscall.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 8d80ae0116039b6c71945737befc4f88dea097df..bbc15a0362d223f91ca4b608ffc1037ddec84b0d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -74,9 +74,6 @@ static inline void hpet_writel(unsigned int d, unsigned int a)
 static inline void hpet_set_mapping(void)
 {
        hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-#ifdef CONFIG_X86_64
-       __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
-#endif
 }
 
 static inline void hpet_clear_mapping(void)
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 8b3b3eb3cead2dffbdd20d6dd4632595e723a117..ea5b5709aa76acc00ba3721570d31f6ee7901975 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -91,7 +91,7 @@ static int addr_to_vsyscall_nr(unsigned long addr)
 {
        int nr;
 
-       if ((addr & ~0xC00UL) != VSYSCALL_START)
+       if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
                return -EINVAL;
 
        nr = (addr & 0xC00UL) >> 10;
@@ -330,24 +330,17 @@ void __init map_vsyscall(void)
 {
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
-       unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
-       __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+       __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                     vsyscall_mode == NATIVE
                     ? PAGE_KERNEL_VSYSCALL
                     : PAGE_KERNEL_VVAR);
-       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
-                    (unsigned long)VSYSCALL_START);
-
-       __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
-       BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
-                    (unsigned long)VVAR_ADDRESS);
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+                    (unsigned long)VSYSCALL_ADDR);
 }
 
 static int __init vsyscall_init(void)
 {
-       BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
-
        cpu_notifier_register_begin();
 
        on_each_cpu(cpu_vsyscall_init, NULL, 1);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8e57229926779eb9db2afad3e5b277def75d4e0a..858b47b5221be716eba34760cfd44d511b9183e9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -18,7 +18,8 @@
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
 #include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
-#include <asm/fixmap.h>                        /* VSYSCALL_START               */
+#include <asm/fixmap.h>                        /* VSYSCALL_ADDR                */
+#include <asm/vsyscall.h>              /* emulate_vsyscall             */
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -771,7 +772,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                 * emulation.
                 */
                if (unlikely((error_code & PF_INSTR) &&
-                            ((address & ~0xfff) == VSYSCALL_START))) {
+                            ((address & ~0xfff) == VSYSCALL_ADDR))) {
                        if (emulate_vsyscall(regs, address))
                                return;
                }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 563849600d3eac66f1a6522342d738683aa4d4ac..6f881842116c19acae7242c9e51312aa2c175f91 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1055,8 +1055,8 @@ void __init mem_init(void)
        after_bootmem = 1;
 
        /* Register memory areas for /proc/kcore */
-       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-                        VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
+       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
+                        PAGE_SIZE, KCORE_OTHER);
 
        mem_init_print_info(NULL);
 }
@@ -1186,8 +1186,8 @@ int kern_addr_valid(unsigned long addr)
  * not need special handling anymore:
  */
 static struct vm_area_struct gate_vma = {
-       .vm_start       = VSYSCALL_START,
-       .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+       .vm_start       = VSYSCALL_ADDR,
+       .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
 };
@@ -1218,7 +1218,7 @@ int in_gate_area(struct mm_struct *mm, unsigned long addr)
  */
 int in_gate_area_no_mm(unsigned long addr)
 {
-       return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+       return (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 091554c20bc931103ed29d3498472afcfef0a8a4..b2e4f493e5b0ed8ad385ba3f3d32c7c891608970 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -30,9 +30,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
 #ifdef CONFIG_HPET_TIMER
-static inline u32 read_hpet_counter(const volatile void *addr)
+extern u8 hpet_page
+       __attribute__((visibility("hidden")));
+
+static notrace cycle_t vread_hpet(void)
 {
-       return *(const volatile u32 *) (addr + HPET_COUNTER);
+       return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
 }
 #endif
 
@@ -43,11 +46,6 @@ static inline u32 read_hpet_counter(const volatile void *addr)
 #include <asm/fixmap.h>
 #include <asm/pvclock.h>
 
-static notrace cycle_t vread_hpet(void)
-{
-       return read_hpet_counter((const void *)fix_to_virt(VSYSCALL_HPET));
-}
-
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
@@ -137,16 +135,6 @@ static notrace cycle_t vread_pvclock(int *mode)
 
 #else
 
-extern u8 hpet_page
-       __attribute__((visibility("hidden")));
-
-#ifdef CONFIG_HPET_TIMER
-static notrace cycle_t vread_hpet(void)
-{
-       return read_hpet_counter((const void *)(&hpet_page));
-}
-#endif
-
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index e177c08bb4bcf01ba149dd96bb40f9d598b8d2af..2ec72f651ebffa887ae037aebf556869fa5c6968 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -47,7 +47,6 @@ SECTIONS
 
        .text           : { *(.text*) }                 :text   =0x90909090,
 
-#ifdef BUILD_VDSO32
        /*
         * The remainder of the vDSO consists of special pages that are
         * shared between the kernel and userspace.  It needs to be at the
@@ -69,7 +68,6 @@ SECTIONS
 
        hpet_page = .;
        . = . + PAGE_SIZE;
-#endif
 
        . = ALIGN(PAGE_SIZE);
        end_mapping = .;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 86e02eabb640bd889c94ccb563181cd30690ceeb..3060568248d30092323d58034c4433dc258f1c2b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1494,7 +1494,7 @@ static int xen_pgd_alloc(struct mm_struct *mm)
                page->private = (unsigned long)user_pgd;
 
                if (user_pgd != NULL) {
-                       user_pgd[pgd_index(VSYSCALL_START)] =
+                       user_pgd[pgd_index(VSYSCALL_ADDR)] =
                                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
                        ret = 0;
                }
@@ -2062,8 +2062,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
        case FIX_KMAP_BEGIN ... FIX_KMAP_END:
 # endif
 #else
-       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-       case VVAR_PAGE:
+       case VSYSCALL_PAGE:
 #endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
@@ -2104,8 +2103,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #ifdef CONFIG_X86_64
        /* Replicate changes to map the vsyscall page into the user
           pagetable vsyscall mapping. */
-       if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
-           idx == VVAR_PAGE) {
+       if (idx == VSYSCALL_PAGE) {
                unsigned long vaddr = __fix_to_virt(idx);
                set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
        }