powerpc/mm: vmalloc abstraction in preparation for radix
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Fri, 29 Apr 2016 13:26:21 +0000 (23:26 +1000)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 11 May 2016 11:53:53 +0000 (21:53 +1000)
The vmalloc range differs between hash and radix config. Hence make
VMALLOC_START and related constants variables which will be runtime
initialized depending on whether hash or radix mode is active.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Fix missing init of ioremap_bot in pgtable_64.c for ppc64e]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/kernel/pci_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb_low.S

index cd3e91583c8103742dc199b3e9c99aa1b20fe6cb..f61cad3de4e69ec093674355332f7d3ee093cf2b 100644 (file)
 /*
  * Define the address range of the kernel non-linear virtual area
  */
-#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#define H_KERN_VIRT_SIZE       ASM_CONST(0x0000100000000000)
 
 /*
  * The vmalloc space starts at the beginning of that region, and
  * occupies half of it on hash CPUs and a quarter of it on Book3E
  * (we keep a quarter for the virtual memmap)
  */
-#define VMALLOC_START  KERN_VIRT_START
-#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
+#define H_VMALLOC_START        H_KERN_VIRT_START
+#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE >> 1)
+#define H_VMALLOC_END  (H_VMALLOC_START + H_VMALLOC_SIZE)
 
 /*
  * Region IDs
@@ -64,7 +64,7 @@
 #define REGION_MASK            (0xfUL << REGION_SHIFT)
 #define REGION_ID(ea)          (((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
+#define VMALLOC_REGION_ID      (REGION_ID(H_VMALLOC_START))
 #define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
 #define VMEMMAP_REGION_ID      (0xfUL) /* Server only */
 #define USER_REGION_ID         (0UL)
@@ -73,7 +73,7 @@
  * Defines the address of the vmemmap area, in its own region on
  * hash table CPUs.
  */
-#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define H_VMEMMAP_BASE         (VMEMMAP_REGION_ID << REGION_SHIFT)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
index 32a975694e53b53319f8b5741dd7bb23ae3a4599..f5628f96134b07fe07bd9f225c447b3019d789e1 100644 (file)
@@ -208,6 +208,18 @@ extern unsigned long __pgd_val_bits;
 #define PUD_MASKED_BITS                0xc0000000000000ffUL
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS                0xc0000000000000ffUL
+
+extern unsigned long __vmalloc_start;
+extern unsigned long __vmalloc_end;
+#define VMALLOC_START  __vmalloc_start
+#define VMALLOC_END    __vmalloc_end
+
+extern unsigned long __kernel_virt_start;
+extern unsigned long __kernel_virt_size;
+#define KERN_VIRT_START __kernel_virt_start
+#define KERN_VIRT_SIZE  __kernel_virt_size
+extern struct page *vmemmap;
+extern unsigned long ioremap_bot;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
@@ -220,7 +232,6 @@ extern unsigned long __pgd_val_bits;
 #endif
 
 #include <asm/barrier.h>
-
 /*
  * The second half of the kernel virtual space is used for IO mappings,
  * it's itself carved into the PIO region (ISA and PHB IO space) and
@@ -239,8 +250,6 @@ extern unsigned long __pgd_val_bits;
 #define IOREMAP_BASE   (PHB_IO_END)
 #define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
 
-#define vmemmap                        ((struct page *)VMEMMAP_BASE)
-
 /* Advertise special mapping type for AGP */
 #define HAVE_PAGE_AGP
 
index 63eb629a8b64f507fa5ae69f8a4d44cf3d080671..f4709024b46693973ac00ad6ab4039e65dc6a5f5 100644 (file)
                              RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
 
+/*
+ * We support a 52-bit address space. Use the top bit for the kernel
+ * virtual mapping. Also make sure the kernel fits in the top
+ * quadrant.
+ *
+ *           +------------------+
+ *           +------------------+  Kernel virtual map (0xc008000000000000)
+ *           |                  |
+ *           |                  |
+ *           |                  |
+ * 0b11......+------------------+  Kernel linear map (0xc....)
+ *           |                  |
+ *           |     2 quadrant   |
+ *           |                  |
+ * 0b10......+------------------+
+ *           |                  |
+ *           |    1 quadrant    |
+ *           |                  |
+ * 0b01......+------------------+
+ *           |                  |
+ *           |    0 quadrant    |
+ *           |                  |
+ * 0b00......+------------------+
+ *
+ *
+ * 3rd quadrant expanded:
+ * +------------------------------+
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel IO map end (0xc010000000000000)
+ * |                              |
+ * |                              |
+ * |      1/2 of virtual map      |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel IO map start
+ * |                              |
+ * |      1/4 of virtual map      |
+ * |                              |
+ * +------------------------------+  Kernel vmemmap start
+ * |                              |
+ * |     1/4 of virtual map       |
+ * |                              |
+ * +------------------------------+  Kernel virt start (0xc008000000000000)
+ * |                              |
+ * |                              |
+ * |                              |
+ * +------------------------------+  Kernel linear (0xc.....)
+ */
+
+#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
+#define RADIX_KERN_VIRT_SIZE  ASM_CONST(0x0008000000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies a quarter of it on radix config.
+ * (we keep a quarter for the virtual memmap)
+ */
+#define RADIX_VMALLOC_START    RADIX_KERN_VIRT_START
+#define RADIX_VMALLOC_SIZE     (RADIX_KERN_VIRT_SIZE >> 2)
+#define RADIX_VMALLOC_END      (RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
+/*
+ * Defines the address of the vmemmap area, in its own region,
+ * on the radix config.
+ */
+#define RADIX_VMEMMAP_BASE             (RADIX_VMALLOC_END)
+
 #ifndef __ASSEMBLY__
 #define RADIX_PTE_TABLE_SIZE   (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
 #define RADIX_PMD_TABLE_SIZE   (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
index 41503d7d53a1350364d256e43456c9d6c96885b2..3759df52bd671d883c38aec23fe4b133e6c0d0f7 100644 (file)
@@ -38,7 +38,7 @@
  * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
  * is mapped on the first 64K of IO space
  */
-unsigned long pci_io_base = ISA_IO_BASE;
+unsigned long pci_io_base;
 EXPORT_SYMBOL(pci_io_base);
 
 static int __init pcibios_init(void)
@@ -47,6 +47,7 @@ static int __init pcibios_init(void)
 
        printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
+       pci_io_base = ISA_IO_BASE;
        /* For now, override phys_mem_access_prot. If we need it,
         * later, we may move that initialization to each ppc_md
         */
index 64165a730160e08506e06be943317ef2bfa79afa..68aee435ea9766ec12ae3fb012dd72d5ae971dca 100644 (file)
@@ -889,6 +889,14 @@ void __init hash__early_init_mmu(void)
        __pmd_val_bits = 0;
        __pud_val_bits = 0;
        __pgd_val_bits = 0;
+
+       __kernel_virt_start = H_KERN_VIRT_START;
+       __kernel_virt_size = H_KERN_VIRT_SIZE;
+       __vmalloc_start = H_VMALLOC_START;
+       __vmalloc_end = H_VMALLOC_END;
+       vmemmap = (struct page *)H_VMEMMAP_BASE;
+       ioremap_bot = IOREMAP_BASE;
+
        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
index 6182b6c3c9c345ac1768910169df358637d2b4a4..13afacdfa96e4f6427d23fdf23dc386a514d8f72 100644 (file)
@@ -328,6 +328,13 @@ void __init radix__early_init_mmu(void)
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;
 
+       __kernel_virt_start = RADIX_KERN_VIRT_START;
+       __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
+       __vmalloc_start = RADIX_VMALLOC_START;
+       __vmalloc_end = RADIX_VMALLOC_END;
+       vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
+       ioremap_bot = IOREMAP_BASE;
+
        radix_init_page_sizes();
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                radix_init_partition_table();
index b8c75e65157cc0a5b0229e5da99b941b6fdc0475..216e2bda8f2cba40f1d4539730aa94eba9d5c0b4 100644 (file)
@@ -97,9 +97,20 @@ unsigned long __pud_val_bits;
 EXPORT_SYMBOL(__pud_val_bits);
 unsigned long __pgd_val_bits;
 EXPORT_SYMBOL(__pgd_val_bits);
-
-#endif
+unsigned long __kernel_virt_start;
+EXPORT_SYMBOL(__kernel_virt_start);
+unsigned long __kernel_virt_size;
+EXPORT_SYMBOL(__kernel_virt_size);
+unsigned long __vmalloc_start;
+EXPORT_SYMBOL(__vmalloc_start);
+unsigned long __vmalloc_end;
+EXPORT_SYMBOL(__vmalloc_end);
+struct page *vmemmap;
+EXPORT_SYMBOL(vmemmap);
+unsigned long ioremap_bot;
+#else /* !CONFIG_PPC_BOOK3S_64 */
 unsigned long ioremap_bot = IOREMAP_BASE;
+#endif
 
 /**
  * __ioremap_at - Low level function to establish the page tables
index 15b8f712b50b4f5e6f4d4318eb1eae9d8eeea6f5..dfdb90cb44039f364d92cecd2e920eccb25d07e5 100644 (file)
@@ -91,7 +91,7 @@ slb_miss_kernel_load_vmemmap:
         * can be demoted from 64K -> 4K dynamically on some machines
         */
        clrldi  r11,r10,48
-       cmpldi  r11,(VMALLOC_SIZE >> 28) - 1
+       cmpldi  r11,(H_VMALLOC_SIZE >> 28) - 1
        bgt     5f
        lhz     r11,PACAVMALLOCSLLP(r13)
        b       6f