sparc32: use pgtable-nopud instead of 4level-fixup
author    Mike Rapoport <rppt@linux.ibm.com>
Thu, 5 Dec 2019 00:54:20 +0000 (16:54 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 5 Dec 2019 03:44:15 +0000 (19:44 -0800)
The 32-bit version of sparc has three-level page tables and can use
pgtable-nopud with folding of the upper layers.

Replace usage of include/asm-generic/4level-fixup.h with
include/asm-generic/pgtable-nopud.h and adjust page table manipulation
macros and functions accordingly.
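
For illustration only (an editor's sketch, not part of the patch): with
<asm-generic/pgtable-nopud.h> the p4d and pud levels are folded into the
pgd, so p4d_offset() and pud_offset() merely recast their argument and the
walk still resolves the same three SRMMU levels (pgd, pmd, pte).  The
converted pmd_offset() call sites in the diff below essentially follow this
pattern; the helper name walk_kernel_pte() is hypothetical.

  /* Sketch of the folded page-table walk after the conversion. */
  static pte_t *walk_kernel_pte(unsigned long addr)
  {
  	pgd_t *pgd = pgd_offset_k(addr);	/* top level of the kernel page table */
  	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded: same entry as the pgd */
  	pud_t *pud = pud_offset(p4d, addr);	/* folded: same entry again */
  	pmd_t *pmd = pmd_offset(pud, addr);	/* real second level on sparc32 */

  	return pte_offset_kernel(pmd, addr);	/* third level: the pte itself */
  }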

Link: http://lkml.kernel.org/r/1572938135-31886-11-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: Anatoly Pugachev <matorola@gmail.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/mm/fault_32.c
arch/sparc/mm/highmem.c
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/srmmu.c

diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 10538a4d1a1e9ab7ad79686b6a968d7093aa830f..eae0c92ec422f2a4c3fde7125215e31eb7445dd8 100644
@@ -26,14 +26,14 @@ static inline void free_pgd_fast(pgd_t *pgd)
 #define pgd_free(mm, pgd)      free_pgd_fast(pgd)
 #define pgd_alloc(mm)  get_pgd_fast()
 
-static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+static inline void pud_set(pud_t * pudp, pmd_t * pmdp)
 {
        unsigned long pa = __nocache_pa(pmdp);
 
-       set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
+       set_pte((pte_t *)pudp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }
 
-#define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
+#define pud_populate(MM, PGD, PMD)      pud_set(PGD, PMD)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
                                   unsigned long address)
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 31da4482664576e2b4e4cf77972b26c595064810..6d6f44c0cad9b95a335afcc1cac3efb453611c87 100644
@@ -12,7 +12,7 @@
 #include <linux/const.h>
 
 #ifndef __ASSEMBLY__
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
@@ -132,12 +132,12 @@ static inline struct page *pmd_page(pmd_t pmd)
        return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
 }
 
-static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+static inline unsigned long pud_page_vaddr(pud_t pud)
 {
-       if (srmmu_device_memory(pgd_val(pgd))) {
+       if (srmmu_device_memory(pud_val(pud))) {
                return ~0;
        } else {
-               unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
+               unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
                return (unsigned long)__nocache_va(v << 4);
        }
 }
@@ -184,24 +184,24 @@ static inline void pmd_clear(pmd_t *pmdp)
                set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
 }
 
-static inline int pgd_none(pgd_t pgd)          
+static inline int pud_none(pud_t pud)
 {
-       return !(pgd_val(pgd) & 0xFFFFFFF);
+       return !(pud_val(pud) & 0xFFFFFFF);
 }
 
-static inline int pgd_bad(pgd_t pgd)
+static inline int pud_bad(pud_t pud)
 {
-       return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
+       return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
 }
 
-static inline int pgd_present(pgd_t pgd)
+static inline int pud_present(pud_t pud)
 {
-       return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
+       return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
 }
 
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void pud_clear(pud_t *pudp)
 {
-       set_pte((pte_t *)pgdp, __pte(0));
+       set_pte((pte_t *)pudp, __pte(0));
 }
 
 /*
@@ -319,9 +319,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
 {
-       return (pmd_t *) pgd_page_vaddr(*dir) +
+       return (pmd_t *) pud_page_vaddr(*dir) +
                ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
 }
 
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 8d69de111470c6668d709857b755906f0802e91b..89976c9b936cfb242a06c80db5dcf9fc9fa96207 100644
@@ -351,6 +351,8 @@ vmalloc_fault:
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
+               p4d_t *p4d, *p4d_k;
+               pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
 
                pgd = tsk->active_mm->pgd + offset;
@@ -363,8 +365,13 @@ vmalloc_fault:
                        return;
                }
 
-               pmd = pmd_offset(pgd, address);
-               pmd_k = pmd_offset(pgd_k, address);
+               p4d = p4d_offset(pgd, address);
+               pud = pud_offset(p4d, address);
+               pmd = pmd_offset(pud, address);
+
+               p4d_k = p4d_offset(pgd_k, address);
+               pud_k = pud_offset(p4d_k, address);
+               pmd_k = pmd_offset(pud_k, address);
 
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 86bc2a58d26c479e23d82b1ad198b51722812013..d4a80adea7e59b1fb6469ac16d110a3589a4bb53 100644
@@ -39,10 +39,14 @@ static pte_t *kmap_pte;
 void __init kmap_init(void)
 {
        unsigned long address;
+       p4d_t *p4d;
+       pud_t *pud;
        pmd_t *dir;
 
        address = __fix_to_virt(FIX_KMAP_BEGIN);
-       dir = pmd_offset(pgd_offset_k(address), address);
+       p4d = p4d_offset(pgd_offset_k(address), address);
+       pud = pud_offset(p4d, address);
+       dir = pmd_offset(pud, address);
 
         /* cache the first kmap pte */
         kmap_pte = pte_offset_kernel(dir, address);
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index f770ee7229d8d07b08fe056392ee9df5cfb05792..33a0facd9eb5b61be9e086933657e8e7f7858236 100644
@@ -239,12 +239,16 @@ static void *iounit_alloc(struct device *dev, size_t len,
                page = va;
                {
                        pgd_t *pgdp;
+                       p4d_t *p4dp;
+                       pud_t *pudp;
                        pmd_t *pmdp;
                        pte_t *ptep;
                        long i;
 
                        pgdp = pgd_offset(&init_mm, addr);
-                       pmdp = pmd_offset(pgdp, addr);
+                       p4dp = p4d_offset(pgdp, addr);
+                       pudp = pud_offset(p4dp, addr);
+                       pmdp = pmd_offset(pudp, addr);
                        ptep = pte_offset_map(pmdp, addr);
 
                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 71ac353032b686e9fe0cc252613c8d6c97c2cfca..4d3c6991f0aedee0003e285d9050c42108d8aab4 100644
@@ -343,6 +343,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
                page = va;
                {
                        pgd_t *pgdp;
+                       p4d_t *p4dp;
+                       pud_t *pudp;
                        pmd_t *pmdp;
                        pte_t *ptep;
 
@@ -354,7 +356,9 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
                                __flush_page_to_ram(page);
 
                        pgdp = pgd_offset(&init_mm, addr);
-                       pmdp = pmd_offset(pgdp, addr);
+                       p4dp = p4d_offset(pgdp, addr);
+                       pudp = pud_offset(p4dp, addr);
+                       pmdp = pmd_offset(pudp, addr);
                        ptep = pte_offset_map(pmdp, addr);
 
                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index cc3ad64479ac3478b9fab46e316f32b649f8d788..f56c3c9a97933765d6931b7cb0654eab6bff13ec 100644
@@ -296,6 +296,8 @@ static void __init srmmu_nocache_init(void)
        void *srmmu_nocache_bitmap;
        unsigned int bitmap_bits;
        pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr, vaddr;
@@ -329,6 +331,8 @@ static void __init srmmu_nocache_init(void)
 
        while (vaddr < srmmu_nocache_end) {
                pgd = pgd_offset_k(vaddr);
+               p4d = p4d_offset(__nocache_fix(pgd), vaddr);
+               pud = pud_offset(__nocache_fix(p4d), vaddr);
                pmd = pmd_offset(__nocache_fix(pgd), vaddr);
                pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 
@@ -516,13 +520,17 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
                                   unsigned long virt_addr, int bus_type)
 {
        pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        unsigned long tmp;
 
        physaddr &= PAGE_MASK;
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = pmd_offset(pgdp, virt_addr);
+       p4dp = p4d_offset(pgdp, virt_addr);
+       pudp = pud_offset(p4dp, virt_addr);
+       pmdp = pmd_offset(pudp, virt_addr);
        ptep = pte_offset_kernel(pmdp, virt_addr);
        tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 
@@ -551,11 +559,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
 static inline void srmmu_unmapioaddr(unsigned long virt_addr)
 {
        pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
 
+
        pgdp = pgd_offset_k(virt_addr);
-       pmdp = pmd_offset(pgdp, virt_addr);
+       p4dp = p4d_offset(pgdp, virt_addr);
+       pudp = pud_offset(p4dp, virt_addr);
+       pmdp = pmd_offset(pudp, virt_addr);
        ptep = pte_offset_kernel(pmdp, virt_addr);
 
        /* No need to flush uncacheable page. */
@@ -693,20 +706,24 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                                                        unsigned long end)
 {
        pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
 
        while (start < end) {
                pgdp = pgd_offset_k(start);
-               if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+               p4dp = p4d_offset(pgdp, start);
+               pudp = pud_offset(p4dp, start);
+               if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(
                            SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       pgd_set(__nocache_fix(pgdp), pmdp);
+                       pud_set(__nocache_fix(pudp), pmdp);
                }
-               pmdp = pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pudp), start);
                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
@@ -724,19 +741,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
                                                  unsigned long end)
 {
        pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
 
        while (start < end) {
                pgdp = pgd_offset_k(start);
-               if (pgd_none(*pgdp)) {
+               p4dp = p4d_offset(pgdp, start);
+               pudp = pud_offset(p4dp, start);
+               if (pud_none(*pudp)) {
                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
-                       pgd_set(pgdp, pmdp);
+                       pud_set((pud_t *)pgdp, pmdp);
                }
-               pmdp = pmd_offset(pgdp, start);
+               pmdp = pmd_offset(pudp, start);
                if (srmmu_pmd_none(*pmdp)) {
                        ptep = __srmmu_get_nocache(PTE_SIZE,
                                                             PTE_SIZE);
@@ -779,6 +800,8 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
        unsigned long probed;
        unsigned long addr;
        pgd_t *pgdp;
+       p4d_t *p4dp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
@@ -810,18 +833,20 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                }
 
                pgdp = pgd_offset_k(start);
+               p4dp = p4d_offset(pgdp, start);
+               pudp = pud_offset(p4dp, start);
                if (what == 2) {
                        *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
                        start += SRMMU_PGDIR_SIZE;
                        continue;
                }
-               if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+               if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
                                                   SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-                       pgd_set(__nocache_fix(pgdp), pmdp);
+                       pud_set(__nocache_fix(pudp), pmdp);
                }
                pmdp = pmd_offset(__nocache_fix(pgdp), start);
                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
@@ -906,6 +931,8 @@ void __init srmmu_paging_init(void)
        phandle cpunode;
        char node_str[128];
        pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long pages_avail;
@@ -967,7 +994,9 @@ void __init srmmu_paging_init(void)
        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
 
        pgd = pgd_offset_k(PKMAP_BASE);
-       pmd = pmd_offset(pgd, PKMAP_BASE);
+       p4d = p4d_offset(pgd, PKMAP_BASE);
+       pud = pud_offset(p4d, PKMAP_BASE);
+       pmd = pmd_offset(pud, PKMAP_BASE);
        pte = pte_offset_kernel(pmd, PKMAP_BASE);
        pkmap_page_table = pte;