powerpc/hash32: use physical address directly in hash handlers.
author	Christophe Leroy <christophe.leroy@c-s.fr>
Thu, 21 Feb 2019 10:37:57 +0000 (10:37 +0000)
committer	Michael Ellerman <mpe@ellerman.id.au>
Thu, 21 Feb 2019 13:10:16 +0000 (00:10 +1100)
Since commit c62ce9ef97ba ("powerpc: remove remaining bits from
CONFIG_APUS"), tophys() has become a pure constant operation.
PAGE_OFFSET is known at compile time, so the physical address of a
kernel symbol can be computed directly at build time.
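
Concretely (a sketch based on the add_hash_page hunk below), a
run-time offset computation:

	tophys(r7,0)			/* r7 = -PAGE_OFFSET, now a constant */
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l

becomes a pure link-time expression:

	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l

This also frees r7 in hash_page(); the register-usage comment and the
GPR7 reload on exception return are updated accordingly.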

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/hash_low_32.S
arch/powerpc/mm/ppc_mmu_32.c

diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 82e7dd0c02209569581d2cee0b2a3349a52f3d37..d94fef524ef50342951c512ed02b8a300f28655d 100644
@@ -47,14 +47,13 @@ mmu_hash_lock:
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address.  Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, r10, ctr, lr.
+ * Uses r0, r3 - r6, r8, r10, ctr, lr.
  */
        .text
 _GLOBAL(hash_page)
-       tophys(r7,0)                    /* gets -KERNELBASE into r7 */
 #ifdef CONFIG_SMP
-       addis   r8,r7,mmu_hash_lock@h
-       ori     r8,r8,mmu_hash_lock@l
+       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@h
+       ori     r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
        lis     r0,0x0fff
        b       10f
 11:    lwz     r6,0(r8)
@@ -76,7 +75,7 @@ _GLOBAL(hash_page)
        lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
-112:   add     r5,r5,r7                /* convert to phys addr */
+112:   tophys(r5, r5)
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r8,0(r5)                /* get pmd entry */
@@ -143,25 +142,24 @@ retry:
 
 #ifdef CONFIG_SMP
        eieio
-       addis   r8,r7,mmu_hash_lock@ha
+       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
        li      r0,0
-       stw     r0,mmu_hash_lock@l(r8)
+       stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 #endif
 
        /* Return from the exception */
        lwz     r5,_CTR(r11)
        mtctr   r5
        lwz     r0,GPR0(r11)
-       lwz     r7,GPR7(r11)
        lwz     r8,GPR8(r11)
        b       fast_exception_return
 
 #ifdef CONFIG_SMP
 hash_page_out:
        eieio
-       addis   r8,r7,mmu_hash_lock@ha
+       lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
        li      r0,0
-       stw     r0,mmu_hash_lock@l(r8)
+       stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
        blr
 #endif /* CONFIG_SMP */
 
@@ -207,11 +205,9 @@ _GLOBAL(add_hash_page)
        SYNC_601
        isync
 
-       tophys(r7,0)
-
 #ifdef CONFIG_SMP
-       addis   r6,r7,mmu_hash_lock@ha
-       addi    r6,r6,mmu_hash_lock@l
+       lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
+       addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
 10:    lwarx   r0,0,r6                 /* take the mmu_hash_lock */
        cmpi    0,r0,0
        bne-    11f
@@ -256,8 +252,8 @@ _GLOBAL(add_hash_page)
 
 9:
 #ifdef CONFIG_SMP
-       addis   r6,r7,mmu_hash_lock@ha
-       addi    r6,r6,mmu_hash_lock@l
+       lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
+       addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
        eieio
        li      r0,0
        stw     r0,0(r6)                /* clear mmu_hash_lock */
@@ -277,10 +273,8 @@ _GLOBAL(add_hash_page)
  * It is designed to be called with the MMU either on or off.
  * r3 contains the VSID, r4 contains the virtual address,
  * r5 contains the linux PTE, r6 contains the old value of the
- * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
- * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).  r10 contains the upper half of
- * the PTE if CONFIG_PTE_64BIT.
+ * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
+ * upper half of the PTE if CONFIG_PTE_64BIT.
  * On SMP, the caller should have the mmu_hash_lock held.
  * We assume that the caller has (or will) set the _PAGE_HASHPTE
  * bit in the linux PTE in memory.  The value passed in r6 should
@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        patch_site      1f, patch__hash_page_A1
        patch_site      2f, patch__hash_page_A2
        /* Get the address of the primary PTE group in the hash table (r3) */
-0:     addis   r0,r7,Hash_base@h       /* base address of hash table */
+0:     lis     r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
 1:     rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 2:     rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r3,r3,r0                /* make primary hash */
@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        beq+    10f                     /* no PTE: go look for an empty slot */
        tlbie   r4
 
-       addis   r4,r7,htab_hash_searches@ha
-       lwz     r6,htab_hash_searches@l(r4)
+       lis     r4, (htab_hash_searches - PAGE_OFFSET)@ha
+       lwz     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
        addi    r6,r6,1                 /* count how many searches we do */
-       stw     r6,htab_hash_searches@l(r4)
+       stw     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
 
        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
        mtctr   r0
@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        beq+    found_empty
 
        /* update counter of times that the primary PTEG is full */
-       addis   r4,r7,primary_pteg_full@ha
-       lwz     r6,primary_pteg_full@l(r4)
+       lis     r4, (primary_pteg_full - PAGE_OFFSET)@ha
+       lwz     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
        addi    r6,r6,1
-       stw     r6,primary_pteg_full@l(r4)
+       stw     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
 
        patch_site      0f, patch__hash_page_C
        /* Search the secondary PTEG for an empty slot */
@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
         * lockup here but that shouldn't happen
         */
 
-1:     addis   r4,r7,next_slot@ha              /* get next evict slot */
-       lwz     r6,next_slot@l(r4)
+1:     lis     r4, (next_slot - PAGE_OFFSET)@ha        /* get next evict slot */
+       lwz     r6, (next_slot - PAGE_OFFSET)@l(r4)
        addi    r6,r6,HPTE_SIZE                 /* search for candidate */
        andi.   r6,r6,7*HPTE_SIZE
        stw     r6,next_slot@l(r4)
@@ -500,8 +494,6 @@ htab_hash_searches:
  * We assume that there is a hash table in use (Hash != 0).
  */
 _GLOBAL(flush_hash_pages)
-       tophys(r7,0)
-
        /*
         * We disable interrupts here, even on UP, because we want
         * the _PAGE_HASHPTE bit to be a reliable indication of
@@ -546,10 +538,10 @@ _GLOBAL(flush_hash_pages)
        SET_V(r11)                      /* set V (valid) bit */
 
 #ifdef CONFIG_SMP
-       addis   r9,r7,mmu_hash_lock@ha
-       addi    r9,r9,mmu_hash_lock@l
+       lis     r9, (mmu_hash_lock - PAGE_OFFSET)@ha
+       addi    r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
        CURRENT_THREAD_INFO(r8, r1)
-       add     r8,r8,r7
+       tophys(r8, r8)
        lwz     r8,TI_CPU(r8)
        oris    r8,r8,9
 10:    lwarx   r0,0,r9
@@ -583,7 +575,7 @@ _GLOBAL(flush_hash_pages)
        patch_site      1f, patch__flush_hash_A1
        patch_site      2f, patch__flush_hash_A2
        /* Get the address of the primary PTE group in the hash table (r3) */
-0:     addis   r8,r7,Hash_base@h       /* base address of hash table */
+0:     lis     r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
 1:     rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 2:     rlwinm  r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r8,r0,r8                /* make primary hash */
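
One detail worth spelling out in the hunks above (explanatory, not
part of the patch): @h and @ha are not interchangeable. ori
zero-extends its immediate, so it pairs with the raw high half @h;
addi, lwz and stw sign-extend their 16-bit operand, so they need @ha,
which rounds the high half up by one when the low half is >= 0x8000:

	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h	/* raw high half */
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l	/* zero-extended */

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha	/* adjusted */
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)	/* sign-extended */

The untouched "stw r6,next_slot@l(r4)" context line remains correct:
PAGE_OFFSET's low 16 bits are zero, so sym@l equals
(sym - PAGE_OFFSET)@l.
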
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 3f4193201ee713c72ae99c3d8ffa3e079b71c2cf..fb747bb0b3e4352f1ceeb3f163cf334f5c366124 100644
@@ -231,7 +231,8 @@ void __init MMU_init_hw(void)
        if (lg_n_hpteg > 16)
                mb2 = 16 - LG_HPTEG_SIZE;
 
-       modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16);
+       modify_instruction_site(&patch__hash_page_A0, 0xffff,
+                               ((unsigned int)Hash - PAGE_OFFSET) >> 16);
        modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
        modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
        modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
@@ -240,7 +241,8 @@ void __init MMU_init_hw(void)
        /*
         * Patch up the instructions in hashtable.S:flush_hash_page
         */
-       modify_instruction_site(&patch__flush_hash_A0, 0xffff, (unsigned int)Hash >> 16);
+       modify_instruction_site(&patch__flush_hash_A0, 0xffff,
+                               ((unsigned int)Hash - PAGE_OFFSET) >> 16);
        modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6);
        modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6);
        modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
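
The C side pairs with this (an explanatory sketch; HASH_PHYS is an
illustrative name, not a kernel symbol): the hash table pointed to by
Hash is allocated at run time, so its physical address cannot be
folded in by the assembler. MMU_init_hw() therefore rewrites the
16-bit immediate field (mask 0xffff) of the patched lis with the high
half of the run-time physical address:

	/* as assembled, a build-time placeholder: */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h
	/* after modify_instruction_site(&patch__hash_page_A0, 0xffff,
	 *		((unsigned int)Hash - PAGE_OFFSET) >> 16)
	 * the instruction behaves as if assembled as: */
0:	lis	r0, HASH_PHYS@h		/* HASH_PHYS = (u32)Hash - PAGE_OFFSET */

Only the high half matters here: the hash table is sufficiently
aligned that the low 16 bits are zero, and the following rlwimi
instructions insert the hash bits below it.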