There are actually only four separate implementations of set_pte_ext.
Use assembler macros to insert code for these into the proc-*.S files.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
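For reference, the translation that the new armv3_set_pte_ext macro centralises
can be sketched in C along the following lines. This is an illustrative sketch
only, not part of the patch; the L_PTE_*/PTE_* values are assumptions matching
the usual definitions in asm/pgtable.h and asm/pgtable-hwdef.h of this era.

    #include <stdint.h>
    #include <stdbool.h>

    /* Linux PTE bits (assumed values) */
    #define L_PTE_PRESENT     (1u << 0)
    #define L_PTE_YOUNG       (1u << 1)
    #define L_PTE_BUFFERABLE  (1u << 2)
    #define L_PTE_CACHEABLE   (1u << 3)
    #define L_PTE_USER        (1u << 4)
    #define L_PTE_WRITE       (1u << 5)
    #define L_PTE_DIRTY       (1u << 7)

    /* hardware small-page descriptor bits (assumed values) */
    #define PTE_TYPE_SMALL        0x02u
    #define PTE_TYPE_MASK         0x03u
    #define PTE_BUFFERABLE        (1u << 2)
    #define PTE_CACHEABLE         (1u << 3)
    #define PTE_SMALL_AP_MASK     (0xffu << 4)
    #define PTE_SMALL_AP_UNO_SRW  (0x55u << 4)   /* svc r/w, user no access */
    #define PTE_SMALL_AP_URO_SRW  (0xaau << 4)   /* svc r/w, user read-only */

    /*
     * What armv3_set_pte_ext computes: derive the hardware small-page
     * descriptor (stored 2048 bytes below the Linux PTE) from the Linux PTE.
     */
    static uint32_t armv3_hw_pte(uint32_t pte, bool dcache_writethrough)
    {
            /* keep page address and C/B, clear AP bits, force small-page type */
            uint32_t hw = (pte & ~(PTE_SMALL_AP_MASK | PTE_TYPE_MASK)) | PTE_TYPE_SMALL;

            if (pte & L_PTE_USER)                   /* user mapping: user r/o, svc r/w */
                    hw |= PTE_SMALL_AP_URO_SRW;
            if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) == (L_PTE_WRITE | L_PTE_DIRTY))
                    hw |= PTE_SMALL_AP_UNO_SRW;     /* writable and dirty: svc r/w,
                                                       user r/w if also user */

            if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) != (L_PTE_PRESENT | L_PTE_YOUNG))
                    return 0;                       /* not present/young: fault entry */

            /* wc_disable=1 with a write-through D-cache: cacheable => not bufferable */
            if (dcache_writethrough && (hw & PTE_CACHEABLE))
                    hw &= ~PTE_BUFFERABLE;

            return hw;
    }
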
.align 5
ENTRY(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 4
.align 5
ENTRY(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.align 5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.align 5
ENTRY(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
+
ENTRY(cpu_arm6_dcache_clean_area)
ENTRY(cpu_arm7_dcache_clean_area)
mov pc, lr
* : r1 = value to set
* Purpose : Set a PTE and flush it out of any WB cache
*/
- .align 5
+ .align 5
ENTRY(cpu_arm6_set_pte_ext)
ENTRY(cpu_arm7_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
#endif /* CONFIG_MMU */
- mov pc, lr
+ mov pc, lr
/*
* Function: _arm6_7_reset
* : r1 = value to set
* Purpose : Set a PTE and flush it out of any WB cache
*/
- .align 5
+ .align 5
ENTRY(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
#endif
- mov pc, lr
+ mov pc, lr
/*
* Function: arm720_reset
.align 5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
-#endif /* CONFIG_MMU */
+#endif
mov pc, lr
__INIT
.align 5
ENTRY(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
.align 5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.align 5
ENTRY(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
.align 5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
mov \reg, #16 @ size offset
mov \reg, \reg, lsl \tmp @ actual cache line size
.endm
+
+
+/*
+ * Sanity check the PTE configuration for the code below - which makes
+ * certain assumptions about how these bits are laid out.
+ */
+#if L_PTE_SHARED != PTE_EXT_SHARED
+#error PTE shared bit mismatch
+#endif
+#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
+#error PTE bufferable bit mismatch
+#endif
+#if L_PTE_CACHEABLE != PTE_CACHEABLE
+#error PTE cacheable bit mismatch
+#endif
+#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
+ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+#error Invalid Linux PTE bit settings
+#endif
+
+/*
+ * The ARMv6 and ARMv7 set_pte_ext translation function.
+ *
+ * Permission translation:
+ * YUWD APX AP1 AP0 SVC User
+ * 0xxx 0 0 0 no acc no acc
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+ * 110x 0 1 0 r/w r/o
+ * 11x0 0 1 0 r/w r/o
+ * 1111 0 1 1 r/w r/w
+ */
+ .macro armv6_set_pte_ext
+ str r1, [r0], #-2048 @ linux version
+
+ bic r3, r1, #0x000003f0
+ bic r3, r3, #PTE_TYPE_MASK
+ orr r3, r3, r2
+ orr r3, r3, #PTE_EXT_AP0 | 2
+
+ tst r1, #L_PTE_WRITE
+ tstne r1, #L_PTE_DIRTY
+ orreq r3, r3, #PTE_EXT_APX
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+ tstne r3, #PTE_EXT_APX
+ bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+
+ tst r1, #L_PTE_EXEC
+ orreq r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ .endm
+
+
+/*
+ * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
+ * covering most CPUs except Xscale and Xscale 3.
+ *
+ * Permission translation:
+ * YUWD AP SVC User
+ * 0xxx 0x00 no acc no acc
+ * 100x 0x00 r/o no acc
+ * 10x0 0x00 r/o no acc
+ * 1011 0x55 r/w no acc
+ * 110x 0xaa r/w r/o
+ * 11x0 0xaa r/w r/o
+ * 1111 0xff r/w r/w
+ */
+ .macro armv3_set_pte_ext wc_disable=1
+ str r1, [r0], #-2048 @ linux version
+
+ eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+
+ bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
+ bic r2, r2, #PTE_TYPE_MASK
+ orr r2, r2, #PTE_TYPE_SMALL
+
+ tst r3, #L_PTE_USER @ user?
+ orrne r2, r2, #PTE_SMALL_AP_URO_SRW
+
+ tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
+ orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
+
+ tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
+ movne r2, #0
+
+ .if \wc_disable
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ tst r2, #PTE_CACHEABLE
+ bicne r2, r2, #PTE_BUFFERABLE
+#endif
+ .endif
+ str r2, [r0] @ hardware version
+ .endm
+
+
+/*
+ * Xscale set_pte_ext translation, split into two halves to cope
+ * with work-arounds. r3 must be preserved by code between these
+ * two macros.
+ *
+ * Permission translation:
+ * YUWD AP SVC User
+ * 0xxx 00 no acc no acc
+ * 100x 00 r/o no acc
+ * 10x0 00 r/o no acc
+ * 1011 01 r/w no acc
+ * 110x 10 r/w r/o
+ * 11x0 10 r/w r/o
+ * 1111 11 r/w r/w
+ */
+ .macro xscale_set_pte_ext_prologue
+ str r1, [r0], #-2048 @ linux version
+
+ eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+
+ bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
+ orr r2, r2, #PTE_TYPE_EXT @ extended page
+
+ tst r3, #L_PTE_USER @ user?
+ orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
+
+ tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
+ orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
+ @ combined with user -> user r/w
+ .endm
+
+ .macro xscale_set_pte_ext_epilogue
+ tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
+ movne r2, #0 @ no -> fault
+
+ str r2, [r0] @ hardware version
+ mov ip, #0
+ mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
+ mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ .endm
.align 5
ENTRY(cpu_sa110_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
.align 5
ENTRY(cpu_sa1100_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
- *
- * Permissions:
- * YUWD APX AP1 AP0 SVC User
- * 0xxx 0 0 0 no acc no acc
- * 100x 1 0 1 r/o no acc
- * 10x0 1 0 1 r/o no acc
- * 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v6_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- bic r3, r1, #0x000003f0
- bic r3, r3, #0x00000003
- orr r3, r3, r2
- orr r3, r3, #PTE_EXT_AP0 | 2
-
- tst r1, #L_PTE_WRITE
- tstne r1, #L_PTE_DIRTY
- orreq r3, r3, #PTE_EXT_APX
-
- tst r1, #L_PTE_USER
- orrne r3, r3, #PTE_EXT_AP1
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-
- tst r1, #L_PTE_YOUNG
- biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
-
- tst r1, #L_PTE_EXEC
- orreq r3, r3, #PTE_EXT_XN
-
- tst r1, #L_PTE_PRESENT
- moveq r3, #0
-
- str r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ armv6_set_pte_ext
#endif
mov pc, lr
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
- *
- * Permissions:
- * YUWD APX AP1 AP0 SVC User
- * 0xxx 0 0 0 no acc no acc
- * 100x 1 0 1 r/o no acc
- * 10x0 1 0 1 r/o no acc
- * 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- bic r3, r1, #0x000003f0
- bic r3, r3, #0x00000003
- orr r3, r3, r2
- orr r3, r3, #PTE_EXT_AP0 | 2
-
- tst r1, #L_PTE_WRITE
- tstne r1, #L_PTE_DIRTY
- orreq r3, r3, #PTE_EXT_APX
-
- tst r1, #L_PTE_USER
- orrne r3, r3, #PTE_EXT_AP1
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-
- tst r1, #L_PTE_YOUNG
- biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
-
- tst r1, #L_PTE_EXEC
- orreq r3, r3, #PTE_EXT_XN
-
- tst r1, #L_PTE_PRESENT
- moveq r3, #0
-
- str r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ armv6_set_pte_ext
#endif
mov pc, lr
*/
.align 5
ENTRY(cpu_xsc3_set_pte_ext)
- str r1, [r0], #-2048 @ linux version
-
- bic r2, r1, #0xff0 @ keep C, B bits
- orr r2, r2, #PTE_TYPE_EXT @ extended page
- tst r1, #L_PTE_SHARED @ shared?
- orrne r2, r2, #0x200
-
- eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- tst r3, #L_PTE_USER @ user?
- orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
-
- tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
- orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
- @ combined with user -> user r/w
+ xscale_set_pte_ext_prologue
@ If it's cacheable, it needs to be in L2 also.
- eor ip, r1, #L_PTE_CACHEABLE
- tst ip, #L_PTE_CACHEABLE
- orreq r2, r2, #PTE_EXT_TEX(0x5)
+ tst r1, #L_PTE_CACHEABLE
+ orrne r2, r2, #PTE_EXT_TEX(0x5)
- tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
- movne r2, #0 @ no -> fault
+ tst r1, #L_PTE_SHARED @ shared?
+ orrne r2, r2, #0x200
- str r2, [r0] @ hardware version
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
- mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ xscale_set_pte_ext_epilogue
mov pc, lr
.ltorg
*/
.align 5
ENTRY(cpu_xscale_set_pte_ext)
- str r1, [r0], #-2048 @ linux version
-
- bic r2, r1, #0xff0
- orr r2, r2, #PTE_TYPE_EXT @ extended page
-
- eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- tst r3, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
-
- tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
- @ combined with user -> user r/w
-
+ xscale_set_pte_ext_prologue
@
@ Handle the X bit. We want to set this bit for the minicache
@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
@
@ X = (C & ~W & ~B) | (C & W & B & write_allocate)
@
- eor ip, r1, #L_PTE_CACHEABLE
- tst ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
+ and ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
+ teq ip, #L_PTE_CACHEABLE
#if PTE_CACHE_WRITE_ALLOCATE
- eorne ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
- tstne ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
+ teqne ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
orreq r2, r2, #PTE_EXT_TEX(1)
teq ip, #L_PTE_USER | L_PTE_CACHEABLE
biceq r2, r2, #PTE_BUFFERABLE
- tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0 @ no -> fault
-
- str r2, [r0] @ hardware version
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ xscale_set_pte_ext_epilogue
mov pc, lr
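
The ARMv6/ARMv7 variant that armv6_set_pte_ext now provides maps the same Linux
bits onto the APX/AP[1:0]/XN encoding instead. A rough C sketch, again
illustrative only and with the bit values assumed rather than taken from the
patch:

    #include <stdint.h>

    /* Linux PTE bits (assumed values) */
    #define L_PTE_PRESENT  (1u << 0)
    #define L_PTE_YOUNG    (1u << 1)
    #define L_PTE_USER     (1u << 4)
    #define L_PTE_WRITE    (1u << 5)
    #define L_PTE_EXEC     (1u << 6)
    #define L_PTE_DIRTY    (1u << 7)

    /* ARMv6 extended small-page bits (assumed values) */
    #define PTE_EXT_XN     (1u << 0)   /* execute never */
    #define PTE_TYPE_MASK  0x03u
    #define PTE_EXT_AP0    (1u << 4)
    #define PTE_EXT_AP1    (2u << 4)
    #define PTE_EXT_APX    (1u << 9)

    /* pte is the Linux PTE, ext the extra bits the caller passes in r2 */
    static uint32_t armv6_hw_pte(uint32_t pte, uint32_t ext)
    {
            /* drop software bits 0x3f0 and type, start as a kernel r/w small page */
            uint32_t hw = (pte & ~(0x3f0u | PTE_TYPE_MASK)) | ext | PTE_EXT_AP0 | 2u;

            if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) != (L_PTE_WRITE | L_PTE_DIRTY))
                    hw |= PTE_EXT_APX;              /* not writable and dirty: read-only */

            if (pte & L_PTE_USER) {
                    hw |= PTE_EXT_AP1;              /* grant user access */
                    if (hw & PTE_EXT_APX)           /* user read-only: APX=0 AP1=1 AP0=0 */
                            hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
            }

            if (!(pte & L_PTE_EXEC))
                    hw |= PTE_EXT_XN;

            if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) != (L_PTE_PRESENT | L_PTE_YOUNG))
                    hw = 0;                         /* not present/young: fault entry */

            return hw;
    }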