config ARM64_VA_BITS_48
bool "48-bit"
+config ARM64_USER_VA_BITS_52
+ bool "52-bit (user)"
+ depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+ help
+ Enable 52-bit virtual addressing for userspace when explicitly
+ requested via a hint to mmap(). The kernel will continue to
+ use 48-bit virtual addresses for its own mappings.
+
+ NOTE: Enabling 52-bit virtual addressing in conjunction with
+ ARMv8.3 Pointer Authentication will result in the PAC being
+ reduced from 7 bits to 3 bits, which may significantly increase
+ its susceptibility to brute-force attacks.
+
+ If unsure, select 48-bit virtual addressing instead.
+
endchoice
+config ARM64_FORCE_52BIT
+ bool "Force 52-bit virtual addresses for userspace"
+ depends on ARM64_USER_VA_BITS_52 && EXPERT
+ help
+ For systems with 52-bit userspace VAs enabled, the kernel will attempt
+ to maintain compatibility with older software by providing 48-bit VAs
+ unless a hint is supplied to mmap().
+
+ This configuration option disables the 48-bit compatibility logic, and
+ forces all userspace addresses to be 52-bit on HW that supports it. One
+ should only enable this configuration option for stress testing userspace
+ memory management code. If unsure, say N here.
+
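The 48-bit compatibility behaviour described in the two help texts above is driven purely by the hint passed to mmap(). A minimal userspace sketch of the opt-in (hypothetical, assuming a kernel built with ARM64_USER_VA_BITS_52=y and ARM64_FORCE_52BIT=n on hardware that implements 52-bit VAs):

/*
 * Any hint above the 48-bit boundary opts this mapping in to 52-bit VAs;
 * with a NULL hint the kernel keeps returning addresses below 2^48.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)(1UL << 50);	/* above the 48-bit limit */
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	printf("mapped at %p\n", p);	/* may now lie above 2^48 */
	return 0;
}
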
config ARM64_VA_BITS
int
default 36 if ARM64_VA_BITS_36
default 39 if ARM64_VA_BITS_39
default 42 if ARM64_VA_BITS_42
default 47 if ARM64_VA_BITS_47
- default 48 if ARM64_VA_BITS_48
+ default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
choice
prompt "Physical address space size"
endchoice
-config ARM64_52BIT_VA
- def_bool y
- depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
-
config ARM64_PA_BITS
int
default 48 if ARM64_PA_BITS_48
at runtime, and does not affect PEs that do not implement
this feature.
-config ARM64_FORCE_52BIT
- bool "Force 52-bit virtual addresses for userspace"
- depends on ARM64_52BIT_VA && EXPERT
- help
- For systems with 52-bit userspace VAs enabled, the kernel will attempt
- to maintain compatibility with older software by providing 48-bit VAs
- unless a hint is supplied to mmap.
-
- This configuration option disables the 48-bit compatibility logic, and
- forces all userspace addresses to be 52-bit on HW that supports it. One
- should only enable this configuration option for stress testing userspace
- memory management code. If unsure say N here.
-
endmenu
config ARM64_SVE
* ttbr: Value of ttbr to set, modified.
*/
.macro offset_ttbr1, ttbr
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
.endm
* to be nop'ed out when dealing with 52-bit kernel VAs.
*/
.macro restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
.endm
static inline bool __cpu_uses_extended_idmap(void)
{
- if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
+ if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
return false;
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
#define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
#define PTRS_PER_PGD (1 << (52 - PGDIR_SHIFT))
#else
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
#endif
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
(UL(1) << (48 - PGDIR_SHIFT))) * 8)
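/*
 * For illustration, assuming the 64K pages that ARM64_USER_VA_BITS_52
 * depends on: PGDIR_SHIFT is 42, so a pgd sized for 52-bit VAs holds
 * 1024 entries while 48-bit addressing can index only 64 of them. The
 * offset is then (1024 - 64) * 8 = 0x1e00 bytes, which lines the 64
 * entries reachable through TTBR1 up with the top of the table, where
 * pgd_index() places the kernel mappings.
 */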
#define __ASM_PROCESSOR_H
#define KERNEL_DS UL(-1)
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
#define USER_DS ((UL(1) << 52) - 1)
#else
#define USER_DS ((UL(1) << VA_BITS) - 1)
-#endif /* CONFIG_ARM64_52BIT_VA */
+#endif /* CONFIG_ARM64_USER_VA_BITS_52 */
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
mrs_s x6, SYS_ID_AA64MMFR2_EL1
and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
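// a non-zero LVA field means the CPU implements 52-bit VAs with 64K pages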
mov x5, #52
ENDPROC(__enable_mmu)
ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
ldr_l x0, vabits_user
cmp x0, #52
b.ne 2f
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
- if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
+ if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52) && va52mismatch)
pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
ret = -EIO;
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
-#ifdef CONFIG_ARM64_52BIT_VA
- ldr_l x9, vabits_user
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ ldr_l x9, vabits_user
sub x9, xzr, x9
add x9, x9, #64
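// x9 = 64 - vabits_user, the T0SZ value covering a vabits_user-bit VA space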
#else