author		Will Deacon <will.deacon@arm.com>	2018-12-10 09:15:15 -0500
committer	Will Deacon <will.deacon@arm.com>	2018-12-10 13:42:18 -0500
commit		68d23da4373aba76f5300017c4746440f276698e
tree		138b93b628cf77727970417690693f18eddf7c40
parent		b9567720a1b8e739380e0241413606c056c57859
arm64: Kconfig: Re-jig CONFIG options for 52-bit VA
Enabling 52-bit VAs for userspace is pretty confusing, since it requires
you to select "48-bit" virtual addressing in the Kconfig.
Rework the logic so that 52-bit user virtual addressing is advertised in
the "Virtual address space size" choice, along with some help text to
describe its interaction with Pointer Authentication. The EXPERT-only
option to force all user mappings to the 52-bit range is then made
available immediately below the VA size selection.
Signed-off-by: Will Deacon <will.deacon@arm.com>
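As the new Kconfig help text below spells out, a process only receives 52-bit addresses when it asks for them explicitly, by passing an mmap() address hint above the default 48-bit limit. A minimal userspace sketch of that opt-in (the hint value here is arbitrary, just somewhere above 2^48):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* A hint above the 48-bit boundary requests a 52-bit VA; with a
         * NULL hint, mappings stay below 2^48 for compatibility. */
        void *hint = (void *)(1UL << 51);
        size_t len = 2UL << 20;
        void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;

        printf("mapped at %p\n", p);
        munmap(p, len);
        return 0;
    }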
-rw-r--r--	arch/arm64/Kconfig			| 47
-rw-r--r--	arch/arm64/include/asm/assembler.h	|  4
-rw-r--r--	arch/arm64/include/asm/mmu_context.h	|  2
-rw-r--r--	arch/arm64/include/asm/pgtable-hwdef.h	|  4
-rw-r--r--	arch/arm64/include/asm/processor.h	|  4
-rw-r--r--	arch/arm64/kernel/head.S		|  4
-rw-r--r--	arch/arm64/kernel/smp.c			|  2
-rw-r--r--	arch/arm64/mm/proc.S			|  4
8 files changed, 41 insertions, 30 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ca1f93233b22..905ce1653e82 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -682,15 +682,43 @@ config ARM64_VA_BITS_47
 config ARM64_VA_BITS_48
 	bool "48-bit"
 
+config ARM64_USER_VA_BITS_52
+	bool "52-bit (user)"
+	depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+	help
+	  Enable 52-bit virtual addressing for userspace when explicitly
+	  requested via a hint to mmap(). The kernel will continue to
+	  use 48-bit virtual addresses for its own mappings.
+
+	  NOTE: Enabling 52-bit virtual addressing in conjunction with
+	  ARMv8.3 Pointer Authentication will result in the PAC being
+	  reduced from 7 bits to 3 bits, which may have a significant
+	  impact on its susceptibility to brute-force attacks.
+
+	  If unsure, select 48-bit virtual addressing instead.
+
 endchoice
 
+config ARM64_FORCE_52BIT
+	bool "Force 52-bit virtual addresses for userspace"
+	depends on ARM64_USER_VA_BITS_52 && EXPERT
+	help
+	  For systems with 52-bit userspace VAs enabled, the kernel will attempt
+	  to maintain compatibility with older software by providing 48-bit VAs
+	  unless a hint is supplied to mmap.
+
+	  This configuration option disables the 48-bit compatibility logic, and
+	  forces all userspace addresses to be 52-bit on HW that supports it. One
+	  should only enable this configuration option for stress testing userspace
+	  memory management code. If unsure say N here.
+
 config ARM64_VA_BITS
 	int
 	default 36 if ARM64_VA_BITS_36
 	default 39 if ARM64_VA_BITS_39
 	default 42 if ARM64_VA_BITS_42
 	default 47 if ARM64_VA_BITS_47
-	default 48 if ARM64_VA_BITS_48
+	default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
 
 choice
 	prompt "Physical address space size"
@@ -716,10 +744,6 @@ config ARM64_PA_BITS_52
 
 endchoice
 
-config ARM64_52BIT_VA
-	def_bool y
-	depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
-
 config ARM64_PA_BITS
 	int
 	default 48 if ARM64_PA_BITS_48
@@ -1186,19 +1210,6 @@ config ARM64_CNP
 	  at runtime, and does not affect PEs that do not implement
 	  this feature.
 
-config ARM64_FORCE_52BIT
-	bool "Force 52-bit virtual addresses for userspace"
-	depends on ARM64_52BIT_VA && EXPERT
-	help
-	  For systems with 52-bit userspace VAs enabled, the kernel will attempt
-	  to maintain compatibility with older software by providing 48-bit VAs
-	  unless a hint is supplied to mmap.
-
-	  This configuration option disables the 48-bit compatibility logic, and
-	  forces all userspace addresses to be 52-bit on HW that supports it. One
-	  should only enable this configuration option for stress testing userspace
-	  memory management code. If unsure say N here.
-
 endmenu
 
 config ARM64_SVE
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 122d91d4097a..ce985f13dce5 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -549,7 +549,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * ttbr: Value of ttbr to set, modified.
  */
 	.macro	offset_ttbr1, ttbr
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
 	.endm
@@ -560,7 +560,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * to be nop'ed out when dealing with 52-bit kernel VAs.
  */
 	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
 	.endm
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index b0768502fa08..2da3e478fd8f 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -74,7 +74,7 @@ extern u64 idmap_ptrs_per_pgd;
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
-	if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
+	if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
 		return false;
 
 	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d5219f2624b7..41c808d9168a 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -80,7 +80,7 @@
 #define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
 #define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 #define PTRS_PER_PGD		(1 << (52 - PGDIR_SHIFT))
 #else
 #define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
@@ -310,7 +310,7 @@
 #define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
 #endif
 
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
 #define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
 				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
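As a sanity check on those constants — my own arithmetic, assuming the 64K-page, 3-level configuration that ARM64_USER_VA_BITS_52 requires, where PGDIR_SHIFT works out to 42:

    /* Sketch only: assumes 64K pages with 3 translation levels, so PGDIR_SHIFT = 42. */
    #define SKETCH_PGDIR_SHIFT	42

    /* A 52-bit pgd has 1 << (52 - 42) = 1024 entries of 8 bytes each,
     * while a 48-bit pgd has 1 << (48 - 42) = 64 entries. */
    #define SKETCH_PTRS_PER_PGD_52	(1UL << (52 - SKETCH_PGDIR_SHIFT))	/* 1024 */
    #define SKETCH_PTRS_PER_PGD_48	(1UL << (48 - SKETCH_PGDIR_SHIFT))	/* 64 */

    /* TTBR1_BADDR_4852_OFFSET then evaluates to (1024 - 64) * 8 = 0x1e00, the
     * adjustment that the offset_ttbr1/restore_ttbr1 macros apply to TTBR1. */
    #define SKETCH_TTBR1_OFFSET	((SKETCH_PTRS_PER_PGD_52 - SKETCH_PTRS_PER_PGD_48) * 8)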
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index efa0210cf927..538ecbc15067 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -20,11 +20,11 @@
 #define __ASM_PROCESSOR_H
 
 #define KERNEL_DS	UL(-1)
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 #define USER_DS		((UL(1) << 52) - 1)
 #else
 #define USER_DS		((UL(1) << VA_BITS) - 1)
-#endif /* CONFIG_ARM64_52BIT_VA */
+#endif /* CONFIG_ARM64_USER_VA_BITS_52 */
 
 /*
  * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c229d9cfe9bf..6b70dd625f01 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -318,7 +318,7 @@ __create_page_tables:
 	adrp	x0, idmap_pg_dir
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
 	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
 	mov	x5, #52
@@ -800,7 +800,7 @@ ENTRY(__enable_mmu)
 ENDPROC(__enable_mmu)
 
 ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 	ldr_l	x0, vabits_user
 	cmp	x0, #52
 	b.ne	2f
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index e15b0b64d4d0..1ff18f5fbecb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -139,7 +139,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!cpu_online(cpu)) {
 		pr_crit("CPU%u: failed to come online\n", cpu);
 
-		if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
+		if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52) && va52mismatch)
 			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
 
 		ret = -EIO;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0cf86b17714c..e05b3ce1db6b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -451,8 +451,8 @@ ENTRY(__cpu_setup)
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1
 
-#ifdef CONFIG_ARM64_52BIT_VA
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
 	ldr_l		x9, vabits_user
 	sub		x9, xzr, x9
 	add		x9, x9, #64
 #else
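One note on the sequence just above, as a reading of the code rather than anything the patch states: x9 ends up holding 64 - vabits_user, which matches the TCR_ELx T0SZ encoding (T0SZ = 64 - VA size), so a 52-bit user VA space is programmed as T0SZ = 12 and a 48-bit one as T0SZ = 16. In C terms:

    /* Illustrative only: T0SZ encoding assumed from the TCR_ELx definition,
     * mirroring the "sub x9, xzr, x9; add x9, x9, #64" sequence above. */
    static inline unsigned int t0sz_for_va_bits(unsigned int vabits)
    {
        return 64 - vabits;	/* 52 -> 12, 48 -> 16 */
    }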