author		Will Deacon <will.deacon@arm.com>	2019-01-08 11:19:01 -0500
committer	Will Deacon <will.deacon@arm.com>	2019-01-10 12:49:35 -0500
commit		b89d82ef01b33bc50cbaa8ff05607879b40d0704 (patch)
tree		ae9df3844e5cb08338a8945d4a4fb9547ba27e26
parent		d9ed41962ee202f653a5fa8d2ea0f52924abe629 (diff)
arm64: kpti: Avoid rewriting early page tables when KASLR is enabled
A side effect of commit c55191e96caa ("arm64: mm: apply r/o permissions
of VM areas to its linear alias as well") is that the linear map is
created with page granularity, which means that transitioning the early
page table from global to non-global mappings when enabling kpti can
take a significant amount of time during boot.

Given that most CPU implementations do not require kpti, this mainly
impacts KASLR builds where kpti is forcefully enabled. However, in
these situations we know early on that non-global mappings are required
and can avoid the use of global mappings from the beginning. The only
gotcha is Cavium erratum #27456, which we must detect based on the MIDR
value of the boot CPU.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reported-by: John Garry <john.garry@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
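For scale (our arithmetic, not from the commit message): with a 4 KiB
granule, a page-granular linear map uses 1 GiB / 4 KiB = 262,144 PTEs
per GiB of RAM, and the G -> nG transition has to visit and rewrite
every one of them, so the boot-time cost grows linearly with the amount
of memory mapped.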
-rw-r--r--	arch/arm64/include/asm/mmu.h		41
-rw-r--r--	arch/arm64/include/asm/pgtable-prot.h	4
-rw-r--r--	arch/arm64/kernel/cpu_errata.c		2
-rw-r--r--	arch/arm64/kernel/cpufeature.c		9
-rw-r--r--	arch/arm64/kernel/head.S		1
5 files changed, 52 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 7689c7aa1d77..ac352accb3d9 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <asm/cputype.h>
+
 #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
 #define USER_ASID_BIT	48
 #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
@@ -44,6 +46,45 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
 	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
+	bool tx1_bug;
+
+	/* What's a kpti? Use global mappings if we don't know. */
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		return false;
+
+	/*
+	 * Note: this function is called before the CPU capabilities have
+	 * been configured, so our early mappings will be global. If we
+	 * later determine that kpti is required, then
+	 * kpti_install_ng_mappings() will make them non-global.
+	 */
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return arm64_kernel_unmapped_at_el0();
+
+	/*
+	 * KASLR is enabled so we're going to be enabling kpti on non-broken
+	 * CPUs regardless of their susceptibility to Meltdown. Rather
+	 * than force everybody to go through the G -> nG dance later on,
+	 * just put down non-global mappings from the beginning.
+	 */
+	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+		tx1_bug = false;
+#ifndef MODULE
+	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
+		extern const struct midr_range cavium_erratum_27456_cpus[];
+
+		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
+						cavium_erratum_27456_cpus);
+#endif
+	} else {
+		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+	}
+
+	return !tx1_bug && kaslr_offset() > 0;
+}
+
 typedef void (*bp_hardening_cb_t)(void);
 
 struct bp_hardening_data {
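As a reading aid, the new helper's outcomes can be summarised as
follows (our paraphrase of the hunk above, not text from the patch):

	/*
	 * arm64_kernel_use_ng_mappings(), summarised:
	 *
	 *   !CONFIG_UNMAP_KERNEL_AT_EL0             -> false (no kpti built in)
	 *   !CONFIG_RANDOMIZE_BASE                  -> arm64_kernel_unmapped_at_el0()
	 *   KASLR + boot CPU matches erratum #27456 -> false (kpti stays off there)
	 *   KASLR built in but not randomized       -> false (global mappings)
	 *   KASLR + kaslr_offset() > 0 otherwise    -> true  (nG from the start)
	 */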
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 78b942c1bea4..986e41c4c32b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,8 +37,8 @@
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
+#define PMD_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
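The knock-on effect (our paraphrase; the derived macros live elsewhere
in this header and are not part of the diff):

	/*
	 * Every protection derived from PROT_DEFAULT / PROT_SECT_DEFAULT,
	 * including PAGE_KERNEL used for kernel and linear-map entries, now
	 * picks up PTE_NG / PMD_SECT_NG whenever arm64_kernel_use_ng_mappings()
	 * is true -- crucially, this also covers the early page tables built
	 * before the CPU capabilities are finalised.
	 */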
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 09ac548c9d44..9950bb0cbd52 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -553,7 +553,7 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = {
 #endif
 
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
-static const struct midr_range cavium_erratum_27456_cpus[] = {
+const struct midr_range cavium_erratum_27456_cpus[] = {
 	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
 	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
 	/* Cavium ThunderX, T81 pass 1.0 */
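Dropping `static` gives the MIDR table external linkage, which is what
allows the block-scope declaration in the mmu.h hunk above to resolve
against it at link time without moving the array into a shared header:

	/* Quoted from arm64_kernel_use_ng_mappings() in the first hunk: */
	extern const struct midr_range cavium_erratum_27456_cpus[];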
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..f6d84e2c92fe 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 
 	/* Useful for KASLR robustness */
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return true;
+		return kaslr_offset() > 0;
 
 	/* Don't force KPTI for CPUs that are not vulnerable */
 	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
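Returning `kaslr_offset() > 0` instead of `true` distinguishes a kernel
merely built with CONFIG_RANDOMIZE_BASE from one that was actually
randomized at boot. For reference, the helper comes from
<asm/memory.h> and, from memory (treat as an approximation), reads:

	static inline unsigned long kaslr_offset(void)
	{
		/* Delta between the linked and the booted kernel base. */
		return kimage_vaddr - KIMAGE_VADDR;
	}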
@@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	static bool kpti_applied = false;
 	int cpu = smp_processor_id();
 
-	if (kpti_applied)
+	/*
+	 * We don't need to rewrite the page-tables if either we've done
+	 * it already or we have KASLR enabled and therefore have not
+	 * created any global mappings at all.
+	 */
+	if (kpti_applied || kaslr_offset() > 0)
 		return;
 
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
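Why the early return is safe (our reading of the patch, expressed as a
comment):

	/*
	 * kaslr_offset() > 0 implies arm64_kernel_use_ng_mappings()
	 * returned true while the early page tables were built, so every
	 * kernel mapping is already non-global and the expensive rewrite
	 * via idmap_kpti_install_ng_mappings() has nothing to do.
	 */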
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c7213674cb24..15d79a8e5e5e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -475,6 +475,7 @@ ENDPROC(__primary_switched)
 
 ENTRY(kimage_vaddr)
 	.quad		_text - TEXT_OFFSET
+EXPORT_SYMBOL(kimage_vaddr)
 
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
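The new EXPORT_SYMBOL pairs with the `#ifndef MODULE` guard in the
mmu.h hunk: module code can now reach kimage_vaddr through the
protection macros, along a chain of roughly this shape (our sketch):

	/*
	 * module mapping request (e.g. a vmalloc/ioremap path)
	 *   -> PAGE_* / PROT_* value
	 *   -> PROT_DEFAULT, which includes PTE_MAYBE_NG
	 *   -> arm64_kernel_use_ng_mappings()
	 *   -> kaslr_offset(), which reads kimage_vaddr
	 * so kimage_vaddr must be exported for module builds to link.
	 */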