author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 20:54:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 20:54:22 -0400
commit     c620f7bd0ba5c882b3e7fc199a8d5c2f6c2f5263
tree       d44e53b3ec7a9cba43540c7e6cea7df3adae5ba2
parent     dd4e5d6106b2380e2c1238406d26df8b2fe1c42c
parent     b33f908811b7627015238e0dee9baf2b4c9d720d
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"Mostly just incremental improvements here:
- Introduce AT_HWCAP2 for advertising CPU features to userspace
- Expose SVE2 availability to userspace
- Support for "data cache clean to point of deep persistence" (DC PODP)
- Honour "mitigations=off" on the cmdline and advertise status via
sysfs
- CPU timer erratum workaround (Neoverse-N1 #1188873)
- Introduce perf PMU driver for the SMMUv3 performance counters
- Add config option to disable the kuser helpers page for AArch32 tasks
- Futex modifications to ensure liveness under contention
- Rework debug exception handling to separate kernel and user
handlers
- Non-critical fixes and cleanup"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
Documentation: Add ARM64 to kernel-parameters.rst
arm64/speculation: Support 'mitigations=' cmdline option
arm64: ssbs: Don't treat CPUs with SSBS as unaffected by SSB
arm64: enable generic CPU vulnerabilities support
arm64: add sysfs vulnerability show for speculative store bypass
arm64: Fix size of __early_cpu_boot_status
clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters
clocksource/arm_arch_timer: Remove use of workaround static key
clocksource/arm_arch_timer: Drop use of static key in arch_timer_reg_read_stable
clocksource/arm_arch_timer: Directly assign set_next_event workaround
arm64: Use arch_timer_read_counter instead of arch_counter_get_cntvct
watchdog/sbsa: Use arch_timer_read_counter instead of arch_counter_get_cntvct
ARM: vdso: Remove dependency with the arch_timer driver internals
arm64: Apply ARM64_ERRATUM_1188873 to Neoverse-N1
arm64: Add part number for Neoverse N1
arm64: Make ARM64_ERRATUM_1188873 depend on COMPAT
arm64: Restrict ARM64_ERRATUM_1188873 mitigation to AArch32
arm64: mm: Remove pte_unmap_nested()
arm64: Fix compiler warning from pte_unmap() with -Wunused-but-set-variable
arm64: compat: Reduce address limit for 64K pages
...
93 files changed, 2537 insertions, 833 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst index b8d0bc07ed0a..0124980dca2d 100644 --- a/Documentation/admin-guide/kernel-parameters.rst +++ b/Documentation/admin-guide/kernel-parameters.rst | |||
@@ -88,6 +88,7 @@ parameter is applicable:: | |||
88 | APIC APIC support is enabled. | 88 | APIC APIC support is enabled. |
89 | APM Advanced Power Management support is enabled. | 89 | APM Advanced Power Management support is enabled. |
90 | ARM ARM architecture is enabled. | 90 | ARM ARM architecture is enabled. |
91 | ARM64 ARM64 architecture is enabled. | ||
91 | AX25 Appropriate AX.25 support is enabled. | 92 | AX25 Appropriate AX.25 support is enabled. |
92 | CLK Common clock infrastructure is enabled. | 93 | CLK Common clock infrastructure is enabled. |
93 | CMA Contiguous Memory Area support is enabled. | 94 | CMA Contiguous Memory Area support is enabled. |
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 3c0646e28488..fd03e2b629bb 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -2548,8 +2548,8 @@ | |||
2548 | http://repo.or.cz/w/linux-2.6/mini2440.git | 2548 | http://repo.or.cz/w/linux-2.6/mini2440.git |
2549 | 2549 | ||
2550 | mitigations= | 2550 | mitigations= |
2551 | [X86,PPC,S390] Control optional mitigations for CPU | 2551 | [X86,PPC,S390,ARM64] Control optional mitigations for |
2552 | vulnerabilities. This is a set of curated, | 2552 | CPU vulnerabilities. This is a set of curated, |
2553 | arch-independent options, each of which is an | 2553 | arch-independent options, each of which is an |
2554 | aggregation of existing arch-specific options. | 2554 | aggregation of existing arch-specific options. |
2555 | 2555 | ||
@@ -2558,11 +2558,13 @@ | |||
2558 | improves system performance, but it may also | 2558 | improves system performance, but it may also |
2559 | expose users to several CPU vulnerabilities. | 2559 | expose users to several CPU vulnerabilities. |
2560 | Equivalent to: nopti [X86,PPC] | 2560 | Equivalent to: nopti [X86,PPC] |
2561 | kpti=0 [ARM64] | ||
2561 | nospectre_v1 [PPC] | 2562 | nospectre_v1 [PPC] |
2562 | nobp=0 [S390] | 2563 | nobp=0 [S390] |
2563 | nospectre_v2 [X86,PPC,S390] | 2564 | nospectre_v2 [X86,PPC,S390,ARM64] |
2564 | spectre_v2_user=off [X86] | 2565 | spectre_v2_user=off [X86] |
2565 | spec_store_bypass_disable=off [X86,PPC] | 2566 | spec_store_bypass_disable=off [X86,PPC] |
2567 | ssbd=force-off [ARM64] | ||
2566 | l1tf=off [X86] | 2568 | l1tf=off [X86] |
2567 | 2569 | ||
2568 | auto (default) | 2570 | auto (default) |
@@ -2908,10 +2910,10 @@ | |||
2908 | check bypass). With this option data leaks are possible | 2910 | check bypass). With this option data leaks are possible |
2909 | in the system. | 2911 | in the system. |
2910 | 2912 | ||
2911 | nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2 | 2913 | nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for |
2912 | (indirect branch prediction) vulnerability. System may | 2914 | the Spectre variant 2 (indirect branch prediction) |
2913 | allow data leaks with this option, which is equivalent | 2915 | vulnerability. System may allow data leaks with this |
2914 | to spectre_v2=off. | 2916 | option. |
2915 | 2917 | ||
2916 | nospec_store_bypass_disable | 2918 | nospec_store_bypass_disable |
2917 | [HW] Disable all mitigations for the Speculative Store Bypass vulnerability | 2919 | [HW] Disable all mitigations for the Speculative Store Bypass vulnerability |
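For reference, the mitigation state these options control is reported through the standard sysfs vulnerabilities directory. A minimal userspace sketch (the directory is the standard sysfs location; the file list below is illustrative, not exhaustive):

#include <stdio.h>

/* Minimal sketch: print the kernel's report for each CPU vulnerability. */
int main(void)
{
	static const char *const vulns[] = {
		"meltdown", "spectre_v1", "spectre_v2", "spec_store_bypass",
	};
	char path[128], line[256];

	for (unsigned int i = 0; i < sizeof(vulns) / sizeof(vulns[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/vulnerabilities/%s",
			 vulns[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* file absent on older kernels */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", vulns[i], line);
		fclose(f);
	}
	return 0;
}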
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt index d4b4dd1fe786..684a0da39378 100644 --- a/Documentation/arm64/cpu-feature-registers.txt +++ b/Documentation/arm64/cpu-feature-registers.txt | |||
@@ -209,6 +209,22 @@ infrastructure: | |||
209 | | AT | [35-32] | y | | 209 | | AT | [35-32] | y | |
210 | x--------------------------------------------------x | 210 | x--------------------------------------------------x |
211 | 211 | ||
212 | 6) ID_AA64ZFR0_EL1 - SVE feature ID register 0 | ||
213 | |||
214 | x--------------------------------------------------x | ||
215 | | Name | bits | visible | | ||
216 | |--------------------------------------------------| | ||
217 | | SM4 | [43-40] | y | | ||
218 | |--------------------------------------------------| | ||
219 | | SHA3 | [35-32] | y | | ||
220 | |--------------------------------------------------| | ||
221 | | BitPerm | [19-16] | y | | ||
222 | |--------------------------------------------------| | ||
223 | | AES | [7-4] | y | | ||
224 | |--------------------------------------------------| | ||
225 | | SVEVer | [3-0] | y | | ||
226 | x--------------------------------------------------x | ||
227 | |||
212 | Appendix I: Example | 228 | Appendix I: Example |
213 | --------------------------- | 229 | --------------------------- |
214 | 230 | ||
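A hedged sketch of decoding the visible ID_AA64ZFR0_EL1 fields from a raw register value; the shift positions come straight from the table above, and the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* All of the fields listed in the table are 4 bits wide. */
static unsigned int zfr0_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

void print_zfr0(uint64_t zfr0)
{
	printf("SVEVer=%u AES=%u BitPerm=%u SHA3=%u SM4=%u\n",
	       zfr0_field(zfr0, 0), zfr0_field(zfr0, 4),
	       zfr0_field(zfr0, 16), zfr0_field(zfr0, 32),
	       zfr0_field(zfr0, 40));
}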
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt index 13d6691b37be..b73a2519ecf2 100644 --- a/Documentation/arm64/elf_hwcaps.txt +++ b/Documentation/arm64/elf_hwcaps.txt | |||
@@ -13,9 +13,9 @@ architected discovery mechanism available to userspace code at EL0. The | |||
13 | kernel exposes the presence of these features to userspace through a set | 13 | kernel exposes the presence of these features to userspace through a set |
14 | of flags called hwcaps, exposed in the auxilliary vector. | 14 | of flags called hwcaps, exposed in the auxilliary vector. |
15 | 15 | ||
16 | Userspace software can test for features by acquiring the AT_HWCAP entry | 16 | Userspace software can test for features by acquiring the AT_HWCAP or |
17 | of the auxilliary vector, and testing whether the relevant flags are | 17 | AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant |
18 | set, e.g. | 18 | flags are set, e.g. |
19 | 19 | ||
20 | bool floating_point_is_present(void) | 20 | bool floating_point_is_present(void) |
21 | { | 21 | { |
@@ -135,6 +135,10 @@ HWCAP_DCPOP | |||
135 | 135 | ||
136 | Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001. | 136 | Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001. |
137 | 137 | ||
138 | HWCAP2_DCPODP | ||
139 | |||
140 | Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010. | ||
141 | |||
138 | HWCAP_SHA3 | 142 | HWCAP_SHA3 |
139 | 143 | ||
140 | Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001. | 144 | Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001. |
@@ -159,6 +163,30 @@ HWCAP_SVE | |||
159 | 163 | ||
160 | Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001. | 164 | Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001. |
161 | 165 | ||
166 | HWCAP2_SVE2 | ||
167 | |||
168 | Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001. | ||
169 | |||
170 | HWCAP2_SVEAES | ||
171 | |||
172 | Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001. | ||
173 | |||
174 | HWCAP2_SVEPMULL | ||
175 | |||
176 | Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010. | ||
177 | |||
178 | HWCAP2_SVEBITPERM | ||
179 | |||
180 | Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001. | ||
181 | |||
182 | HWCAP2_SVESHA3 | ||
183 | |||
184 | Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001. | ||
185 | |||
186 | HWCAP2_SVESM4 | ||
187 | |||
188 | Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001. | ||
189 | |||
162 | HWCAP_ASIMDFHM | 190 | HWCAP_ASIMDFHM |
163 | 191 | ||
164 | Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001. | 192 | Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001. |
@@ -194,3 +222,10 @@ HWCAP_PACG | |||
194 | Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or | 222 | Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or |
195 | ID_AA64ISAR1_EL1.GPI == 0b0001, as described by | 223 | ID_AA64ISAR1_EL1.GPI == 0b0001, as described by |
196 | Documentation/arm64/pointer-authentication.txt. | 224 | Documentation/arm64/pointer-authentication.txt. |
225 | |||
226 | |||
227 | 4. Unused AT_HWCAP bits | ||
228 | ----------------------- | ||
229 | |||
230 | For interoperation with userspace, the kernel guarantees that bits 62 | ||
231 | and 63 of AT_HWCAP will always be returned as 0. | ||
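A minimal sketch of the AT_HWCAP2 probing described above, using getauxval(); the HWCAP2_SVE2 fallback definition matches the arm64 uapi value but is only needed if your headers lack it:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SVE2
#define HWCAP2_SVE2	(1 << 1)	/* matches arm64 uapi asm/hwcap.h */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* getauxval() returns 0 for AT_HWCAP2 on kernels that predate it,
	 * so the absence case needs no special handling. */
	printf("SVE2: %s\n", (hwcap2 & HWCAP2_SVE2) ? "present" : "absent");
	return 0;
}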
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index d1e2bb801e1b..68d9b74fd751 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt | |||
@@ -61,6 +61,7 @@ stable kernels. | |||
61 | | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 | | 61 | | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 | |
62 | | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 | | 62 | | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 | |
63 | | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 | | 63 | | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 | |
64 | | ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 | | ||
64 | | ARM | MMU-500 | #841119,#826419 | N/A | | 65 | | ARM | MMU-500 | #841119,#826419 | N/A | |
65 | | | | | | | 66 | | | | | | |
66 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | | 67 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | |
@@ -77,6 +78,7 @@ stable kernels. | |||
77 | | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | | 78 | | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | |
78 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | | 79 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | |
79 | | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 | | 80 | | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 | |
81 | | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A | | ||
80 | | | | | | | 82 | | | | | | |
81 | | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | 83 | | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | |
82 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | 84 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | |
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index 7169a0ec41d8..9940e924a47e 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt | |||
@@ -34,6 +34,23 @@ model features for SVE is included in Appendix A. | |||
34 | following sections: software that needs to verify that those interfaces are | 34 | following sections: software that needs to verify that those interfaces are |
35 | present must check for HWCAP_SVE instead. | 35 | present must check for HWCAP_SVE instead. |
36 | 36 | ||
37 | * On hardware that supports the SVE2 extensions, HWCAP2_SVE2 will also | ||
38 | be reported in the AT_HWCAP2 aux vector entry. In addition to this, | ||
39 | optional extensions to SVE2 may be reported by the presence of: | ||
40 | |||
41 | HWCAP2_SVE2 | ||
42 | HWCAP2_SVEAES | ||
43 | HWCAP2_SVEPMULL | ||
44 | HWCAP2_SVEBITPERM | ||
45 | HWCAP2_SVESHA3 | ||
46 | HWCAP2_SVESM4 | ||
47 | |||
48 | This list may be extended over time as the SVE architecture evolves. | ||
49 | |||
50 | These extensions are also reported via the CPU ID register ID_AA64ZFR0_EL1, | ||
51 | which userspace can read using an MRS instruction. See elf_hwcaps.txt and | ||
52 | cpu-feature-registers.txt for details. | ||
53 | |||
37 | * Debuggers should restrict themselves to interacting with the target via the | 54 | * Debuggers should restrict themselves to interacting with the target via the |
38 | NT_ARM_SVE regset. The recommended way of detecting support for this regset | 55 | NT_ARM_SVE regset. The recommended way of detecting support for this regset |
39 | is to connect to a target process first and then attempt a | 56 | is to connect to a target process first and then attempt a |
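A sketch of the MRS access mentioned above. Direct EL0 access traps, and the kernel emulates the read, returning only the visible fields; userspace should check for HWCAP_CPUID before relying on this. S3_0_C0_C4_4 is the generic system-register encoding of ID_AA64ZFR0_EL1, accepted by assemblers that do not yet know the name:

#include <stdint.h>

static inline uint64_t read_id_aa64zfr0(void)
{
	uint64_t val;

	/* Trapped and emulated by the kernel on CPUs without direct access. */
	asm volatile("mrs %0, S3_0_C0_C4_4" : "=r" (val));
	return val;
}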
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt index 6c42c75103eb..6361fb01c9c1 100644 --- a/Documentation/robust-futexes.txt +++ b/Documentation/robust-futexes.txt | |||
@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have | |||
218 | the new syscalls yet. | 218 | the new syscalls yet. |
219 | 219 | ||
220 | Architectures need to implement the new futex_atomic_cmpxchg_inatomic() | 220 | Architectures need to implement the new futex_atomic_cmpxchg_inatomic() |
221 | inline function before writing up the syscalls (that function returns | 221 | inline function before writing up the syscalls. |
222 | -ENOSYS right now). | ||
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 0a8d7bba2cb0..4b66ecd6be99 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h | |||
@@ -11,6 +11,10 @@ | |||
11 | #include <clocksource/arm_arch_timer.h> | 11 | #include <clocksource/arm_arch_timer.h> |
12 | 12 | ||
13 | #ifdef CONFIG_ARM_ARCH_TIMER | 13 | #ifdef CONFIG_ARM_ARCH_TIMER |
14 | /* 32bit ARM doesn't know anything about timer errata... */ | ||
15 | #define has_erratum_handler(h) (false) | ||
16 | #define erratum_handler(h) (arch_timer_##h) | ||
17 | |||
14 | int arch_timer_arch_init(void); | 18 | int arch_timer_arch_init(void); |
15 | 19 | ||
16 | /* | 20 | /* |
@@ -79,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void) | |||
79 | return val; | 83 | return val; |
80 | } | 84 | } |
81 | 85 | ||
82 | static inline u64 arch_counter_get_cntpct(void) | 86 | static inline u64 __arch_counter_get_cntpct(void) |
83 | { | 87 | { |
84 | u64 cval; | 88 | u64 cval; |
85 | 89 | ||
@@ -88,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void) | |||
88 | return cval; | 92 | return cval; |
89 | } | 93 | } |
90 | 94 | ||
91 | static inline u64 arch_counter_get_cntvct(void) | 95 | static inline u64 __arch_counter_get_cntpct_stable(void) |
96 | { | ||
97 | return __arch_counter_get_cntpct(); | ||
98 | } | ||
99 | |||
100 | static inline u64 __arch_counter_get_cntvct(void) | ||
92 | { | 101 | { |
93 | u64 cval; | 102 | u64 cval; |
94 | 103 | ||
@@ -97,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void) | |||
97 | return cval; | 106 | return cval; |
98 | } | 107 | } |
99 | 108 | ||
109 | static inline u64 __arch_counter_get_cntvct_stable(void) | ||
110 | { | ||
111 | return __arch_counter_get_cntvct(); | ||
112 | } | ||
113 | |||
100 | static inline u32 arch_timer_get_cntkctl(void) | 114 | static inline u32 arch_timer_get_cntkctl(void) |
101 | { | 115 | { |
102 | u32 cntkctl; | 116 | u32 cntkctl; |
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 07e27f212dc7..d2453e2d3f1f 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h | |||
@@ -68,6 +68,8 @@ | |||
68 | #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) | 68 | #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) |
69 | #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) | 69 | #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) |
70 | 70 | ||
71 | #define CNTVCT __ACCESS_CP15_64(1, c14) | ||
72 | |||
71 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ | 73 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ |
72 | 74 | ||
73 | static inline unsigned long get_cr(void) | 75 | static inline unsigned long get_cr(void) |
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index 9e11dce55e06..9587517649bd 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h | |||
@@ -32,14 +32,14 @@ | |||
32 | #define stage2_pgd_present(kvm, pgd) pgd_present(pgd) | 32 | #define stage2_pgd_present(kvm, pgd) pgd_present(pgd) |
33 | #define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) | 33 | #define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) |
34 | #define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) | 34 | #define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) |
35 | #define stage2_pud_free(kvm, pud) pud_free(NULL, pud) | 35 | #define stage2_pud_free(kvm, pud) do { } while (0) |
36 | 36 | ||
37 | #define stage2_pud_none(kvm, pud) pud_none(pud) | 37 | #define stage2_pud_none(kvm, pud) pud_none(pud) |
38 | #define stage2_pud_clear(kvm, pud) pud_clear(pud) | 38 | #define stage2_pud_clear(kvm, pud) pud_clear(pud) |
39 | #define stage2_pud_present(kvm, pud) pud_present(pud) | 39 | #define stage2_pud_present(kvm, pud) pud_present(pud) |
40 | #define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) | 40 | #define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) |
41 | #define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) | 41 | #define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) |
42 | #define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) | 42 | #define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd) |
43 | 43 | ||
44 | #define stage2_pud_huge(kvm, pud) pud_huge(pud) | 44 | #define stage2_pud_huge(kvm, pud) pud_huge(pud) |
45 | 45 | ||
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c index a9dd619c6c29..7bdbf5d5c47d 100644 --- a/arch/arm/vdso/vgettimeofday.c +++ b/arch/arm/vdso/vgettimeofday.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | #include <linux/hrtimer.h> | 19 | #include <linux/hrtimer.h> |
20 | #include <linux/time.h> | 20 | #include <linux/time.h> |
21 | #include <asm/arch_timer.h> | ||
22 | #include <asm/barrier.h> | 21 | #include <asm/barrier.h> |
23 | #include <asm/bug.h> | 22 | #include <asm/bug.h> |
23 | #include <asm/cp15.h> | ||
24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/unistd.h> | 25 | #include <asm/unistd.h> |
26 | #include <asm/vdso_datapage.h> | 26 | #include <asm/vdso_datapage.h> |
@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata) | |||
123 | u64 cycle_now; | 123 | u64 cycle_now; |
124 | u64 nsec; | 124 | u64 nsec; |
125 | 125 | ||
126 | cycle_now = arch_counter_get_cntvct(); | 126 | isb(); |
127 | cycle_now = read_sysreg(CNTVCT); | ||
127 | 128 | ||
128 | cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask; | 129 | cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask; |
129 | 130 | ||
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index d81adca1b04d..df350f4e1e7a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -90,6 +90,7 @@ config ARM64 | |||
90 | select GENERIC_CLOCKEVENTS | 90 | select GENERIC_CLOCKEVENTS |
91 | select GENERIC_CLOCKEVENTS_BROADCAST | 91 | select GENERIC_CLOCKEVENTS_BROADCAST |
92 | select GENERIC_CPU_AUTOPROBE | 92 | select GENERIC_CPU_AUTOPROBE |
93 | select GENERIC_CPU_VULNERABILITIES | ||
93 | select GENERIC_EARLY_IOREMAP | 94 | select GENERIC_EARLY_IOREMAP |
94 | select GENERIC_IDLE_POLL_SETUP | 95 | select GENERIC_IDLE_POLL_SETUP |
95 | select GENERIC_IRQ_MULTI_HANDLER | 96 | select GENERIC_IRQ_MULTI_HANDLER |
@@ -148,6 +149,7 @@ config ARM64 | |||
148 | select HAVE_PERF_REGS | 149 | select HAVE_PERF_REGS |
149 | select HAVE_PERF_USER_STACK_DUMP | 150 | select HAVE_PERF_USER_STACK_DUMP |
150 | select HAVE_REGS_AND_STACK_ACCESS_API | 151 | select HAVE_REGS_AND_STACK_ACCESS_API |
152 | select HAVE_FUNCTION_ARG_ACCESS_API | ||
151 | select HAVE_RCU_TABLE_FREE | 153 | select HAVE_RCU_TABLE_FREE |
152 | select HAVE_RSEQ | 154 | select HAVE_RSEQ |
153 | select HAVE_STACKPROTECTOR | 155 | select HAVE_STACKPROTECTOR |
@@ -293,7 +295,7 @@ menu "Kernel Features" | |||
293 | menu "ARM errata workarounds via the alternatives framework" | 295 | menu "ARM errata workarounds via the alternatives framework" |
294 | 296 | ||
295 | config ARM64_WORKAROUND_CLEAN_CACHE | 297 | config ARM64_WORKAROUND_CLEAN_CACHE |
296 | def_bool n | 298 | bool |
297 | 299 | ||
298 | config ARM64_ERRATUM_826319 | 300 | config ARM64_ERRATUM_826319 |
299 | bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted" | 301 | bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted" |
@@ -460,26 +462,28 @@ config ARM64_ERRATUM_1024718 | |||
460 | bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" | 462 | bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" |
461 | default y | 463 | default y |
462 | help | 464 | help |
463 | This option adds work around for Arm Cortex-A55 Erratum 1024718. | 465 | This option adds a workaround for ARM Cortex-A55 Erratum 1024718. |
464 | 466 | ||
465 | Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect | 467 | Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect |
466 | update of the hardware dirty bit when the DBM/AP bits are updated | 468 | update of the hardware dirty bit when the DBM/AP bits are updated |
467 | without a break-before-make. The work around is to disable the usage | 469 | without a break-before-make. The workaround is to disable the usage |
468 | of hardware DBM locally on the affected cores. CPUs not affected by | 470 | of hardware DBM locally on the affected cores. CPUs not affected by |
469 | erratum will continue to use the feature. | 471 | this erratum will continue to use the feature. |
470 | 472 | ||
471 | If unsure, say Y. | 473 | If unsure, say Y. |
472 | 474 | ||
473 | config ARM64_ERRATUM_1188873 | 475 | config ARM64_ERRATUM_1188873 |
474 | bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result" | 476 | bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result" |
475 | default y | 477 | default y |
478 | depends on COMPAT | ||
476 | select ARM_ARCH_TIMER_OOL_WORKAROUND | 479 | select ARM_ARCH_TIMER_OOL_WORKAROUND |
477 | help | 480 | help |
478 | This option adds work arounds for ARM Cortex-A76 erratum 1188873 | 481 | This option adds a workaround for ARM Cortex-A76/Neoverse-N1 |
482 | erratum 1188873. | ||
479 | 483 | ||
480 | Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause | 484 | Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could |
481 | register corruption when accessing the timer registers from | 485 | cause register corruption when accessing the timer registers |
482 | AArch32 userspace. | 486 | from AArch32 userspace. |
483 | 487 | ||
484 | If unsure, say Y. | 488 | If unsure, say Y. |
485 | 489 | ||
@@ -487,7 +491,7 @@ config ARM64_ERRATUM_1165522 | |||
487 | bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" | 491 | bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" |
488 | default y | 492 | default y |
489 | help | 493 | help |
490 | This option adds work arounds for ARM Cortex-A76 erratum 1165522 | 494 | This option adds a workaround for ARM Cortex-A76 erratum 1165522. |
491 | 495 | ||
492 | Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with | 496 | Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with |
493 | corrupted TLBs by speculating an AT instruction during a guest | 497 | corrupted TLBs by speculating an AT instruction during a guest |
@@ -500,7 +504,7 @@ config ARM64_ERRATUM_1286807 | |||
500 | default y | 504 | default y |
501 | select ARM64_WORKAROUND_REPEAT_TLBI | 505 | select ARM64_WORKAROUND_REPEAT_TLBI |
502 | help | 506 | help |
503 | This option adds workaround for ARM Cortex-A76 erratum 1286807 | 507 | This option adds a workaround for ARM Cortex-A76 erratum 1286807. |
504 | 508 | ||
505 | On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual | 509 | On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual |
506 | address for a cacheable mapping of a location is being | 510 | address for a cacheable mapping of a location is being |
@@ -517,10 +521,10 @@ config CAVIUM_ERRATUM_22375 | |||
517 | bool "Cavium erratum 22375, 24313" | 521 | bool "Cavium erratum 22375, 24313" |
518 | default y | 522 | default y |
519 | help | 523 | help |
520 | Enable workaround for erratum 22375, 24313. | 524 | Enable workaround for errata 22375 and 24313. |
521 | 525 | ||
522 | This implements two gicv3-its errata workarounds for ThunderX. Both | 526 | This implements two gicv3-its errata workarounds for ThunderX. Both |
523 | with small impact affecting only ITS table allocation. | 527 | with a small impact affecting only ITS table allocation. |
524 | 528 | ||
525 | erratum 22375: only alloc 8MB table size | 529 | erratum 22375: only alloc 8MB table size |
526 | erratum 24313: ignore memory access type | 530 | erratum 24313: ignore memory access type |
@@ -584,9 +588,6 @@ config QCOM_FALKOR_ERRATUM_1003 | |||
584 | 588 | ||
585 | config ARM64_WORKAROUND_REPEAT_TLBI | 589 | config ARM64_WORKAROUND_REPEAT_TLBI |
586 | bool | 590 | bool |
587 | help | ||
588 | Enable the repeat TLBI workaround for Falkor erratum 1009 and | ||
589 | Cortex-A76 erratum 1286807. | ||
590 | 591 | ||
591 | config QCOM_FALKOR_ERRATUM_1009 | 592 | config QCOM_FALKOR_ERRATUM_1009 |
592 | bool "Falkor E1009: Prematurely complete a DSB after a TLBI" | 593 | bool "Falkor E1009: Prematurely complete a DSB after a TLBI" |
@@ -622,7 +623,7 @@ config HISILICON_ERRATUM_161600802 | |||
622 | bool "Hip07 161600802: Erroneous redistributor VLPI base" | 623 | bool "Hip07 161600802: Erroneous redistributor VLPI base" |
623 | default y | 624 | default y |
624 | help | 625 | help |
625 | The HiSilicon Hip07 SoC usees the wrong redistributor base | 626 | The HiSilicon Hip07 SoC uses the wrong redistributor base |
626 | when issued ITS commands such as VMOVP and VMAPP, and requires | 627 | when issued ITS commands such as VMOVP and VMAPP, and requires |
627 | a 128kB offset to be applied to the target address in this commands. | 628 | a 128kB offset to be applied to the target address in this commands. |
628 | 629 | ||
@@ -642,7 +643,7 @@ config FUJITSU_ERRATUM_010001 | |||
642 | bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly" | 643 | bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly" |
643 | default y | 644 | default y |
644 | help | 645 | help |
645 | This option adds workaround for Fujitsu-A64FX erratum E#010001. | 646 | This option adds a workaround for Fujitsu-A64FX erratum E#010001. |
646 | On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory | 647 | On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory |
647 | accesses may cause undefined fault (Data abort, DFSC=0b111111). | 648 | accesses may cause undefined fault (Data abort, DFSC=0b111111). |
648 | This fault occurs under a specific hardware condition when a | 649 | This fault occurs under a specific hardware condition when a |
@@ -653,7 +654,7 @@ config FUJITSU_ERRATUM_010001 | |||
653 | case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1. | 654 | case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1. |
654 | 655 | ||
655 | The workaround is to ensure these bits are clear in TCR_ELx. | 656 | The workaround is to ensure these bits are clear in TCR_ELx. |
656 | The workaround only affect the Fujitsu-A64FX. | 657 | The workaround only affects the Fujitsu-A64FX. |
657 | 658 | ||
658 | If unsure, say Y. | 659 | If unsure, say Y. |
659 | 660 | ||
@@ -885,6 +886,9 @@ config ARCH_WANT_HUGE_PMD_SHARE | |||
885 | config ARCH_HAS_CACHE_LINE_SIZE | 886 | config ARCH_HAS_CACHE_LINE_SIZE |
886 | def_bool y | 887 | def_bool y |
887 | 888 | ||
889 | config ARCH_ENABLE_SPLIT_PMD_PTLOCK | ||
890 | def_bool y if PGTABLE_LEVELS > 2 | ||
891 | |||
888 | config SECCOMP | 892 | config SECCOMP |
889 | bool "Enable seccomp to safely compute untrusted bytecode" | 893 | bool "Enable seccomp to safely compute untrusted bytecode" |
890 | ---help--- | 894 | ---help--- |
@@ -1074,9 +1078,65 @@ config RODATA_FULL_DEFAULT_ENABLED | |||
1074 | This requires the linear region to be mapped down to pages, | 1078 | This requires the linear region to be mapped down to pages, |
1075 | which may adversely affect performance in some cases. | 1079 | which may adversely affect performance in some cases. |
1076 | 1080 | ||
1081 | config ARM64_SW_TTBR0_PAN | ||
1082 | bool "Emulate Privileged Access Never using TTBR0_EL1 switching" | ||
1083 | help | ||
1084 | Enabling this option prevents the kernel from accessing | ||
1085 | user-space memory directly by pointing TTBR0_EL1 to a reserved | ||
1086 | zeroed area and reserved ASID. The user access routines | ||
1087 | restore the valid TTBR0_EL1 temporarily. | ||
1088 | |||
1089 | menuconfig COMPAT | ||
1090 | bool "Kernel support for 32-bit EL0" | ||
1091 | depends on ARM64_4K_PAGES || EXPERT | ||
1092 | select COMPAT_BINFMT_ELF if BINFMT_ELF | ||
1093 | select HAVE_UID16 | ||
1094 | select OLD_SIGSUSPEND3 | ||
1095 | select COMPAT_OLD_SIGACTION | ||
1096 | help | ||
1097 | This option enables support for a 32-bit EL0 running under a 64-bit | ||
1098 | kernel at EL1. AArch32-specific components such as system calls, | ||
1099 | the user helper functions, VFP support and the ptrace interface are | ||
1100 | handled appropriately by the kernel. | ||
1101 | |||
1102 | If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware | ||
1103 | that you will only be able to execute AArch32 binaries that were compiled | ||
1104 | with page size aligned segments. | ||
1105 | |||
1106 | If you want to execute 32-bit userspace applications, say Y. | ||
1107 | |||
1108 | if COMPAT | ||
1109 | |||
1110 | config KUSER_HELPERS | ||
1111 | bool "Enable kuser helpers page for 32 bit applications" | ||
1112 | default y | ||
1113 | help | ||
1114 | Warning: disabling this option may break 32-bit user programs. | ||
1115 | |||
1116 | Provide kuser helpers to compat tasks. The kernel provides | ||
1117 | helper code to userspace in read only form at a fixed location | ||
1118 | to allow userspace to be independent of the CPU type fitted to | ||
1119 | the system. This permits binaries to be run on ARMv4 through | ||
1120 | to ARMv8 without modification. | ||
1121 | |||
1122 | See Documentation/arm/kernel_user_helpers.txt for details. | ||
1123 | |||
1124 | However, the fixed address nature of these helpers can be used | ||
1125 | by ROP (return orientated programming) authors when creating | ||
1126 | exploits. | ||
1127 | |||
1128 | If all of the binaries and libraries which run on your platform | ||
1129 | are built specifically for your platform, and make no use of | ||
1130 | these helpers, then you can turn this option off to hinder | ||
1131 | such exploits. However, in that case, if a binary or library | ||
1132 | relying on those helpers is run, it will not function correctly. | ||
1133 | |||
1134 | Say N here only if you are absolutely certain that you do not | ||
1135 | need these helpers; otherwise, the safe option is to say Y. | ||
1136 | |||
1137 | |||
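A sketch of how AArch32 code consumes the fixed-address helpers this option provides; the addresses, typedef and version rule follow Documentation/arm/kernel_user_helpers.txt, while atomic_set_flag() is an illustrative wrapper (this is compat code, so it must be built as 32-bit ARM):

typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg		(*(__kuser_cmpxchg_t *)0xffff0fc0)
#define __kuser_helper_version	(*(int *)0xffff0ffc)

int atomic_set_flag(volatile int *flag)
{
	if (__kuser_helper_version < 2)	/* cmpxchg needs version >= 2 */
		return -1;
	return __kuser_cmpxchg(0, 1, flag);	/* returns 0 on success */
}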
1077 | menuconfig ARMV8_DEPRECATED | 1138 | menuconfig ARMV8_DEPRECATED |
1078 | bool "Emulate deprecated/obsolete ARMv8 instructions" | 1139 | bool "Emulate deprecated/obsolete ARMv8 instructions" |
1079 | depends on COMPAT | ||
1080 | depends on SYSCTL | 1140 | depends on SYSCTL |
1081 | help | 1141 | help |
1082 | Legacy software support may require certain instructions | 1142 | Legacy software support may require certain instructions |
@@ -1142,13 +1202,7 @@ config SETEND_EMULATION | |||
1142 | If unsure, say Y | 1202 | If unsure, say Y |
1143 | endif | 1203 | endif |
1144 | 1204 | ||
1145 | config ARM64_SW_TTBR0_PAN | 1205 | endif |
1146 | bool "Emulate Privileged Access Never using TTBR0_EL1 switching" | ||
1147 | help | ||
1148 | Enabling this option prevents the kernel from accessing | ||
1149 | user-space memory directly by pointing TTBR0_EL1 to a reserved | ||
1150 | zeroed area and reserved ASID. The user access routines | ||
1151 | restore the valid TTBR0_EL1 temporarily. | ||
1152 | 1206 | ||
1153 | menu "ARMv8.1 architectural features" | 1207 | menu "ARMv8.1 architectural features" |
1154 | 1208 | ||
@@ -1314,6 +1368,9 @@ config ARM64_SVE | |||
1314 | 1368 | ||
1315 | To enable use of this extension on CPUs that implement it, say Y. | 1369 | To enable use of this extension on CPUs that implement it, say Y. |
1316 | 1370 | ||
1371 | On CPUs that support the SVE2 extensions, this option will enable | ||
1372 | those too. | ||
1373 | |||
1317 | Note that for architectural reasons, firmware _must_ implement SVE | 1374 | Note that for architectural reasons, firmware _must_ implement SVE |
1318 | support when running on SVE capable hardware. The required support | 1375 | support when running on SVE capable hardware. The required support |
1319 | is present in: | 1376 | is present in: |
@@ -1347,7 +1404,7 @@ config ARM64_PSEUDO_NMI | |||
1347 | help | 1404 | help |
1348 | Adds support for mimicking Non-Maskable Interrupts through the use of | 1405 | Adds support for mimicking Non-Maskable Interrupts through the use of |
1349 | GIC interrupt priority. This support requires version 3 or later of | 1406 | GIC interrupt priority. This support requires version 3 or later of |
1350 | Arm GIC. | 1407 | ARM GIC. |
1351 | 1408 | ||
1352 | This high priority configuration for interrupts needs to be | 1409 | This high priority configuration for interrupts needs to be |
1353 | explicitly enabled by setting the kernel parameter | 1410 | explicitly enabled by setting the kernel parameter |
@@ -1471,25 +1528,6 @@ config DMI | |||
1471 | 1528 | ||
1472 | endmenu | 1529 | endmenu |
1473 | 1530 | ||
1474 | config COMPAT | ||
1475 | bool "Kernel support for 32-bit EL0" | ||
1476 | depends on ARM64_4K_PAGES || EXPERT | ||
1477 | select COMPAT_BINFMT_ELF if BINFMT_ELF | ||
1478 | select HAVE_UID16 | ||
1479 | select OLD_SIGSUSPEND3 | ||
1480 | select COMPAT_OLD_SIGACTION | ||
1481 | help | ||
1482 | This option enables support for a 32-bit EL0 running under a 64-bit | ||
1483 | kernel at EL1. AArch32-specific components such as system calls, | ||
1484 | the user helper functions, VFP support and the ptrace interface are | ||
1485 | handled appropriately by the kernel. | ||
1486 | |||
1487 | If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware | ||
1488 | that you will only be able to execute AArch32 binaries that were compiled | ||
1489 | with page size aligned segments. | ||
1490 | |||
1491 | If you want to execute 32-bit userspace applications, say Y. | ||
1492 | |||
1493 | config SYSVIPC_COMPAT | 1531 | config SYSVIPC_COMPAT |
1494 | def_bool y | 1532 | def_bool y |
1495 | depends on COMPAT && SYSVIPC | 1533 | depends on COMPAT && SYSVIPC |
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h index 1b4cb0c55744..385c455a7c98 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h +++ b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | 2 | /* |
3 | * Copyright (C) 2018 MediaTek Inc. | 3 | * Copyright (C) 2018 MediaTek Inc. |
4 | * Author: Zhiyong Tao <zhiyong.tao@mediatek.com> | 4 | * Author: Zhiyong Tao <zhiyong.tao@mediatek.com> |
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 5fc6f51908fd..036ea77f83bc 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c | |||
@@ -372,7 +372,7 @@ static struct aead_alg ccm_aes_alg = { | |||
372 | 372 | ||
373 | static int __init aes_mod_init(void) | 373 | static int __init aes_mod_init(void) |
374 | { | 374 | { |
375 | if (!(elf_hwcap & HWCAP_AES)) | 375 | if (!cpu_have_named_feature(AES)) |
376 | return -ENODEV; | 376 | return -ENODEV; |
377 | return crypto_register_aead(&ccm_aes_alg); | 377 | return crypto_register_aead(&ccm_aes_alg); |
378 | } | 378 | } |
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index e7a95a566462..bf1b321ff4c1 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c | |||
@@ -440,7 +440,7 @@ static int __init aes_init(void) | |||
440 | int err; | 440 | int err; |
441 | int i; | 441 | int i; |
442 | 442 | ||
443 | if (!(elf_hwcap & HWCAP_ASIMD)) | 443 | if (!cpu_have_named_feature(ASIMD)) |
444 | return -ENODEV; | 444 | return -ENODEV; |
445 | 445 | ||
446 | err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); | 446 | err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); |
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c index bece1d85bd81..cb054f51c917 100644 --- a/arch/arm64/crypto/chacha-neon-glue.c +++ b/arch/arm64/crypto/chacha-neon-glue.c | |||
@@ -173,7 +173,7 @@ static struct skcipher_alg algs[] = { | |||
173 | 173 | ||
174 | static int __init chacha_simd_mod_init(void) | 174 | static int __init chacha_simd_mod_init(void) |
175 | { | 175 | { |
176 | if (!(elf_hwcap & HWCAP_ASIMD)) | 176 | if (!cpu_have_named_feature(ASIMD)) |
177 | return -ENODEV; | 177 | return -ENODEV; |
178 | 178 | ||
179 | return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); | 179 | return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); |
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index dd325829ee44..e81d5bd555c0 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c | |||
@@ -101,7 +101,7 @@ static struct shash_alg crc_t10dif_alg[] = {{ | |||
101 | 101 | ||
102 | static int __init crc_t10dif_mod_init(void) | 102 | static int __init crc_t10dif_mod_init(void) |
103 | { | 103 | { |
104 | if (elf_hwcap & HWCAP_PMULL) | 104 | if (cpu_have_named_feature(PMULL)) |
105 | return crypto_register_shashes(crc_t10dif_alg, | 105 | return crypto_register_shashes(crc_t10dif_alg, |
106 | ARRAY_SIZE(crc_t10dif_alg)); | 106 | ARRAY_SIZE(crc_t10dif_alg)); |
107 | else | 107 | else |
@@ -111,7 +111,7 @@ static int __init crc_t10dif_mod_init(void) | |||
111 | 111 | ||
112 | static void __exit crc_t10dif_mod_exit(void) | 112 | static void __exit crc_t10dif_mod_exit(void) |
113 | { | 113 | { |
114 | if (elf_hwcap & HWCAP_PMULL) | 114 | if (cpu_have_named_feature(PMULL)) |
115 | crypto_unregister_shashes(crc_t10dif_alg, | 115 | crypto_unregister_shashes(crc_t10dif_alg, |
116 | ARRAY_SIZE(crc_t10dif_alg)); | 116 | ARRAY_SIZE(crc_t10dif_alg)); |
117 | else | 117 | else |
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 791ad422c427..4e69bb78ea89 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c | |||
@@ -704,10 +704,10 @@ static int __init ghash_ce_mod_init(void) | |||
704 | { | 704 | { |
705 | int ret; | 705 | int ret; |
706 | 706 | ||
707 | if (!(elf_hwcap & HWCAP_ASIMD)) | 707 | if (!cpu_have_named_feature(ASIMD)) |
708 | return -ENODEV; | 708 | return -ENODEV; |
709 | 709 | ||
710 | if (elf_hwcap & HWCAP_PMULL) | 710 | if (cpu_have_named_feature(PMULL)) |
711 | ret = crypto_register_shashes(ghash_alg, | 711 | ret = crypto_register_shashes(ghash_alg, |
712 | ARRAY_SIZE(ghash_alg)); | 712 | ARRAY_SIZE(ghash_alg)); |
713 | else | 713 | else |
@@ -717,7 +717,7 @@ static int __init ghash_ce_mod_init(void) | |||
717 | if (ret) | 717 | if (ret) |
718 | return ret; | 718 | return ret; |
719 | 719 | ||
720 | if (elf_hwcap & HWCAP_PMULL) { | 720 | if (cpu_have_named_feature(PMULL)) { |
721 | ret = crypto_register_aead(&gcm_aes_alg); | 721 | ret = crypto_register_aead(&gcm_aes_alg); |
722 | if (ret) | 722 | if (ret) |
723 | crypto_unregister_shashes(ghash_alg, | 723 | crypto_unregister_shashes(ghash_alg, |
@@ -728,7 +728,7 @@ static int __init ghash_ce_mod_init(void) | |||
728 | 728 | ||
729 | static void __exit ghash_ce_mod_exit(void) | 729 | static void __exit ghash_ce_mod_exit(void) |
730 | { | 730 | { |
731 | if (elf_hwcap & HWCAP_PMULL) | 731 | if (cpu_have_named_feature(PMULL)) |
732 | crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); | 732 | crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); |
733 | else | 733 | else |
734 | crypto_unregister_shash(ghash_alg); | 734 | crypto_unregister_shash(ghash_alg); |
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c index 22cc32ac9448..38a589044b6c 100644 --- a/arch/arm64/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c | |||
@@ -56,7 +56,7 @@ static struct shash_alg nhpoly1305_alg = { | |||
56 | 56 | ||
57 | static int __init nhpoly1305_mod_init(void) | 57 | static int __init nhpoly1305_mod_init(void) |
58 | { | 58 | { |
59 | if (!(elf_hwcap & HWCAP_ASIMD)) | 59 | if (!cpu_have_named_feature(ASIMD)) |
60 | return -ENODEV; | 60 | return -ENODEV; |
61 | 61 | ||
62 | return crypto_register_shash(&nhpoly1305_alg); | 62 | return crypto_register_shash(&nhpoly1305_alg); |
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index 4aedeaefd61f..0cccdb9cc2c0 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c | |||
@@ -173,7 +173,7 @@ static int __init sha256_mod_init(void) | |||
173 | if (ret) | 173 | if (ret) |
174 | return ret; | 174 | return ret; |
175 | 175 | ||
176 | if (elf_hwcap & HWCAP_ASIMD) { | 176 | if (cpu_have_named_feature(ASIMD)) { |
177 | ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs)); | 177 | ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs)); |
178 | if (ret) | 178 | if (ret) |
179 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | 179 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); |
@@ -183,7 +183,7 @@ static int __init sha256_mod_init(void) | |||
183 | 183 | ||
184 | static void __exit sha256_mod_fini(void) | 184 | static void __exit sha256_mod_fini(void) |
185 | { | 185 | { |
186 | if (elf_hwcap & HWCAP_ASIMD) | 186 | if (cpu_have_named_feature(ASIMD)) |
187 | crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs)); | 187 | crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs)); |
188 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | 188 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); |
189 | } | 189 | } |
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index f2a234d6516c..b7bca1ae09e6 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h | |||
@@ -31,11 +31,23 @@ | |||
31 | #include <clocksource/arm_arch_timer.h> | 31 | #include <clocksource/arm_arch_timer.h> |
32 | 32 | ||
33 | #if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND) | 33 | #if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND) |
34 | extern struct static_key_false arch_timer_read_ool_enabled; | 34 | #define has_erratum_handler(h) \ |
35 | #define needs_unstable_timer_counter_workaround() \ | 35 | ({ \ |
36 | static_branch_unlikely(&arch_timer_read_ool_enabled) | 36 | const struct arch_timer_erratum_workaround *__wa; \ |
37 | __wa = __this_cpu_read(timer_unstable_counter_workaround); \ | ||
38 | (__wa && __wa->h); \ | ||
39 | }) | ||
40 | |||
41 | #define erratum_handler(h) \ | ||
42 | ({ \ | ||
43 | const struct arch_timer_erratum_workaround *__wa; \ | ||
44 | __wa = __this_cpu_read(timer_unstable_counter_workaround); \ | ||
45 | (__wa && __wa->h) ? __wa->h : arch_timer_##h; \ | ||
46 | }) | ||
47 | |||
37 | #else | 48 | #else |
38 | #define needs_unstable_timer_counter_workaround() false | 49 | #define has_erratum_handler(h) false |
50 | #define erratum_handler(h) (arch_timer_##h) | ||
39 | #endif | 51 | #endif |
40 | 52 | ||
41 | enum arch_timer_erratum_match_type { | 53 | enum arch_timer_erratum_match_type { |
@@ -61,23 +73,37 @@ struct arch_timer_erratum_workaround { | |||
61 | DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, | 73 | DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, |
62 | timer_unstable_counter_workaround); | 74 | timer_unstable_counter_workaround); |
63 | 75 | ||
76 | /* inline sysreg accessors that make erratum_handler() work */ | ||
77 | static inline notrace u32 arch_timer_read_cntp_tval_el0(void) | ||
78 | { | ||
79 | return read_sysreg(cntp_tval_el0); | ||
80 | } | ||
81 | |||
82 | static inline notrace u32 arch_timer_read_cntv_tval_el0(void) | ||
83 | { | ||
84 | return read_sysreg(cntv_tval_el0); | ||
85 | } | ||
86 | |||
87 | static inline notrace u64 arch_timer_read_cntpct_el0(void) | ||
88 | { | ||
89 | return read_sysreg(cntpct_el0); | ||
90 | } | ||
91 | |||
92 | static inline notrace u64 arch_timer_read_cntvct_el0(void) | ||
93 | { | ||
94 | return read_sysreg(cntvct_el0); | ||
95 | } | ||
96 | |||
64 | #define arch_timer_reg_read_stable(reg) \ | 97 | #define arch_timer_reg_read_stable(reg) \ |
65 | ({ \ | 98 | ({ \ |
66 | u64 _val; \ | 99 | u64 _val; \ |
67 | if (needs_unstable_timer_counter_workaround()) { \ | 100 | \ |
68 | const struct arch_timer_erratum_workaround *wa; \ | ||
69 | preempt_disable_notrace(); \ | 101 | preempt_disable_notrace(); \ |
70 | wa = __this_cpu_read(timer_unstable_counter_workaround); \ | 102 | _val = erratum_handler(read_ ## reg)(); \ |
71 | if (wa && wa->read_##reg) \ | ||
72 | _val = wa->read_##reg(); \ | ||
73 | else \ | ||
74 | _val = read_sysreg(reg); \ | ||
75 | preempt_enable_notrace(); \ | 103 | preempt_enable_notrace(); \ |
76 | } else { \ | 104 | \ |
77 | _val = read_sysreg(reg); \ | 105 | _val; \ |
78 | } \ | 106 | }) |
79 | _val; \ | ||
80 | }) | ||
81 | 107 | ||
82 | /* | 108 | /* |
83 | * These register accessors are marked inline so the compiler can | 109 | * These register accessors are marked inline so the compiler can |
@@ -148,18 +174,67 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) | |||
148 | isb(); | 174 | isb(); |
149 | } | 175 | } |
150 | 176 | ||
151 | static inline u64 arch_counter_get_cntpct(void) | 177 | /* |
178 | * Ensure that reads of the counter are treated the same as memory reads | ||
179 | * for the purposes of ordering by subsequent memory barriers. | ||
180 | * | ||
181 | * This insanity brought to you by speculative system register reads, | ||
182 | * out-of-order memory accesses, sequence locks and Thomas Gleixner. | ||
183 | * | ||
184 | * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html | ||
185 | */ | ||
186 | #define arch_counter_enforce_ordering(val) do { \ | ||
187 | u64 tmp, _val = (val); \ | ||
188 | \ | ||
189 | asm volatile( \ | ||
190 | " eor %0, %1, %1\n" \ | ||
191 | " add %0, sp, %0\n" \ | ||
192 | " ldr xzr, [%0]" \ | ||
193 | : "=r" (tmp) : "r" (_val)); \ | ||
194 | } while (0) | ||
195 | |||
196 | static inline u64 __arch_counter_get_cntpct_stable(void) | ||
197 | { | ||
198 | u64 cnt; | ||
199 | |||
200 | isb(); | ||
201 | cnt = arch_timer_reg_read_stable(cntpct_el0); | ||
202 | arch_counter_enforce_ordering(cnt); | ||
203 | return cnt; | ||
204 | } | ||
205 | |||
206 | static inline u64 __arch_counter_get_cntpct(void) | ||
152 | { | 207 | { |
208 | u64 cnt; | ||
209 | |||
153 | isb(); | 210 | isb(); |
154 | return arch_timer_reg_read_stable(cntpct_el0); | 211 | cnt = read_sysreg(cntpct_el0); |
212 | arch_counter_enforce_ordering(cnt); | ||
213 | return cnt; | ||
155 | } | 214 | } |
156 | 215 | ||
157 | static inline u64 arch_counter_get_cntvct(void) | 216 | static inline u64 __arch_counter_get_cntvct_stable(void) |
158 | { | 217 | { |
218 | u64 cnt; | ||
219 | |||
159 | isb(); | 220 | isb(); |
160 | return arch_timer_reg_read_stable(cntvct_el0); | 221 | cnt = arch_timer_reg_read_stable(cntvct_el0); |
222 | arch_counter_enforce_ordering(cnt); | ||
223 | return cnt; | ||
161 | } | 224 | } |
162 | 225 | ||
226 | static inline u64 __arch_counter_get_cntvct(void) | ||
227 | { | ||
228 | u64 cnt; | ||
229 | |||
230 | isb(); | ||
231 | cnt = read_sysreg(cntvct_el0); | ||
232 | arch_counter_enforce_ordering(cnt); | ||
233 | return cnt; | ||
234 | } | ||
235 | |||
236 | #undef arch_counter_enforce_ordering | ||
237 | |||
163 | static inline int arch_timer_arch_init(void) | 238 | static inline int arch_timer_arch_init(void) |
164 | { | 239 | { |
165 | return 0; | 240 | return 0; |
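A standalone sketch of the address-dependency trick above, folded into a single counter read for illustration (not the kernel's macro form): the EOR produces a zero that depends on the counter value, the ADD turns it into a valid stack address, and the dummy LDR makes subsequent memory barriers order the MRS like a load:

#include <stdint.h>

static inline uint64_t counter_read_ordered(void)
{
	uint64_t cnt, tmp;

	asm volatile(
	"	isb\n"
	"	mrs	%0, cntvct_el0\n"
	"	eor	%1, %0, %0\n"
	"	add	%1, sp, %1\n"
	"	ldr	xzr, [%1]\n"
	: "=r" (cnt), "=r" (tmp));

	return cnt;
}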
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c5308d01e228..039fbd822ec6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -407,10 +407,14 @@ alternative_endif | |||
407 | .ifc \op, cvap | 407 | .ifc \op, cvap |
408 | sys 3, c7, c12, 1, \kaddr // dc cvap | 408 | sys 3, c7, c12, 1, \kaddr // dc cvap |
409 | .else | 409 | .else |
410 | .ifc \op, cvadp | ||
411 | sys 3, c7, c13, 1, \kaddr // dc cvadp | ||
412 | .else | ||
410 | dc \op, \kaddr | 413 | dc \op, \kaddr |
411 | .endif | 414 | .endif |
412 | .endif | 415 | .endif |
413 | .endif | 416 | .endif |
417 | .endif | ||
414 | add \kaddr, \kaddr, \tmp1 | 418 | add \kaddr, \kaddr, \tmp1 |
415 | cmp \kaddr, \size | 419 | cmp \kaddr, \size |
416 | b.lo 9998b | 420 | b.lo 9998b |
@@ -442,8 +446,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU | |||
442 | * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present | 446 | * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present |
443 | */ | 447 | */ |
444 | .macro reset_pmuserenr_el0, tmpreg | 448 | .macro reset_pmuserenr_el0, tmpreg |
445 | mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer | 449 | mrs \tmpreg, id_aa64dfr0_el1 |
446 | sbfx \tmpreg, \tmpreg, #8, #4 | 450 | sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4 |
447 | cmp \tmpreg, #1 // Skip if no PMU present | 451 | cmp \tmpreg, #1 // Skip if no PMU present |
448 | b.lt 9000f | 452 | b.lt 9000f |
449 | msr pmuserenr_el0, xzr // Disable PMU access from EL0 | 453 | msr pmuserenr_el0, xzr // Disable PMU access from EL0 |
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index f66bb04fdf2d..85b6bedbcc68 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | 22 | ||
23 | #include <linux/kasan-checks.h> | ||
24 | |||
23 | #define __nops(n) ".rept " #n "\nnop\n.endr\n" | 25 | #define __nops(n) ".rept " #n "\nnop\n.endr\n" |
24 | #define nops(n) asm volatile(__nops(n)) | 26 | #define nops(n) asm volatile(__nops(n)) |
25 | 27 | ||
@@ -72,31 +74,33 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx, | |||
72 | 74 | ||
73 | #define __smp_store_release(p, v) \ | 75 | #define __smp_store_release(p, v) \ |
74 | do { \ | 76 | do { \ |
77 | typeof(p) __p = (p); \ | ||
75 | union { typeof(*p) __val; char __c[1]; } __u = \ | 78 | union { typeof(*p) __val; char __c[1]; } __u = \ |
76 | { .__val = (__force typeof(*p)) (v) }; \ | 79 | { .__val = (__force typeof(*p)) (v) }; \ |
77 | compiletime_assert_atomic_type(*p); \ | 80 | compiletime_assert_atomic_type(*p); \ |
81 | kasan_check_write(__p, sizeof(*p)); \ | ||
78 | switch (sizeof(*p)) { \ | 82 | switch (sizeof(*p)) { \ |
79 | case 1: \ | 83 | case 1: \ |
80 | asm volatile ("stlrb %w1, %0" \ | 84 | asm volatile ("stlrb %w1, %0" \ |
81 | : "=Q" (*p) \ | 85 | : "=Q" (*__p) \ |
82 | : "r" (*(__u8 *)__u.__c) \ | 86 | : "r" (*(__u8 *)__u.__c) \ |
83 | : "memory"); \ | 87 | : "memory"); \ |
84 | break; \ | 88 | break; \ |
85 | case 2: \ | 89 | case 2: \ |
86 | asm volatile ("stlrh %w1, %0" \ | 90 | asm volatile ("stlrh %w1, %0" \ |
87 | : "=Q" (*p) \ | 91 | : "=Q" (*__p) \ |
88 | : "r" (*(__u16 *)__u.__c) \ | 92 | : "r" (*(__u16 *)__u.__c) \ |
89 | : "memory"); \ | 93 | : "memory"); \ |
90 | break; \ | 94 | break; \ |
91 | case 4: \ | 95 | case 4: \ |
92 | asm volatile ("stlr %w1, %0" \ | 96 | asm volatile ("stlr %w1, %0" \ |
93 | : "=Q" (*p) \ | 97 | : "=Q" (*__p) \ |
94 | : "r" (*(__u32 *)__u.__c) \ | 98 | : "r" (*(__u32 *)__u.__c) \ |
95 | : "memory"); \ | 99 | : "memory"); \ |
96 | break; \ | 100 | break; \ |
97 | case 8: \ | 101 | case 8: \ |
98 | asm volatile ("stlr %1, %0" \ | 102 | asm volatile ("stlr %1, %0" \ |
99 | : "=Q" (*p) \ | 103 | : "=Q" (*__p) \ |
100 | : "r" (*(__u64 *)__u.__c) \ | 104 | : "r" (*(__u64 *)__u.__c) \ |
101 | : "memory"); \ | 105 | : "memory"); \ |
102 | break; \ | 106 | break; \ |
@@ -106,27 +110,29 @@ do { \ | |||
106 | #define __smp_load_acquire(p) \ | 110 | #define __smp_load_acquire(p) \ |
107 | ({ \ | 111 | ({ \ |
108 | union { typeof(*p) __val; char __c[1]; } __u; \ | 112 | union { typeof(*p) __val; char __c[1]; } __u; \ |
113 | typeof(p) __p = (p); \ | ||
109 | compiletime_assert_atomic_type(*p); \ | 114 | compiletime_assert_atomic_type(*p); \ |
115 | kasan_check_read(__p, sizeof(*p)); \ | ||
110 | switch (sizeof(*p)) { \ | 116 | switch (sizeof(*p)) { \ |
111 | case 1: \ | 117 | case 1: \ |
112 | asm volatile ("ldarb %w0, %1" \ | 118 | asm volatile ("ldarb %w0, %1" \ |
113 | : "=r" (*(__u8 *)__u.__c) \ | 119 | : "=r" (*(__u8 *)__u.__c) \ |
114 | : "Q" (*p) : "memory"); \ | 120 | : "Q" (*__p) : "memory"); \ |
115 | break; \ | 121 | break; \ |
116 | case 2: \ | 122 | case 2: \ |
117 | asm volatile ("ldarh %w0, %1" \ | 123 | asm volatile ("ldarh %w0, %1" \ |
118 | : "=r" (*(__u16 *)__u.__c) \ | 124 | : "=r" (*(__u16 *)__u.__c) \ |
119 | : "Q" (*p) : "memory"); \ | 125 | : "Q" (*__p) : "memory"); \ |
120 | break; \ | 126 | break; \ |
121 | case 4: \ | 127 | case 4: \ |
122 | asm volatile ("ldar %w0, %1" \ | 128 | asm volatile ("ldar %w0, %1" \ |
123 | : "=r" (*(__u32 *)__u.__c) \ | 129 | : "=r" (*(__u32 *)__u.__c) \ |
124 | : "Q" (*p) : "memory"); \ | 130 | : "Q" (*__p) : "memory"); \ |
125 | break; \ | 131 | break; \ |
126 | case 8: \ | 132 | case 8: \ |
127 | asm volatile ("ldar %0, %1" \ | 133 | asm volatile ("ldar %0, %1" \ |
128 | : "=r" (*(__u64 *)__u.__c) \ | 134 | : "=r" (*(__u64 *)__u.__c) \ |
129 | : "Q" (*p) : "memory"); \ | 135 | : "Q" (*__p) : "memory"); \ |
130 | break; \ | 136 | break; \ |
131 | } \ | 137 | } \ |
132 | __u.__val; \ | 138 | __u.__val; \ |
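A sketch of the message-passing pattern these acquire/release accessors order; 'data' and 'flag' are illustrative shared variables, not kernel symbols:

#include <asm/barrier.h>

static int data;
static int flag;

void producer(void)
{
	data = 42;			/* plain store              */
	smp_store_release(&flag, 1);	/* STLR: publishes 'data'   */
}

int consumer(void)
{
	if (smp_load_acquire(&flag))	/* LDAR: orders later loads */
		return data;		/* guaranteed to see 42     */
	return -1;
}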
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index 2945fe6cd863..d84294064e6a 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h | |||
@@ -11,6 +11,8 @@ | |||
11 | 11 | ||
12 | /* | 12 | /* |
13 | * #imm16 values used for BRK instruction generation | 13 | * #imm16 values used for BRK instruction generation |
14 | * 0x004: for installing kprobes | ||
15 | * 0x005: for installing uprobes | ||
14 | * Allowed values for kgdb are 0x400 - 0x7ff | 16 | * Allowed values for kgdb are 0x400 - 0x7ff |
15 | * 0x100: for triggering a fault on purpose (reserved) | 17 | * 0x100: for triggering a fault on purpose (reserved) |
16 | * 0x400: for dynamic BRK instruction | 18 | * 0x400: for dynamic BRK instruction |
@@ -18,10 +20,13 @@ | |||
18 | * 0x800: kernel-mode BUG() and WARN() traps | 20 | * 0x800: kernel-mode BUG() and WARN() traps |
19 | * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff) | 21 | * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff) |
20 | */ | 22 | */ |
23 | #define KPROBES_BRK_IMM 0x004 | ||
24 | #define UPROBES_BRK_IMM 0x005 | ||
21 | #define FAULT_BRK_IMM 0x100 | 25 | #define FAULT_BRK_IMM 0x100 |
22 | #define KGDB_DYN_DBG_BRK_IMM 0x400 | 26 | #define KGDB_DYN_DBG_BRK_IMM 0x400 |
23 | #define KGDB_COMPILED_DBG_BRK_IMM 0x401 | 27 | #define KGDB_COMPILED_DBG_BRK_IMM 0x401 |
24 | #define BUG_BRK_IMM 0x800 | 28 | #define BUG_BRK_IMM 0x800 |
25 | #define KASAN_BRK_IMM 0x900 | 29 | #define KASAN_BRK_IMM 0x900 |
30 | #define KASAN_BRK_MASK 0x0ff | ||
26 | 31 | ||
27 | #endif | 32 | #endif |
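For reference, the A64 BRK instruction carries its 16-bit comment field in bits [20:5] of the opcode, which is how these immediates become the BRK64_OPCODE_* values in debug-monitors.h below. A sketch (the base opcode value here is stated as an assumption, not taken from this diff):

#define BRK_BASE_OPCODE	0xd4200000U	/* encoding of BRK #0 */

static inline unsigned int brk_opcode(unsigned short imm16)
{
	return BRK_BASE_OPCODE | ((unsigned int)imm16 << 5);	/* imm16 -> bits [20:5] */
}
/* e.g. brk_opcode(KPROBES_BRK_IMM) == 0xd4200080, i.e. BRK #4 */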
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index f6a76e43f39e..defdc67d9ab4 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h | |||
@@ -61,7 +61,8 @@ | |||
61 | #define ARM64_HAS_GENERIC_AUTH_ARCH 40 | 61 | #define ARM64_HAS_GENERIC_AUTH_ARCH 40 |
62 | #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 | 62 | #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 |
63 | #define ARM64_HAS_IRQ_PRIO_MASKING 42 | 63 | #define ARM64_HAS_IRQ_PRIO_MASKING 42 |
64 | #define ARM64_HAS_DCPODP 43 | ||
64 | 65 | ||
65 | #define ARM64_NCAPS 43 | 66 | #define ARM64_NCAPS 44 |
66 | 67 | ||
67 | #endif /* __ASM_CPUCAPS_H */ | 68 | #endif /* __ASM_CPUCAPS_H */ |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e505e1fbd2b9..f210bcf096f7 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
@@ -14,15 +14,8 @@ | |||
14 | #include <asm/hwcap.h> | 14 | #include <asm/hwcap.h> |
15 | #include <asm/sysreg.h> | 15 | #include <asm/sysreg.h> |
16 | 16 | ||
17 | /* | 17 | #define MAX_CPU_FEATURES 64 |
18 | * In the arm64 world (as in the ARM world), elf_hwcap is used both internally | 18 | #define cpu_feature(x) KERNEL_HWCAP_ ## x |
19 | * in the kernel and for user space to keep track of which optional features | ||
20 | * are supported by the current system. So let's map feature 'x' to HWCAP_x. | ||
21 | * Note that HWCAP_x constants are bit fields so we need to take the log. | ||
22 | */ | ||
23 | |||
24 | #define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) | ||
25 | #define cpu_feature(x) ilog2(HWCAP_ ## x) | ||
26 | 19 | ||
27 | #ifndef __ASSEMBLY__ | 20 | #ifndef __ASSEMBLY__ |
28 | 21 | ||
@@ -399,11 +392,13 @@ extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); | |||
399 | for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) | 392 | for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) |
400 | 393 | ||
401 | bool this_cpu_has_cap(unsigned int cap); | 394 | bool this_cpu_has_cap(unsigned int cap); |
395 | void cpu_set_feature(unsigned int num); | ||
396 | bool cpu_have_feature(unsigned int num); | ||
397 | unsigned long cpu_get_elf_hwcap(void); | ||
398 | unsigned long cpu_get_elf_hwcap2(void); | ||
402 | 399 | ||
403 | static inline bool cpu_have_feature(unsigned int num) | 400 | #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name)) |
404 | { | 401 | #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name)) |
405 | return elf_hwcap & (1UL << num); | ||
406 | } | ||
407 | 402 | ||
408 | /* System capability check for constant caps */ | 403 | /* System capability check for constant caps */ |
409 | static inline bool __cpus_have_const_cap(int num) | 404 | static inline bool __cpus_have_const_cap(int num) |
@@ -638,11 +633,7 @@ static inline int arm64_get_ssbd_state(void) | |||
638 | #endif | 633 | #endif |
639 | } | 634 | } |
640 | 635 | ||
641 | #ifdef CONFIG_ARM64_SSBD | ||
642 | void arm64_set_ssbd_mitigation(bool state); | 636 | void arm64_set_ssbd_mitigation(bool state); |
643 | #else | ||
644 | static inline void arm64_set_ssbd_mitigation(bool state) {} | ||
645 | #endif | ||
646 | 637 | ||
647 | extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); | 638 | extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); |
648 | 639 | ||
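With elf_hwcap no longer exposed, in-kernel feature tests go through the accessors declared above. A hypothetical caller using the named-feature wrappers:

/* Hypothetical in-kernel check; the macro expands to
 * cpu_have_feature(KERNEL_HWCAP_SVE). */
static bool kernel_sees_sve(void)
{
	return cpu_have_named_feature(SVE);
}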
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5f1437099b99..2602bae334fb 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
@@ -89,6 +89,7 @@ | |||
89 | #define ARM_CPU_PART_CORTEX_A35 0xD04 | 89 | #define ARM_CPU_PART_CORTEX_A35 0xD04 |
90 | #define ARM_CPU_PART_CORTEX_A55 0xD05 | 90 | #define ARM_CPU_PART_CORTEX_A55 0xD05 |
91 | #define ARM_CPU_PART_CORTEX_A76 0xD0B | 91 | #define ARM_CPU_PART_CORTEX_A76 0xD0B |
92 | #define ARM_CPU_PART_NEOVERSE_N1 0xD0C | ||
92 | 93 | ||
93 | #define APM_CPU_PART_POTENZA 0x000 | 94 | #define APM_CPU_PART_POTENZA 0x000 |
94 | 95 | ||
@@ -118,6 +119,7 @@ | |||
118 | #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) | 119 | #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) |
119 | #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) | 120 | #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) |
120 | #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) | 121 | #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) |
122 | #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) | ||
121 | #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) | 123 | #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) |
122 | #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) | 124 | #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) |
123 | #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) | 125 | #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) |
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index a44cf5225429..0679f781696d 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h | |||
@@ -65,12 +65,9 @@ | |||
65 | #define CACHE_FLUSH_IS_SAFE 1 | 65 | #define CACHE_FLUSH_IS_SAFE 1 |
66 | 66 | ||
67 | /* kprobes BRK opcodes with ESR encoding */ | 67 | /* kprobes BRK opcodes with ESR encoding */ |
68 | #define BRK64_ESR_MASK 0xFFFF | 68 | #define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5)) |
69 | #define BRK64_ESR_KPROBES 0x0004 | ||
70 | #define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5)) | ||
71 | /* uprobes BRK opcodes with ESR encoding */ | 69 | /* uprobes BRK opcodes with ESR encoding */ |
72 | #define BRK64_ESR_UPROBES 0x0005 | 70 | #define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5)) |
73 | #define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5)) | ||
74 | 71 | ||
75 | /* AArch32 */ | 72 | /* AArch32 */ |
76 | #define DBG_ESR_EVT_BKPT 0x4 | 73 | #define DBG_ESR_EVT_BKPT 0x4 |
@@ -94,18 +91,24 @@ struct step_hook { | |||
94 | int (*fn)(struct pt_regs *regs, unsigned int esr); | 91 | int (*fn)(struct pt_regs *regs, unsigned int esr); |
95 | }; | 92 | }; |
96 | 93 | ||
97 | void register_step_hook(struct step_hook *hook); | 94 | void register_user_step_hook(struct step_hook *hook); |
98 | void unregister_step_hook(struct step_hook *hook); | 95 | void unregister_user_step_hook(struct step_hook *hook); |
96 | |||
97 | void register_kernel_step_hook(struct step_hook *hook); | ||
98 | void unregister_kernel_step_hook(struct step_hook *hook); | ||
99 | 99 | ||
100 | struct break_hook { | 100 | struct break_hook { |
101 | struct list_head node; | 101 | struct list_head node; |
102 | u32 esr_val; | ||
103 | u32 esr_mask; | ||
104 | int (*fn)(struct pt_regs *regs, unsigned int esr); | 102 | int (*fn)(struct pt_regs *regs, unsigned int esr); |
103 | u16 imm; | ||
104 | u16 mask; /* These bits are ignored when comparing with imm */ | ||
105 | }; | 105 | }; |
106 | 106 | ||
107 | void register_break_hook(struct break_hook *hook); | 107 | void register_user_break_hook(struct break_hook *hook); |
108 | void unregister_break_hook(struct break_hook *hook); | 108 | void unregister_user_break_hook(struct break_hook *hook); |
109 | |||
110 | void register_kernel_break_hook(struct break_hook *hook); | ||
111 | void unregister_kernel_break_hook(struct break_hook *hook); | ||
109 | 112 | ||
110 | u8 debug_monitors_arch(void); | 113 | u8 debug_monitors_arch(void); |
111 | 114 | ||
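A break_hook now matches on the BRK immediate (with the mask bits ignored) and is registered on either the user or the kernel list. A hypothetical kernel-side hook, with an immediate chosen purely for illustration:

#include <linux/init.h>
#include <asm/debug-monitors.h>

static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	return DBG_HOOK_HANDLED;	/* claim the exception */
}

static struct break_hook my_brk_hook = {
	.fn  = my_brk_handler,
	.imm = 0x006,			/* hypothetical immediate; mask 0 means exact match */
};

static int __init my_brk_init(void)
{
	register_kernel_break_hook(&my_brk_hook);
	return 0;
}
late_initcall(my_brk_init);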
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 6adc1a90e7e6..355d120b78cb 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h | |||
@@ -214,10 +214,10 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; | |||
214 | set_thread_flag(TIF_32BIT); \ | 214 | set_thread_flag(TIF_32BIT); \ |
215 | }) | 215 | }) |
216 | #define COMPAT_ARCH_DLINFO | 216 | #define COMPAT_ARCH_DLINFO |
217 | extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, | 217 | extern int aarch32_setup_additional_pages(struct linux_binprm *bprm, |
218 | int uses_interp); | 218 | int uses_interp); |
219 | #define compat_arch_setup_additional_pages \ | 219 | #define compat_arch_setup_additional_pages \ |
220 | aarch32_setup_vectors_page | 220 | aarch32_setup_additional_pages |
221 | 221 | ||
222 | #endif /* CONFIG_COMPAT */ | 222 | #endif /* CONFIG_COMPAT */ |
223 | 223 | ||
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 52233f00d53d..0e27fe91d5ea 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h | |||
@@ -156,9 +156,7 @@ | |||
156 | ESR_ELx_WFx_ISS_WFI) | 156 | ESR_ELx_WFx_ISS_WFI) |
157 | 157 | ||
158 | /* BRK instruction trap from AArch64 state */ | 158 | /* BRK instruction trap from AArch64 state */ |
159 | #define ESR_ELx_VAL_BRK64(imm) \ | 159 | #define ESR_ELx_BRK64_ISS_COMMENT_MASK 0xffff |
160 | ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \ | ||
161 | ((imm) & 0xffff)) | ||
162 | 160 | ||
163 | /* ISS field definitions for System instruction traps */ | 161 | /* ISS field definitions for System instruction traps */ |
164 | #define ESR_ELx_SYS64_ISS_RES0_SHIFT 22 | 162 | #define ESR_ELx_SYS64_ISS_RES0_SHIFT 22 |
@@ -198,9 +196,10 @@ | |||
198 | /* | 196 | /* |
199 | * User space cache operations have the following sysreg encoding | 197 | * User space cache operations have the following sysreg encoding |
200 | * in System instructions. | 198 | * in System instructions. |
201 | * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 14 }, WRITE (L=0) | 199 | * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 13, 14 }, WRITE (L=0) |
202 | */ | 200 | */ |
203 | #define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14 | 201 | #define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14 |
202 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVADP 13 | ||
204 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12 | 203 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12 |
205 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11 | 204 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11 |
206 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10 | 205 | #define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10 |
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index c7e1a7837706..a56efb5626fa 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
@@ -23,26 +23,34 @@ | |||
23 | 23 | ||
24 | #include <asm/errno.h> | 24 | #include <asm/errno.h> |
25 | 25 | ||
26 | #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ | ||
27 | |||
26 | #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ | 28 | #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ |
27 | do { \ | 29 | do { \ |
30 | unsigned int loops = FUTEX_MAX_LOOPS; \ | ||
31 | \ | ||
28 | uaccess_enable(); \ | 32 | uaccess_enable(); \ |
29 | asm volatile( \ | 33 | asm volatile( \ |
30 | " prfm pstl1strm, %2\n" \ | 34 | " prfm pstl1strm, %2\n" \ |
31 | "1: ldxr %w1, %2\n" \ | 35 | "1: ldxr %w1, %2\n" \ |
32 | insn "\n" \ | 36 | insn "\n" \ |
33 | "2: stlxr %w0, %w3, %2\n" \ | 37 | "2: stlxr %w0, %w3, %2\n" \ |
34 | " cbnz %w0, 1b\n" \ | 38 | " cbz %w0, 3f\n" \ |
35 | " dmb ish\n" \ | 39 | " sub %w4, %w4, %w0\n" \ |
40 | " cbnz %w4, 1b\n" \ | ||
41 | " mov %w0, %w7\n" \ | ||
36 | "3:\n" \ | 42 | "3:\n" \ |
43 | " dmb ish\n" \ | ||
37 | " .pushsection .fixup,\"ax\"\n" \ | 44 | " .pushsection .fixup,\"ax\"\n" \ |
38 | " .align 2\n" \ | 45 | " .align 2\n" \ |
39 | "4: mov %w0, %w5\n" \ | 46 | "4: mov %w0, %w6\n" \ |
40 | " b 3b\n" \ | 47 | " b 3b\n" \ |
41 | " .popsection\n" \ | 48 | " .popsection\n" \ |
42 | _ASM_EXTABLE(1b, 4b) \ | 49 | _ASM_EXTABLE(1b, 4b) \ |
43 | _ASM_EXTABLE(2b, 4b) \ | 50 | _ASM_EXTABLE(2b, 4b) \ |
44 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ | 51 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ |
45 | : "r" (oparg), "Ir" (-EFAULT) \ | 52 | "+r" (loops) \ |
53 | : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ | ||
46 | : "memory"); \ | 54 | : "memory"); \ |
47 | uaccess_disable(); \ | 55 | uaccess_disable(); \ |
48 | } while (0) | 56 | } while (0) |
@@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) | |||
57 | 65 | ||
58 | switch (op) { | 66 | switch (op) { |
59 | case FUTEX_OP_SET: | 67 | case FUTEX_OP_SET: |
60 | __futex_atomic_op("mov %w3, %w4", | 68 | __futex_atomic_op("mov %w3, %w5", |
61 | ret, oldval, uaddr, tmp, oparg); | 69 | ret, oldval, uaddr, tmp, oparg); |
62 | break; | 70 | break; |
63 | case FUTEX_OP_ADD: | 71 | case FUTEX_OP_ADD: |
64 | __futex_atomic_op("add %w3, %w1, %w4", | 72 | __futex_atomic_op("add %w3, %w1, %w5", |
65 | ret, oldval, uaddr, tmp, oparg); | 73 | ret, oldval, uaddr, tmp, oparg); |
66 | break; | 74 | break; |
67 | case FUTEX_OP_OR: | 75 | case FUTEX_OP_OR: |
68 | __futex_atomic_op("orr %w3, %w1, %w4", | 76 | __futex_atomic_op("orr %w3, %w1, %w5", |
69 | ret, oldval, uaddr, tmp, oparg); | 77 | ret, oldval, uaddr, tmp, oparg); |
70 | break; | 78 | break; |
71 | case FUTEX_OP_ANDN: | 79 | case FUTEX_OP_ANDN: |
72 | __futex_atomic_op("and %w3, %w1, %w4", | 80 | __futex_atomic_op("and %w3, %w1, %w5", |
73 | ret, oldval, uaddr, tmp, ~oparg); | 81 | ret, oldval, uaddr, tmp, ~oparg); |
74 | break; | 82 | break; |
75 | case FUTEX_OP_XOR: | 83 | case FUTEX_OP_XOR: |
76 | __futex_atomic_op("eor %w3, %w1, %w4", | 84 | __futex_atomic_op("eor %w3, %w1, %w5", |
77 | ret, oldval, uaddr, tmp, oparg); | 85 | ret, oldval, uaddr, tmp, oparg); |
78 | break; | 86 | break; |
79 | default: | 87 | default: |
@@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, | |||
93 | u32 oldval, u32 newval) | 101 | u32 oldval, u32 newval) |
94 | { | 102 | { |
95 | int ret = 0; | 103 | int ret = 0; |
104 | unsigned int loops = FUTEX_MAX_LOOPS; | ||
96 | u32 val, tmp; | 105 | u32 val, tmp; |
97 | u32 __user *uaddr; | 106 | u32 __user *uaddr; |
98 | 107 | ||
@@ -104,24 +113,30 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, | |||
104 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" | 113 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" |
105 | " prfm pstl1strm, %2\n" | 114 | " prfm pstl1strm, %2\n" |
106 | "1: ldxr %w1, %2\n" | 115 | "1: ldxr %w1, %2\n" |
107 | " sub %w3, %w1, %w4\n" | 116 | " sub %w3, %w1, %w5\n" |
108 | " cbnz %w3, 3f\n" | 117 | " cbnz %w3, 4f\n" |
109 | "2: stlxr %w3, %w5, %2\n" | 118 | "2: stlxr %w3, %w6, %2\n" |
110 | " cbnz %w3, 1b\n" | 119 | " cbz %w3, 3f\n" |
111 | " dmb ish\n" | 120 | " sub %w4, %w4, %w3\n" |
121 | " cbnz %w4, 1b\n" | ||
122 | " mov %w0, %w8\n" | ||
112 | "3:\n" | 123 | "3:\n" |
124 | " dmb ish\n" | ||
125 | "4:\n" | ||
113 | " .pushsection .fixup,\"ax\"\n" | 126 | " .pushsection .fixup,\"ax\"\n" |
114 | "4: mov %w0, %w6\n" | 127 | "5: mov %w0, %w7\n" |
115 | " b 3b\n" | 128 | " b 4b\n" |
116 | " .popsection\n" | 129 | " .popsection\n" |
117 | _ASM_EXTABLE(1b, 4b) | 130 | _ASM_EXTABLE(1b, 5b) |
118 | _ASM_EXTABLE(2b, 4b) | 131 | _ASM_EXTABLE(2b, 5b) |
119 | : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) | 132 | : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) |
120 | : "r" (oldval), "r" (newval), "Ir" (-EFAULT) | 133 | : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) |
121 | : "memory"); | 134 | : "memory"); |
122 | uaccess_disable(); | 135 | uaccess_disable(); |
123 | 136 | ||
124 | *uval = val; | 137 | if (!ret) |
138 | *uval = val; | ||
139 | |||
125 | return ret; | 140 | return ret; |
126 | } | 141 | } |
127 | 142 | ||
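In plain C, the reworked asm bounds the LL/SC retry loop so a task hammering a futex word cannot livelock the kernel: after FUTEX_MAX_LOOPS failed store-exclusives the operation bails out with -EAGAIN and lets the caller back off and retry. A sketch with hypothetical LDXR/STLXR wrappers:

extern u32 load_ex(u32 *addr);			/* hypothetical LDXR wrapper */
extern int store_ex_rel(u32 *addr, u32 val);	/* hypothetical STLXR wrapper, 0 on success */

static int bounded_futex_add(u32 *uaddr, u32 oparg, u32 *oldval)
{
	unsigned int loops = 128;	/* FUTEX_MAX_LOOPS */

	while (loops--) {
		u32 old = load_ex(uaddr);

		if (store_ex_rel(uaddr, old + oparg) == 0) {
			*oldval = old;
			return 0;	/* exclusive store succeeded */
		}
	}
	return -EAGAIN;			/* contended: give up, stay live */
}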
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 400b80b49595..b4bfb6672168 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define __ASM_HWCAP_H | 17 | #define __ASM_HWCAP_H |
18 | 18 | ||
19 | #include <uapi/asm/hwcap.h> | 19 | #include <uapi/asm/hwcap.h> |
20 | #include <asm/cpufeature.h> | ||
20 | 21 | ||
21 | #define COMPAT_HWCAP_HALF (1 << 1) | 22 | #define COMPAT_HWCAP_HALF (1 << 1) |
22 | #define COMPAT_HWCAP_THUMB (1 << 2) | 23 | #define COMPAT_HWCAP_THUMB (1 << 2) |
@@ -40,11 +41,67 @@ | |||
40 | #define COMPAT_HWCAP2_CRC32 (1 << 4) | 41 | #define COMPAT_HWCAP2_CRC32 (1 << 4) |
41 | 42 | ||
42 | #ifndef __ASSEMBLY__ | 43 | #ifndef __ASSEMBLY__ |
44 | #include <linux/log2.h> | ||
45 | |||
46 | /* | ||
47 | * For userspace we represent hwcaps as a collection of HWCAP{,2}_x bitfields | ||
48 | * as described in uapi/asm/hwcap.h. For the kernel we represent hwcaps as | ||
49 | * natural numbers (in a single range of size MAX_CPU_FEATURES) defined here | ||
50 | * with prefix KERNEL_HWCAP_ mapped to their HWCAP{,2}_x counterpart. | ||
51 | * | ||
52 | * Hwcaps should be set and tested within the kernel via the | ||
53 | * cpu_{set,have}_named_feature(feature) helpers, where feature is the | ||
54 | * unique suffix of KERNEL_HWCAP_{feature}. | ||
55 | */ | ||
56 | #define __khwcap_feature(x) const_ilog2(HWCAP_ ## x) | ||
57 | #define KERNEL_HWCAP_FP __khwcap_feature(FP) | ||
58 | #define KERNEL_HWCAP_ASIMD __khwcap_feature(ASIMD) | ||
59 | #define KERNEL_HWCAP_EVTSTRM __khwcap_feature(EVTSTRM) | ||
60 | #define KERNEL_HWCAP_AES __khwcap_feature(AES) | ||
61 | #define KERNEL_HWCAP_PMULL __khwcap_feature(PMULL) | ||
62 | #define KERNEL_HWCAP_SHA1 __khwcap_feature(SHA1) | ||
63 | #define KERNEL_HWCAP_SHA2 __khwcap_feature(SHA2) | ||
64 | #define KERNEL_HWCAP_CRC32 __khwcap_feature(CRC32) | ||
65 | #define KERNEL_HWCAP_ATOMICS __khwcap_feature(ATOMICS) | ||
66 | #define KERNEL_HWCAP_FPHP __khwcap_feature(FPHP) | ||
67 | #define KERNEL_HWCAP_ASIMDHP __khwcap_feature(ASIMDHP) | ||
68 | #define KERNEL_HWCAP_CPUID __khwcap_feature(CPUID) | ||
69 | #define KERNEL_HWCAP_ASIMDRDM __khwcap_feature(ASIMDRDM) | ||
70 | #define KERNEL_HWCAP_JSCVT __khwcap_feature(JSCVT) | ||
71 | #define KERNEL_HWCAP_FCMA __khwcap_feature(FCMA) | ||
72 | #define KERNEL_HWCAP_LRCPC __khwcap_feature(LRCPC) | ||
73 | #define KERNEL_HWCAP_DCPOP __khwcap_feature(DCPOP) | ||
74 | #define KERNEL_HWCAP_SHA3 __khwcap_feature(SHA3) | ||
75 | #define KERNEL_HWCAP_SM3 __khwcap_feature(SM3) | ||
76 | #define KERNEL_HWCAP_SM4 __khwcap_feature(SM4) | ||
77 | #define KERNEL_HWCAP_ASIMDDP __khwcap_feature(ASIMDDP) | ||
78 | #define KERNEL_HWCAP_SHA512 __khwcap_feature(SHA512) | ||
79 | #define KERNEL_HWCAP_SVE __khwcap_feature(SVE) | ||
80 | #define KERNEL_HWCAP_ASIMDFHM __khwcap_feature(ASIMDFHM) | ||
81 | #define KERNEL_HWCAP_DIT __khwcap_feature(DIT) | ||
82 | #define KERNEL_HWCAP_USCAT __khwcap_feature(USCAT) | ||
83 | #define KERNEL_HWCAP_ILRCPC __khwcap_feature(ILRCPC) | ||
84 | #define KERNEL_HWCAP_FLAGM __khwcap_feature(FLAGM) | ||
85 | #define KERNEL_HWCAP_SSBS __khwcap_feature(SSBS) | ||
86 | #define KERNEL_HWCAP_SB __khwcap_feature(SB) | ||
87 | #define KERNEL_HWCAP_PACA __khwcap_feature(PACA) | ||
88 | #define KERNEL_HWCAP_PACG __khwcap_feature(PACG) | ||
89 | |||
90 | #define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 32) | ||
91 | #define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP) | ||
92 | #define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2) | ||
93 | #define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES) | ||
94 | #define KERNEL_HWCAP_SVEPMULL __khwcap2_feature(SVEPMULL) | ||
95 | #define KERNEL_HWCAP_SVEBITPERM __khwcap2_feature(SVEBITPERM) | ||
96 | #define KERNEL_HWCAP_SVESHA3 __khwcap2_feature(SVESHA3) | ||
97 | #define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4) | ||
98 | |||
43 | /* | 99 | /* |
44 | * This yields a mask that user programs can use to figure out what | 100 | * This yields a mask that user programs can use to figure out what |
45 | * instruction set this cpu supports. | 101 | * instruction set this cpu supports. |
46 | */ | 102 | */ |
47 | #define ELF_HWCAP (elf_hwcap) | 103 | #define ELF_HWCAP cpu_get_elf_hwcap() |
104 | #define ELF_HWCAP2 cpu_get_elf_hwcap2() | ||
48 | 105 | ||
49 | #ifdef CONFIG_COMPAT | 106 | #ifdef CONFIG_COMPAT |
50 | #define COMPAT_ELF_HWCAP (compat_elf_hwcap) | 107 | #define COMPAT_ELF_HWCAP (compat_elf_hwcap) |
@@ -60,6 +117,5 @@ enum { | |||
60 | #endif | 117 | #endif |
61 | }; | 118 | }; |
62 | 119 | ||
63 | extern unsigned long elf_hwcap; | ||
64 | #endif | 120 | #endif |
65 | #endif | 121 | #endif |
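Under this scheme each KERNEL_HWCAP_* value is a plain bit position in one 64-entry space: HWCAP_x bits map to 0-31 and HWCAP2_x bits to 32-63. Illustrative consequences, following from the definitions above:

/*
 * KERNEL_HWCAP_FP     == const_ilog2(1 << 0)      == 0
 * KERNEL_HWCAP_DCPODP == const_ilog2(1 << 0) + 32 == 32
 * so MAX_CPU_FEATURES (64) bounds both ranges.
 */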
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 43d8366c1e87..629963189085 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h | |||
@@ -43,7 +43,7 @@ static inline void arch_local_irq_enable(void) | |||
43 | asm volatile(ALTERNATIVE( | 43 | asm volatile(ALTERNATIVE( |
44 | "msr daifclr, #2 // arch_local_irq_enable\n" | 44 | "msr daifclr, #2 // arch_local_irq_enable\n" |
45 | "nop", | 45 | "nop", |
46 | "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n" | 46 | __msr_s(SYS_ICC_PMR_EL1, "%0") |
47 | "dsb sy", | 47 | "dsb sy", |
48 | ARM64_HAS_IRQ_PRIO_MASKING) | 48 | ARM64_HAS_IRQ_PRIO_MASKING) |
49 | : | 49 | : |
@@ -55,7 +55,7 @@ static inline void arch_local_irq_disable(void) | |||
55 | { | 55 | { |
56 | asm volatile(ALTERNATIVE( | 56 | asm volatile(ALTERNATIVE( |
57 | "msr daifset, #2 // arch_local_irq_disable", | 57 | "msr daifset, #2 // arch_local_irq_disable", |
58 | "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0", | 58 | __msr_s(SYS_ICC_PMR_EL1, "%0"), |
59 | ARM64_HAS_IRQ_PRIO_MASKING) | 59 | ARM64_HAS_IRQ_PRIO_MASKING) |
60 | : | 60 | : |
61 | : "r" ((unsigned long) GIC_PRIO_IRQOFF) | 61 | : "r" ((unsigned long) GIC_PRIO_IRQOFF) |
@@ -86,7 +86,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
86 | "mov %0, %1\n" | 86 | "mov %0, %1\n" |
87 | "nop\n" | 87 | "nop\n" |
88 | "nop", | 88 | "nop", |
89 | "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n" | 89 | __mrs_s("%0", SYS_ICC_PMR_EL1) |
90 | "ands %1, %1, " __stringify(PSR_I_BIT) "\n" | 90 | "ands %1, %1, " __stringify(PSR_I_BIT) "\n" |
91 | "csel %0, %0, %2, eq", | 91 | "csel %0, %0, %2, eq", |
92 | ARM64_HAS_IRQ_PRIO_MASKING) | 92 | ARM64_HAS_IRQ_PRIO_MASKING) |
@@ -116,7 +116,7 @@ static inline void arch_local_irq_restore(unsigned long flags) | |||
116 | asm volatile(ALTERNATIVE( | 116 | asm volatile(ALTERNATIVE( |
117 | "msr daif, %0\n" | 117 | "msr daif, %0\n" |
118 | "nop", | 118 | "nop", |
119 | "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n" | 119 | __msr_s(SYS_ICC_PMR_EL1, "%0") |
120 | "dsb sy", | 120 | "dsb sy", |
121 | ARM64_HAS_IRQ_PRIO_MASKING) | 121 | ARM64_HAS_IRQ_PRIO_MASKING) |
122 | : "+r" (flags) | 122 | : "+r" (flags) |
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index d5a44cf859e9..21721fbf44e7 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h | |||
@@ -54,8 +54,6 @@ void arch_remove_kprobe(struct kprobe *); | |||
54 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); | 54 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); |
55 | int kprobe_exceptions_notify(struct notifier_block *self, | 55 | int kprobe_exceptions_notify(struct notifier_block *self, |
56 | unsigned long val, void *data); | 56 | unsigned long val, void *data); |
57 | int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr); | ||
58 | int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr); | ||
59 | void kretprobe_trampoline(void); | 57 | void kretprobe_trampoline(void); |
60 | void __kprobes *trampoline_probe_handler(struct pt_regs *regs); | 58 | void __kprobes *trampoline_probe_handler(struct pt_regs *regs); |
61 | 59 | ||
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4da765f2cca5..c3060833b7a5 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h | |||
@@ -30,7 +30,7 @@ | |||
30 | ({ \ | 30 | ({ \ |
31 | u64 reg; \ | 31 | u64 reg; \ |
32 | asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ | 32 | asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ |
33 | "mrs_s %0, " __stringify(r##vh),\ | 33 | __mrs_s("%0", r##vh), \ |
34 | ARM64_HAS_VIRT_HOST_EXTN) \ | 34 | ARM64_HAS_VIRT_HOST_EXTN) \ |
35 | : "=r" (reg)); \ | 35 | : "=r" (reg)); \ |
36 | reg; \ | 36 | reg; \ |
@@ -40,7 +40,7 @@ | |||
40 | do { \ | 40 | do { \ |
41 | u64 __val = (u64)(v); \ | 41 | u64 __val = (u64)(v); \ |
42 | asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ | 42 | asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ |
43 | "msr_s " __stringify(r##vh) ", %x0",\ | 43 | __msr_s(r##vh, "%x0"), \ |
44 | ARM64_HAS_VIRT_HOST_EXTN) \ | 44 | ARM64_HAS_VIRT_HOST_EXTN) \ |
45 | : : "rZ" (__val)); \ | 45 | : : "rZ" (__val)); \ |
46 | } while (0) | 46 | } while (0) |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 290195168bb3..2cb8248fa2c8 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -302,7 +302,7 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
302 | */ | 302 | */ |
303 | #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) | 303 | #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) |
304 | 304 | ||
305 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 305 | #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) |
306 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 306 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
307 | #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 307 | #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
308 | #else | 308 | #else |
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 52fa47c73bf0..dabba4b2c61f 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h | |||
@@ -33,12 +33,22 @@ | |||
33 | 33 | ||
34 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 34 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
35 | { | 35 | { |
36 | return (pmd_t *)__get_free_page(PGALLOC_GFP); | 36 | struct page *page; |
37 | |||
38 | page = alloc_page(PGALLOC_GFP); | ||
39 | if (!page) | ||
40 | return NULL; | ||
41 | if (!pgtable_pmd_page_ctor(page)) { | ||
42 | __free_page(page); | ||
43 | return NULL; | ||
44 | } | ||
45 | return page_address(page); | ||
37 | } | 46 | } |
38 | 47 | ||
39 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) | 48 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) |
40 | { | 49 | { |
41 | BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1)); | 50 | BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1)); |
51 | pgtable_pmd_page_dtor(virt_to_page(pmdp)); | ||
42 | free_page((unsigned long)pmdp); | 52 | free_page((unsigned long)pmdp); |
43 | } | 53 | } |
44 | 54 | ||
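The point of the rework is that every PMD page which passed pgtable_pmd_page_ctor() is now torn down with the matching dtor on each free path (here, and in the tlb.h hunk further down). The pairing, as hypothetical helpers:

#include <linux/mm.h>

static struct page *pmd_page_get(gfp_t gfp)	/* hypothetical, mirrors pmd_alloc_one() */
{
	struct page *page = alloc_page(gfp);

	if (page && !pgtable_pmd_page_ctor(page)) {	/* init split-ptlock state */
		__free_page(page);
		page = NULL;
	}
	return page;
}

static void pmd_page_put(struct page *page)	/* hypothetical, mirrors pmd_free() */
{
	pgtable_pmd_page_dtor(page);	/* must run before the free */
	__free_page(page);
}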
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index de70c1eabf33..2c41b04708fe 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) | |||
478 | return __pmd_to_phys(pmd); | 478 | return __pmd_to_phys(pmd); |
479 | } | 479 | } |
480 | 480 | ||
481 | static inline void pte_unmap(pte_t *pte) { } | ||
482 | |||
481 | /* Find an entry in the third-level page table. */ | 483 | /* Find an entry in the third-level page table. */ |
482 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 484 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
483 | 485 | ||
@@ -485,9 +487,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) | |||
485 | #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) | 487 | #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) |
486 | 488 | ||
487 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 489 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
488 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) | ||
489 | #define pte_unmap(pte) do { } while (0) | ||
490 | #define pte_unmap_nested(pte) do { } while (0) | ||
491 | 490 | ||
492 | #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) | 491 | #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) |
493 | #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) | 492 | #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) |
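Turning the empty pte_unmap() macro into an empty static inline is what silences -Wunused-but-set-variable: the inline consumes its argument, whereas the do-while macro left it unread. Roughly:

/*
 * With  #define pte_unmap(pte) do { } while (0)  this pattern warns,
 * because pte is assigned but never read:
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 *
 * The static inline above consumes pte and compiles to nothing.
 */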
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 15d49515efdd..d328540cb85e 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __ASM_POINTER_AUTH_H | 2 | #ifndef __ASM_POINTER_AUTH_H |
3 | #define __ASM_POINTER_AUTH_H | 3 | #define __ASM_POINTER_AUTH_H |
4 | 4 | ||
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5d9ce62bdebd..fcd0e691b1ea 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -57,7 +57,15 @@ | |||
57 | #define TASK_SIZE_64 (UL(1) << vabits_user) | 57 | #define TASK_SIZE_64 (UL(1) << vabits_user) |
58 | 58 | ||
59 | #ifdef CONFIG_COMPAT | 59 | #ifdef CONFIG_COMPAT |
60 | #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) | ||
61 | /* | ||
62 | * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied | ||
63 | * by the compat vectors page. | ||
64 | */ | ||
60 | #define TASK_SIZE_32 UL(0x100000000) | 65 | #define TASK_SIZE_32 UL(0x100000000) |
66 | #else | ||
67 | #define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE) | ||
68 | #endif /* CONFIG_ARM64_64K_PAGES */ | ||
61 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ | 69 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
62 | TASK_SIZE_32 : TASK_SIZE_64) | 70 | TASK_SIZE_32 : TASK_SIZE_64) |
63 | #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ | 71 | #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
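Spelled out, the compat limits the #if above selects (a sketch of the two cases):

/*
 * TASK_SIZE_32 under the definitions above:
 *   64K pages && KUSER_HELPERS: 0x100000000 (the compat vectors page
 *                               already occupies the last 64K page)
 *   otherwise:                  0x100000000 - PAGE_SIZE, keeping the
 *                               top page unmapped for AArch32 tasks.
 */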
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index ec60174c8c18..b2de32939ada 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -305,6 +305,28 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) | |||
305 | return regs->regs[0]; | 305 | return regs->regs[0]; |
306 | } | 306 | } |
307 | 307 | ||
308 | /** | ||
309 | * regs_get_kernel_argument() - get Nth function argument in kernel | ||
310 | * @regs: pt_regs of that context | ||
311 | * @n: function argument number (start from 0) | ||
312 | * | ||
313 | * regs_get_argument() returns @n th argument of the function call. | ||
314 | * | ||
315 | * Note that this chooses the most likely register mapping. In very rare | ||
316 | * cases this may not return correct data, for example, if one of the | ||
317 | * function parameters is 16 bytes or bigger. In such cases, we cannot | ||
318 | * access the parameter correctly, and the register assignment of | ||
319 | * subsequent parameters will be shifted. | ||
320 | */ | ||
321 | static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, | ||
322 | unsigned int n) | ||
323 | { | ||
324 | #define NR_REG_ARGUMENTS 8 | ||
325 | if (n < NR_REG_ARGUMENTS) | ||
326 | return pt_regs_read_reg(regs, n); | ||
327 | return 0; | ||
328 | } | ||
329 | |||
308 | /* We must avoid circular header include via sched.h */ | 330 | /* We must avoid circular header include via sched.h */ |
309 | struct task_struct; | 331 | struct task_struct; |
310 | int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); | 332 | int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); |
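A hypothetical kprobes pre-handler consuming the new helper; per the comment above it reads x0-x7 and returns 0 for anything beyond:

#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg1 = regs_get_kernel_argument(regs, 1);	/* x1 */

	pr_info("second argument: %lx\n", arg1);
	return 0;
}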
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index ffe47d766c25..63e0b92a5fbb 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | // Copyright (C) 2017 Arm Ltd. | 2 | // Copyright (C) 2017 Arm Ltd. |
3 | #ifndef __ASM_SDEI_H | 3 | #ifndef __ASM_SDEI_H |
4 | #define __ASM_SDEI_H | 4 | #define __ASM_SDEI_H |
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index 81abea0b7650..58e288aaf0ba 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h | |||
@@ -20,8 +20,6 @@ | |||
20 | #ifdef CONFIG_COMPAT | 20 | #ifdef CONFIG_COMPAT |
21 | #include <linux/compat.h> | 21 | #include <linux/compat.h> |
22 | 22 | ||
23 | #define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500 | ||
24 | |||
25 | int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, | 23 | int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, |
26 | struct pt_regs *regs); | 24 | struct pt_regs *regs); |
27 | int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, | 25 | int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, |
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 5412fa40825e..915809e4ac32 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h | |||
@@ -119,7 +119,7 @@ static inline pud_t *stage2_pud_offset(struct kvm *kvm, | |||
119 | static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) | 119 | static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) |
120 | { | 120 | { |
121 | if (kvm_stage2_has_pud(kvm)) | 121 | if (kvm_stage2_has_pud(kvm)) |
122 | pud_free(NULL, pud); | 122 | free_page((unsigned long)pud); |
123 | } | 123 | } |
124 | 124 | ||
125 | static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) | 125 | static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) |
@@ -192,7 +192,7 @@ static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, | |||
192 | static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) | 192 | static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) |
193 | { | 193 | { |
194 | if (kvm_stage2_has_pmd(kvm)) | 194 | if (kvm_stage2_has_pmd(kvm)) |
195 | pmd_free(NULL, pmd); | 195 | free_page((unsigned long)pmd); |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) | 198 | static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) |
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5b267dec6194..3f7b917e8f3a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -606,6 +606,20 @@ | |||
606 | #define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 | 606 | #define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 |
607 | #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 | 607 | #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 |
608 | 608 | ||
609 | /* id_aa64zfr0 */ | ||
610 | #define ID_AA64ZFR0_SM4_SHIFT 40 | ||
611 | #define ID_AA64ZFR0_SHA3_SHIFT 32 | ||
612 | #define ID_AA64ZFR0_BITPERM_SHIFT 16 | ||
613 | #define ID_AA64ZFR0_AES_SHIFT 4 | ||
614 | #define ID_AA64ZFR0_SVEVER_SHIFT 0 | ||
615 | |||
616 | #define ID_AA64ZFR0_SM4 0x1 | ||
617 | #define ID_AA64ZFR0_SHA3 0x1 | ||
618 | #define ID_AA64ZFR0_BITPERM 0x1 | ||
619 | #define ID_AA64ZFR0_AES 0x1 | ||
620 | #define ID_AA64ZFR0_AES_PMULL 0x2 | ||
621 | #define ID_AA64ZFR0_SVEVER_SVE2 0x1 | ||
622 | |||
609 | /* id_aa64mmfr0 */ | 623 | /* id_aa64mmfr0 */ |
610 | #define ID_AA64MMFR0_TGRAN4_SHIFT 28 | 624 | #define ID_AA64MMFR0_TGRAN4_SHIFT 28 |
611 | #define ID_AA64MMFR0_TGRAN64_SHIFT 24 | 625 | #define ID_AA64MMFR0_TGRAN64_SHIFT 24 |
@@ -746,20 +760,39 @@ | |||
746 | #include <linux/build_bug.h> | 760 | #include <linux/build_bug.h> |
747 | #include <linux/types.h> | 761 | #include <linux/types.h> |
748 | 762 | ||
749 | asm( | 763 | #define __DEFINE_MRS_MSR_S_REGNUM \ |
750 | " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" | 764 | " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ |
751 | " .equ .L__reg_num_x\\num, \\num\n" | 765 | " .equ .L__reg_num_x\\num, \\num\n" \ |
752 | " .endr\n" | 766 | " .endr\n" \ |
753 | " .equ .L__reg_num_xzr, 31\n" | 767 | " .equ .L__reg_num_xzr, 31\n" |
754 | "\n" | 768 | |
755 | " .macro mrs_s, rt, sreg\n" | 769 | #define DEFINE_MRS_S \ |
756 | __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) | 770 | __DEFINE_MRS_MSR_S_REGNUM \ |
771 | " .macro mrs_s, rt, sreg\n" \ | ||
772 | __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ | ||
757 | " .endm\n" | 773 | " .endm\n" |
758 | "\n" | 774 | |
759 | " .macro msr_s, sreg, rt\n" | 775 | #define DEFINE_MSR_S \ |
760 | __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) | 776 | __DEFINE_MRS_MSR_S_REGNUM \ |
777 | " .macro msr_s, sreg, rt\n" \ | ||
778 | __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ | ||
761 | " .endm\n" | 779 | " .endm\n" |
762 | ); | 780 | |
781 | #define UNDEFINE_MRS_S \ | ||
782 | " .purgem mrs_s\n" | ||
783 | |||
784 | #define UNDEFINE_MSR_S \ | ||
785 | " .purgem msr_s\n" | ||
786 | |||
787 | #define __mrs_s(v, r) \ | ||
788 | DEFINE_MRS_S \ | ||
789 | " mrs_s " v ", " __stringify(r) "\n" \ | ||
790 | UNDEFINE_MRS_S | ||
791 | |||
792 | #define __msr_s(r, v) \ | ||
793 | DEFINE_MSR_S \ | ||
794 | " msr_s " __stringify(r) ", " v "\n" \ | ||
795 | UNDEFINE_MSR_S | ||
763 | 796 | ||
764 | /* | 797 | /* |
765 | * Unlike read_cpuid, calls to read_sysreg are never expected to be | 798 | * Unlike read_cpuid, calls to read_sysreg are never expected to be |
@@ -787,13 +820,13 @@ asm( | |||
787 | */ | 820 | */ |
788 | #define read_sysreg_s(r) ({ \ | 821 | #define read_sysreg_s(r) ({ \ |
789 | u64 __val; \ | 822 | u64 __val; \ |
790 | asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \ | 823 | asm volatile(__mrs_s("%0", r) : "=r" (__val)); \ |
791 | __val; \ | 824 | __val; \ |
792 | }) | 825 | }) |
793 | 826 | ||
794 | #define write_sysreg_s(v, r) do { \ | 827 | #define write_sysreg_s(v, r) do { \ |
795 | u64 __val = (u64)(v); \ | 828 | u64 __val = (u64)(v); \ |
796 | asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ | 829 | asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ |
797 | } while (0) | 830 | } while (0) |
798 | 831 | ||
799 | /* | 832 | /* |
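The define/use/purge sequence scopes the mrs_s/msr_s assembler macros to a single asm statement, instead of relying on a file-scope asm() block whose placement relative to other asm the compiler does not guarantee. What one read expands to, roughly:

/*
 * Sketch of what read_sysreg_s(REG) now emits:
 *
 *	.irp num,0,...,30 ; .equ .L__reg_num_x\num, \num ; .endr
 *	.equ .L__reg_num_xzr, 31
 *	.macro mrs_s, rt, sreg
 *	  .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
 *	.endm
 *	mrs_s %0, <encoding of REG>
 *	.purgem mrs_s		// the macro cannot leak into later asm blocks
 */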
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 32693f34f431..fca95424e873 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h | |||
@@ -41,7 +41,6 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, | |||
41 | int sig, int code, const char *name); | 41 | int sig, int code, const char *name); |
42 | 42 | ||
43 | struct mm_struct; | 43 | struct mm_struct; |
44 | extern void show_pte(unsigned long addr); | ||
45 | extern void __show_regs(struct pt_regs *); | 44 | extern void __show_regs(struct pt_regs *); |
46 | 45 | ||
47 | extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); | 46 | extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 37603b5616a5..a287189ca8b4 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -63,7 +63,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
63 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 63 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
64 | unsigned long addr) | 64 | unsigned long addr) |
65 | { | 65 | { |
66 | tlb_remove_table(tlb, virt_to_page(pmdp)); | 66 | struct page *page = virt_to_page(pmdp); |
67 | |||
68 | pgtable_pmd_page_dtor(page); | ||
69 | tlb_remove_table(tlb, page); | ||
67 | } | 70 | } |
68 | #endif | 71 | #endif |
69 | 72 | ||
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index 2b9a63771eda..f89263c8e11a 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h | |||
@@ -38,6 +38,7 @@ struct vdso_data { | |||
38 | __u32 tz_minuteswest; /* Whacky timezone stuff */ | 38 | __u32 tz_minuteswest; /* Whacky timezone stuff */ |
39 | __u32 tz_dsttime; | 39 | __u32 tz_dsttime; |
40 | __u32 use_syscall; | 40 | __u32 use_syscall; |
41 | __u32 hrtimer_res; | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | #endif /* !__ASSEMBLY__ */ | 44 | #endif /* !__ASSEMBLY__ */ |
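Publishing hrtimer_res in the vDSO data page lets clock_getres() report the runtime resolution (asm-offsets.c below points CLOCK_REALTIME_RES at this field) rather than a build-time constant. Userspace sees only more accurate results:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_getres(CLOCK_MONOTONIC, &ts);	/* vDSO fast path */
	printf("timer resolution: %ld ns\n", ts.tv_nsec);
	return 0;
}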
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h index 0b5ec6e08c10..0a12115d9638 100644 --- a/arch/arm64/include/asm/vmap_stack.h +++ b/arch/arm64/include/asm/vmap_stack.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | // Copyright (C) 2017 Arm Ltd. | 2 | // Copyright (C) 2017 Arm Ltd. |
3 | #ifndef __ASM_VMAP_STACK_H | 3 | #ifndef __ASM_VMAP_STACK_H |
4 | #define __ASM_VMAP_STACK_H | 4 | #define __ASM_VMAP_STACK_H |
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 5f0750c2199c..1a772b162191 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #define _UAPI__ASM_HWCAP_H | 18 | #define _UAPI__ASM_HWCAP_H |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP | 21 | * HWCAP flags - for AT_HWCAP |
22 | */ | 22 | */ |
23 | #define HWCAP_FP (1 << 0) | 23 | #define HWCAP_FP (1 << 0) |
24 | #define HWCAP_ASIMD (1 << 1) | 24 | #define HWCAP_ASIMD (1 << 1) |
@@ -53,4 +53,15 @@ | |||
53 | #define HWCAP_PACA (1 << 30) | 53 | #define HWCAP_PACA (1 << 30) |
54 | #define HWCAP_PACG (1UL << 31) | 54 | #define HWCAP_PACG (1UL << 31) |
55 | 55 | ||
56 | /* | ||
57 | * HWCAP2 flags - for AT_HWCAP2 | ||
58 | */ | ||
59 | #define HWCAP2_DCPODP (1 << 0) | ||
60 | #define HWCAP2_SVE2 (1 << 1) | ||
61 | #define HWCAP2_SVEAES (1 << 2) | ||
62 | #define HWCAP2_SVEPMULL (1 << 3) | ||
63 | #define HWCAP2_SVEBITPERM (1 << 4) | ||
64 | #define HWCAP2_SVESHA3 (1 << 5) | ||
65 | #define HWCAP2_SVESM4 (1 << 6) | ||
66 | |||
56 | #endif /* _UAPI__ASM_HWCAP_H */ | 67 | #endif /* _UAPI__ASM_HWCAP_H */ |
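Userspace reads these bits from the new AT_HWCAP2 auxiliary vector entry, typically via getauxval(). A minimal SVE2 probe using the values defined above:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP2_SVE2	(1 << 1)

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);	/* 0 on kernels without HWCAP2 */

	printf("SVE2: %s\n", (hwcap2 & HWCAP2_SVE2) ? "yes" : "no");
	return 0;
}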
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cd434d0719c1..9e7dcb2c31c7 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -7,9 +7,9 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) | |||
7 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | 7 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
8 | CFLAGS_armv8_deprecated.o := -I$(src) | 8 | CFLAGS_armv8_deprecated.o := -I$(src) |
9 | 9 | ||
10 | CFLAGS_REMOVE_ftrace.o = -pg | 10 | CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) |
11 | CFLAGS_REMOVE_insn.o = -pg | 11 | CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE) |
12 | CFLAGS_REMOVE_return_address.o = -pg | 12 | CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) |
13 | 13 | ||
14 | # Object file lists. | 14 | # Object file lists. |
15 | obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ | 15 | obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ |
@@ -27,8 +27,9 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_ | |||
27 | $(obj)/%.stub.o: $(obj)/%.o FORCE | 27 | $(obj)/%.stub.o: $(obj)/%.o FORCE |
28 | $(call if_changed,objcopy) | 28 | $(call if_changed,objcopy) |
29 | 29 | ||
30 | obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 30 | obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ |
31 | sys_compat.o | 31 | sigreturn32.o sys_compat.o |
32 | obj-$(CONFIG_KUSER_HELPERS) += kuser32.o | ||
32 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o | 33 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o |
33 | obj-$(CONFIG_MODULES) += module.o | 34 | obj-$(CONFIG_MODULES) += module.o |
34 | obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o | 35 | obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o |
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7f40dcbdd51d..e10e2a5d9ddc 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -94,7 +94,7 @@ int main(void) | |||
94 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); | 94 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); |
95 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); | 95 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); |
96 | DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW); | 96 | DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW); |
97 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | 97 | DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res)); |
98 | DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); | 98 | DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); |
99 | DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); | 99 | DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); |
100 | DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); | 100 | DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); |
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 9950bb0cbd52..e88d4e7bdfc7 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/arm-smccc.h> | 19 | #include <linux/arm-smccc.h> |
20 | #include <linux/psci.h> | 20 | #include <linux/psci.h> |
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/cpu.h> | ||
22 | #include <asm/cpu.h> | 23 | #include <asm/cpu.h> |
23 | #include <asm/cputype.h> | 24 | #include <asm/cputype.h> |
24 | #include <asm/cpufeature.h> | 25 | #include <asm/cpufeature.h> |
@@ -109,7 +110,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) | |||
109 | 110 | ||
110 | atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); | 111 | atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); |
111 | 112 | ||
112 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | ||
113 | #include <asm/mmu_context.h> | 113 | #include <asm/mmu_context.h> |
114 | #include <asm/cacheflush.h> | 114 | #include <asm/cacheflush.h> |
115 | 115 | ||
@@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, | |||
131 | __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); | 131 | __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); |
132 | } | 132 | } |
133 | 133 | ||
134 | static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | 134 | static void install_bp_hardening_cb(bp_hardening_cb_t fn, |
135 | const char *hyp_vecs_start, | 135 | const char *hyp_vecs_start, |
136 | const char *hyp_vecs_end) | 136 | const char *hyp_vecs_end) |
137 | { | 137 | { |
138 | static DEFINE_RAW_SPINLOCK(bp_lock); | 138 | static DEFINE_RAW_SPINLOCK(bp_lock); |
139 | int cpu, slot = -1; | 139 | int cpu, slot = -1; |
@@ -169,7 +169,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | |||
169 | #define __smccc_workaround_1_smc_start NULL | 169 | #define __smccc_workaround_1_smc_start NULL |
170 | #define __smccc_workaround_1_smc_end NULL | 170 | #define __smccc_workaround_1_smc_end NULL |
171 | 171 | ||
172 | static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | 172 | static void install_bp_hardening_cb(bp_hardening_cb_t fn, |
173 | const char *hyp_vecs_start, | 173 | const char *hyp_vecs_start, |
174 | const char *hyp_vecs_end) | 174 | const char *hyp_vecs_end) |
175 | { | 175 | { |
@@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | |||
177 | } | 177 | } |
178 | #endif /* CONFIG_KVM_INDIRECT_VECTORS */ | 178 | #endif /* CONFIG_KVM_INDIRECT_VECTORS */ |
179 | 179 | ||
180 | static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, | ||
181 | bp_hardening_cb_t fn, | ||
182 | const char *hyp_vecs_start, | ||
183 | const char *hyp_vecs_end) | ||
184 | { | ||
185 | u64 pfr0; | ||
186 | |||
187 | if (!entry->matches(entry, SCOPE_LOCAL_CPU)) | ||
188 | return; | ||
189 | |||
190 | pfr0 = read_cpuid(ID_AA64PFR0_EL1); | ||
191 | if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) | ||
192 | return; | ||
193 | |||
194 | __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); | ||
195 | } | ||
196 | |||
197 | #include <uapi/linux/psci.h> | 180 | #include <uapi/linux/psci.h> |
198 | #include <linux/arm-smccc.h> | 181 | #include <linux/arm-smccc.h> |
199 | #include <linux/psci.h> | 182 | #include <linux/psci.h> |
@@ -220,60 +203,83 @@ static void qcom_link_stack_sanitization(void) | |||
220 | : "=&r" (tmp)); | 203 | : "=&r" (tmp)); |
221 | } | 204 | } |
222 | 205 | ||
223 | static void | 206 | static bool __nospectre_v2; |
224 | enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) | 207 | static int __init parse_nospectre_v2(char *str) |
208 | { | ||
209 | __nospectre_v2 = true; | ||
210 | return 0; | ||
211 | } | ||
212 | early_param("nospectre_v2", parse_nospectre_v2); | ||
213 | |||
214 | /* | ||
215 | * -1: No workaround | ||
216 | * 0: No workaround required | ||
217 | * 1: Workaround installed | ||
218 | */ | ||
219 | static int detect_harden_bp_fw(void) | ||
225 | { | 220 | { |
226 | bp_hardening_cb_t cb; | 221 | bp_hardening_cb_t cb; |
227 | void *smccc_start, *smccc_end; | 222 | void *smccc_start, *smccc_end; |
228 | struct arm_smccc_res res; | 223 | struct arm_smccc_res res; |
229 | u32 midr = read_cpuid_id(); | 224 | u32 midr = read_cpuid_id(); |
230 | 225 | ||
231 | if (!entry->matches(entry, SCOPE_LOCAL_CPU)) | ||
232 | return; | ||
233 | |||
234 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) | 226 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) |
235 | return; | 227 | return -1; |
236 | 228 | ||
237 | switch (psci_ops.conduit) { | 229 | switch (psci_ops.conduit) { |
238 | case PSCI_CONDUIT_HVC: | 230 | case PSCI_CONDUIT_HVC: |
239 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | 231 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
240 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 232 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
241 | if ((int)res.a0 < 0) | 233 | switch ((int)res.a0) { |
242 | return; | 234 | case 1: |
243 | cb = call_hvc_arch_workaround_1; | 235 | /* Firmware says we're just fine */ |
244 | /* This is a guest, no need to patch KVM vectors */ | 236 | return 0; |
245 | smccc_start = NULL; | 237 | case 0: |
246 | smccc_end = NULL; | 238 | cb = call_hvc_arch_workaround_1; |
239 | /* This is a guest, no need to patch KVM vectors */ | ||
240 | smccc_start = NULL; | ||
241 | smccc_end = NULL; | ||
242 | break; | ||
243 | default: | ||
244 | return -1; | ||
245 | } | ||
247 | break; | 246 | break; |
248 | 247 | ||
249 | case PSCI_CONDUIT_SMC: | 248 | case PSCI_CONDUIT_SMC: |
250 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | 249 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
251 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 250 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
252 | if ((int)res.a0 < 0) | 251 | switch ((int)res.a0) { |
253 | return; | 252 | case 1: |
254 | cb = call_smc_arch_workaround_1; | 253 | /* Firmware says we're just fine */ |
255 | smccc_start = __smccc_workaround_1_smc_start; | 254 | return 0; |
256 | smccc_end = __smccc_workaround_1_smc_end; | 255 | case 0: |
256 | cb = call_smc_arch_workaround_1; | ||
257 | smccc_start = __smccc_workaround_1_smc_start; | ||
258 | smccc_end = __smccc_workaround_1_smc_end; | ||
259 | break; | ||
260 | default: | ||
261 | return -1; | ||
262 | } | ||
257 | break; | 263 | break; |
258 | 264 | ||
259 | default: | 265 | default: |
260 | return; | 266 | return -1; |
261 | } | 267 | } |
262 | 268 | ||
263 | if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || | 269 | if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || |
264 | ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) | 270 | ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) |
265 | cb = qcom_link_stack_sanitization; | 271 | cb = qcom_link_stack_sanitization; |
266 | 272 | ||
267 | install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); | 273 | if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) |
274 | install_bp_hardening_cb(cb, smccc_start, smccc_end); | ||
268 | 275 | ||
269 | return; | 276 | return 1; |
270 | } | 277 | } |
271 | #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ | ||
272 | 278 | ||
273 | #ifdef CONFIG_ARM64_SSBD | ||
274 | DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); | 279 | DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); |
275 | 280 | ||
276 | int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; | 281 | int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; |
282 | static bool __ssb_safe = true; | ||
277 | 283 | ||
278 | static const struct ssbd_options { | 284 | static const struct ssbd_options { |
279 | const char *str; | 285 | const char *str; |
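detect_harden_bp_fw() now reports a tri-state rather than installing the callback unconditionally, so a caller can fold the answer into the overall vulnerability status. A sketch of such a consumer (the __hardenbp_enab flag is introduced later in this series of hunks):

static void update_bp_hardening_state(void)	/* hypothetical consumer */
{
	switch (detect_harden_bp_fw()) {
	case -1:
		__hardenbp_enab = false;	/* no workaround available */
		break;
	case 0:					/* firmware: CPU not affected */
	case 1:					/* firmware workaround installed */
		break;
	}
}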
@@ -343,6 +349,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt, | |||
343 | 349 | ||
344 | void arm64_set_ssbd_mitigation(bool state) | 350 | void arm64_set_ssbd_mitigation(bool state) |
345 | { | 351 | { |
352 | if (!IS_ENABLED(CONFIG_ARM64_SSBD)) { | ||
353 | pr_info_once("SSBD disabled by kernel configuration\n"); | ||
354 | return; | ||
355 | } | ||
356 | |||
346 | if (this_cpu_has_cap(ARM64_SSBS)) { | 357 | if (this_cpu_has_cap(ARM64_SSBS)) { |
347 | if (state) | 358 | if (state) |
348 | asm volatile(SET_PSTATE_SSBS(0)); | 359 | asm volatile(SET_PSTATE_SSBS(0)); |
@@ -372,16 +383,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |||
372 | struct arm_smccc_res res; | 383 | struct arm_smccc_res res; |
373 | bool required = true; | 384 | bool required = true; |
374 | s32 val; | 385 | s32 val; |
386 | bool this_cpu_safe = false; | ||
375 | 387 | ||
376 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | 388 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
377 | 389 | ||
390 | if (cpu_mitigations_off()) | ||
391 | ssbd_state = ARM64_SSBD_FORCE_DISABLE; | ||
392 | |||
393 | /* delay setting __ssb_safe until we get a firmware response */ | ||
394 | if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) | ||
395 | this_cpu_safe = true; | ||
396 | |||
378 | if (this_cpu_has_cap(ARM64_SSBS)) { | 397 | if (this_cpu_has_cap(ARM64_SSBS)) { |
398 | if (!this_cpu_safe) | ||
399 | __ssb_safe = false; | ||
379 | required = false; | 400 | required = false; |
380 | goto out_printmsg; | 401 | goto out_printmsg; |
381 | } | 402 | } |
382 | 403 | ||
383 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { | 404 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { |
384 | ssbd_state = ARM64_SSBD_UNKNOWN; | 405 | ssbd_state = ARM64_SSBD_UNKNOWN; |
406 | if (!this_cpu_safe) | ||
407 | __ssb_safe = false; | ||
385 | return false; | 408 | return false; |
386 | } | 409 | } |
387 | 410 | ||
@@ -398,6 +421,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |||
398 | 421 | ||
399 | default: | 422 | default: |
400 | ssbd_state = ARM64_SSBD_UNKNOWN; | 423 | ssbd_state = ARM64_SSBD_UNKNOWN; |
424 | if (!this_cpu_safe) | ||
425 | __ssb_safe = false; | ||
401 | return false; | 426 | return false; |
402 | } | 427 | } |
403 | 428 | ||
@@ -406,14 +431,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |||
406 | switch (val) { | 431 | switch (val) { |
407 | case SMCCC_RET_NOT_SUPPORTED: | 432 | case SMCCC_RET_NOT_SUPPORTED: |
408 | ssbd_state = ARM64_SSBD_UNKNOWN; | 433 | ssbd_state = ARM64_SSBD_UNKNOWN; |
434 | if (!this_cpu_safe) | ||
435 | __ssb_safe = false; | ||
409 | return false; | 436 | return false; |
410 | 437 | ||
438 | /* machines with mixed mitigation requirements must not return this */ | ||
411 | case SMCCC_RET_NOT_REQUIRED: | 439 | case SMCCC_RET_NOT_REQUIRED: |
412 | pr_info_once("%s mitigation not required\n", entry->desc); | 440 | pr_info_once("%s mitigation not required\n", entry->desc); |
413 | ssbd_state = ARM64_SSBD_MITIGATED; | 441 | ssbd_state = ARM64_SSBD_MITIGATED; |
414 | return false; | 442 | return false; |
415 | 443 | ||
416 | case SMCCC_RET_SUCCESS: | 444 | case SMCCC_RET_SUCCESS: |
445 | __ssb_safe = false; | ||
417 | required = true; | 446 | required = true; |
418 | break; | 447 | break; |
419 | 448 | ||
@@ -423,6 +452,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |||
423 | 452 | ||
424 | default: | 453 | default: |
425 | WARN_ON(1); | 454 | WARN_ON(1); |
455 | if (!this_cpu_safe) | ||
456 | __ssb_safe = false; | ||
426 | return false; | 457 | return false; |
427 | } | 458 | } |
428 | 459 | ||
@@ -462,7 +493,14 @@ out_printmsg: | |||
462 | 493 | ||
463 | return required; | 494 | return required; |
464 | } | 495 | } |
465 | #endif /* CONFIG_ARM64_SSBD */ | 496 | |
497 | /* known invulnerable cores */ | ||
498 | static const struct midr_range arm64_ssb_cpus[] = { | ||
499 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), | ||
500 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), | ||
501 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), | ||
502 | {}, | ||
503 | }; | ||
466 | 504 | ||
467 | static void __maybe_unused | 505 | static void __maybe_unused |
468 | cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) | 506 | cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) |
@@ -507,26 +545,67 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) | |||
507 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ | 545 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ |
508 | CAP_MIDR_RANGE_LIST(midr_list) | 546 | CAP_MIDR_RANGE_LIST(midr_list) |
509 | 547 | ||
510 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | 548 | /* Track overall mitigation state. We are only mitigated if all cores are ok */ |
549 | static bool __hardenbp_enab = true; | ||
550 | static bool __spectrev2_safe = true; | ||
511 | 551 | ||
512 | /* | 552 | /* |
513 | * List of CPUs where we need to issue a psci call to | 553 | * List of CPUs that do not need any Spectre-v2 mitigation at all. |
514 | * harden the branch predictor. | ||
515 | */ | 554 | */ |
516 | static const struct midr_range arm64_bp_harden_smccc_cpus[] = { | 555 | static const struct midr_range spectre_v2_safe_list[] = { |
517 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), | 556 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), |
518 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), | 557 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), |
519 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), | 558 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), |
520 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), | 559 | { /* sentinel */ } |
521 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), | ||
522 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), | ||
523 | MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), | ||
524 | MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), | ||
525 | MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER), | ||
526 | {}, | ||
527 | }; | 560 | }; |
528 | 561 | ||
529 | #endif | 562 | /* |
563 | * Track overall bp hardening for all heterogeneous cores in the machine. | ||
564 | * We are only considered "safe" if all booted cores are known safe. | ||
565 | */ | ||
566 | static bool __maybe_unused | ||
567 | check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) | ||
568 | { | ||
569 | int need_wa; | ||
570 | |||
571 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | ||
572 | |||
573 | /* If the CPU has CSV2 set, we're safe */ | ||
574 | if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), | ||
575 | ID_AA64PFR0_CSV2_SHIFT)) | ||
576 | return false; | ||
577 | |||
578 | /* Alternatively, we have a list of unaffected CPUs */ | ||
579 | if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) | ||
580 | return false; | ||
581 | |||
582 | /* Fallback to firmware detection */ | ||
583 | need_wa = detect_harden_bp_fw(); | ||
584 | if (!need_wa) | ||
585 | return false; | ||
586 | |||
587 | __spectrev2_safe = false; | ||
588 | |||
589 | if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { | ||
590 | pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); | ||
591 | __hardenbp_enab = false; | ||
592 | return false; | ||
593 | } | ||
594 | |||
595 | /* forced off */ | ||
596 | if (__nospectre_v2 || cpu_mitigations_off()) { | ||
597 | pr_info_once("spectrev2 mitigation disabled by command line option\n"); | ||
598 | __hardenbp_enab = false; | ||
599 | return false; | ||
600 | } | ||
601 | |||
602 | if (need_wa < 0) { | ||
603 | pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); | ||
604 | __hardenbp_enab = false; | ||
605 | } | ||
606 | |||
607 | return (need_wa > 0); | ||
608 | } | ||
530 | 609 | ||
531 | #ifdef CONFIG_HARDEN_EL2_VECTORS | 610 | #ifdef CONFIG_HARDEN_EL2_VECTORS |
532 | 611 | ||
@@ -603,6 +682,16 @@ static const struct midr_range workaround_clean_cache[] = { | |||
603 | }; | 682 | }; |
604 | #endif | 683 | #endif |
605 | 684 | ||
685 | #ifdef CONFIG_ARM64_ERRATUM_1188873 | ||
686 | static const struct midr_range erratum_1188873_list[] = { | ||
687 | /* Cortex-A76 r0p0 to r2p0 */ | ||
688 | MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), | ||
689 | /* Neoverse-N1 r0p0 to r2p0 */ | ||
690 | MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0), | ||
691 | {}, | ||
692 | }; | ||
693 | #endif | ||
694 | |||
606 | const struct arm64_cpu_capabilities arm64_errata[] = { | 695 | const struct arm64_cpu_capabilities arm64_errata[] = { |
607 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | 696 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE |
608 | { | 697 | { |
@@ -701,13 +790,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
701 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), | 790 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
702 | }, | 791 | }, |
703 | #endif | 792 | #endif |
704 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | ||
705 | { | 793 | { |
706 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 794 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
707 | .cpu_enable = enable_smccc_arch_workaround_1, | 795 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
708 | ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), | 796 | .matches = check_branch_predictor, |
709 | }, | 797 | }, |
710 | #endif | ||
711 | #ifdef CONFIG_HARDEN_EL2_VECTORS | 798 | #ifdef CONFIG_HARDEN_EL2_VECTORS |
712 | { | 799 | { |
713 | .desc = "EL2 vector hardening", | 800 | .desc = "EL2 vector hardening", |
@@ -715,20 +802,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
715 | ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), | 802 | ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), |
716 | }, | 803 | }, |
717 | #endif | 804 | #endif |
718 | #ifdef CONFIG_ARM64_SSBD | ||
719 | { | 805 | { |
720 | .desc = "Speculative Store Bypass Disable", | 806 | .desc = "Speculative Store Bypass Disable", |
721 | .capability = ARM64_SSBD, | 807 | .capability = ARM64_SSBD, |
722 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | 808 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
723 | .matches = has_ssbd_mitigation, | 809 | .matches = has_ssbd_mitigation, |
810 | .midr_range_list = arm64_ssb_cpus, | ||
724 | }, | 811 | }, |
725 | #endif | ||
726 | #ifdef CONFIG_ARM64_ERRATUM_1188873 | 812 | #ifdef CONFIG_ARM64_ERRATUM_1188873 |
727 | { | 813 | { |
728 | /* Cortex-A76 r0p0 to r2p0 */ | ||
729 | .desc = "ARM erratum 1188873", | 814 | .desc = "ARM erratum 1188873", |
730 | .capability = ARM64_WORKAROUND_1188873, | 815 | .capability = ARM64_WORKAROUND_1188873, |
731 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), | 816 | ERRATA_MIDR_RANGE_LIST(erratum_1188873_list), |
732 | }, | 817 | }, |
733 | #endif | 818 | #endif |
734 | #ifdef CONFIG_ARM64_ERRATUM_1165522 | 819 | #ifdef CONFIG_ARM64_ERRATUM_1165522 |
@@ -742,3 +827,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
742 | { | 827 | { |
743 | } | 828 | } |
744 | }; | 829 | }; |
830 | |||
831 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, | ||
832 | char *buf) | ||
833 | { | ||
834 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
835 | } | ||
836 | |||
837 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, | ||
838 | char *buf) | ||
839 | { | ||
840 | if (__spectrev2_safe) | ||
841 | return sprintf(buf, "Not affected\n"); | ||
842 | |||
843 | if (__hardenbp_enab) | ||
844 | return sprintf(buf, "Mitigation: Branch predictor hardening\n"); | ||
845 | |||
846 | return sprintf(buf, "Vulnerable\n"); | ||
847 | } | ||
848 | |||
849 | ssize_t cpu_show_spec_store_bypass(struct device *dev, | ||
850 | struct device_attribute *attr, char *buf) | ||
851 | { | ||
852 | if (__ssb_safe) | ||
853 | return sprintf(buf, "Not affected\n"); | ||
854 | |||
855 | switch (ssbd_state) { | ||
856 | case ARM64_SSBD_KERNEL: | ||
857 | case ARM64_SSBD_FORCE_ENABLE: | ||
858 | if (IS_ENABLED(CONFIG_ARM64_SSBD)) | ||
859 | return sprintf(buf, | ||
860 | "Mitigation: Speculative Store Bypass disabled via prctl\n"); | ||
861 | } | ||
862 | |||
863 | return sprintf(buf, "Vulnerable\n"); | ||
864 | } | ||
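
The three cpu_show_*() handlers above back the generic /sys/devices/system/cpu/vulnerabilities/ interface that the rest of this series enables for arm64. As a hedged illustration only (not part of the patch), a userspace reader might look like the sketch below; the file names follow the generic CONFIG_GENERIC_CPU_VULNERABILITIES layout:

```c
/* Hedged sketch: dump the sysfs files populated by the cpu_show_*()
 * handlers above. Files may be absent on kernels without this series. */
#include <stdio.h>

int main(void)
{
	static const char *files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v1",
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
		"/sys/devices/system/cpu/vulnerabilities/meltdown",
	};
	char line[128];

	for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* not exposed on this kernel */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}
```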
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index ea001241bdd4..00f8b8612b69 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c | |||
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu) | |||
85 | pr_err("%pOF: missing enable-method property\n", | 85 | pr_err("%pOF: missing enable-method property\n", |
86 | dn); | 86 | dn); |
87 | } | 87 | } |
88 | of_node_put(dn); | ||
88 | } else { | 89 | } else { |
89 | enable_method = acpi_get_enable_method(cpu); | 90 | enable_method = acpi_get_enable_method(cpu); |
90 | if (!enable_method) { | 91 | if (!enable_method) { |
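
The one-line of_node_put() addition above plugs a device-node reference leak: the node returned by the earlier lookup was never released once the enable-method string had been read. A minimal sketch of the refcount rule being enforced, assuming the usual of_get_cpu_node()/of_get_property() calls:

```c
/* Hedged sketch (not the patched function itself): every successful
 * of_get_cpu_node()/of_find_*() lookup returns a node with an elevated
 * refcount that must be dropped with of_node_put() on all exit paths. */
struct device_node *dn = of_get_cpu_node(cpu, NULL);

if (dn) {
	const char *method = of_get_property(dn, "enable-method", NULL);

	/* ... use method while dn is still held ... */
	of_node_put(dn);	/* balance the lookup, success or failure */
}
```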
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4061de10cea6..2b807f129e60 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stop_machine.h> | 25 | #include <linux/stop_machine.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/mm.h> | 27 | #include <linux/mm.h> |
28 | #include <linux/cpu.h> | ||
28 | #include <asm/cpu.h> | 29 | #include <asm/cpu.h> |
29 | #include <asm/cpufeature.h> | 30 | #include <asm/cpufeature.h> |
30 | #include <asm/cpu_ops.h> | 31 | #include <asm/cpu_ops.h> |
@@ -35,8 +36,8 @@ | |||
35 | #include <asm/traps.h> | 36 | #include <asm/traps.h> |
36 | #include <asm/virt.h> | 37 | #include <asm/virt.h> |
37 | 38 | ||
38 | unsigned long elf_hwcap __read_mostly; | 39 | /* Kernel representation of AT_HWCAP and AT_HWCAP2 */ |
39 | EXPORT_SYMBOL_GPL(elf_hwcap); | 40 | static unsigned long elf_hwcap __read_mostly; |
40 | 41 | ||
41 | #ifdef CONFIG_COMPAT | 42 | #ifdef CONFIG_COMPAT |
42 | #define COMPAT_ELF_HWCAP_DEFAULT \ | 43 | #define COMPAT_ELF_HWCAP_DEFAULT \ |
@@ -184,6 +185,15 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { | |||
184 | ARM64_FTR_END, | 185 | ARM64_FTR_END, |
185 | }; | 186 | }; |
186 | 187 | ||
188 | static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { | ||
189 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0), | ||
190 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0), | ||
191 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0), | ||
192 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0), | ||
193 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0), | ||
194 | ARM64_FTR_END, | ||
195 | }; | ||
196 | |||
187 | static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { | 197 | static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { |
188 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), | 198 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), |
189 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), | 199 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), |
@@ -392,7 +402,7 @@ static const struct __ftr_reg_entry { | |||
392 | /* Op1 = 0, CRn = 0, CRm = 4 */ | 402 | /* Op1 = 0, CRn = 0, CRm = 4 */ |
393 | ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), | 403 | ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), |
394 | ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), | 404 | ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), |
395 | ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz), | 405 | ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0), |
396 | 406 | ||
397 | /* Op1 = 0, CRn = 0, CRm = 5 */ | 407 | /* Op1 = 0, CRn = 0, CRm = 5 */ |
398 | ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), | 408 | ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), |
@@ -947,7 +957,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) | |||
947 | return has_cpuid_feature(entry, scope); | 957 | return has_cpuid_feature(entry, scope); |
948 | } | 958 | } |
949 | 959 | ||
950 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | 960 | static bool __meltdown_safe = true; |
951 | static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ | 961 | static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ |
952 | 962 | ||
953 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | 963 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, |
@@ -966,7 +976,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | |||
966 | MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), | 976 | MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), |
967 | { /* sentinel */ } | 977 | { /* sentinel */ } |
968 | }; | 978 | }; |
969 | char const *str = "command line option"; | 979 | char const *str = "kpti command line option"; |
980 | bool meltdown_safe; | ||
981 | |||
982 | meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); | ||
983 | |||
984 | /* Defer to CPU feature registers */ | ||
985 | if (has_cpuid_feature(entry, scope)) | ||
986 | meltdown_safe = true; | ||
987 | |||
988 | if (!meltdown_safe) | ||
989 | __meltdown_safe = false; | ||
970 | 990 | ||
971 | /* | 991 | /* |
972 | * For reasons that aren't entirely clear, enabling KPTI on Cavium | 992 | * For reasons that aren't entirely clear, enabling KPTI on Cavium |
@@ -978,6 +998,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | |||
978 | __kpti_forced = -1; | 998 | __kpti_forced = -1; |
979 | } | 999 | } |
980 | 1000 | ||
1001 | /* Useful for KASLR robustness */ | ||
1002 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { | ||
1003 | if (!__kpti_forced) { | ||
1004 | str = "KASLR"; | ||
1005 | __kpti_forced = 1; | ||
1006 | } | ||
1007 | } | ||
1008 | |||
1009 | if (cpu_mitigations_off() && !__kpti_forced) { | ||
1010 | str = "mitigations=off"; | ||
1011 | __kpti_forced = -1; | ||
1012 | } | ||
1013 | |||
1014 | if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { | ||
1015 | pr_info_once("kernel page table isolation disabled by kernel configuration\n"); | ||
1016 | return false; | ||
1017 | } | ||
1018 | |||
981 | /* Forced? */ | 1019 | /* Forced? */ |
982 | if (__kpti_forced) { | 1020 | if (__kpti_forced) { |
983 | pr_info_once("kernel page table isolation forced %s by %s\n", | 1021 | pr_info_once("kernel page table isolation forced %s by %s\n", |
@@ -985,18 +1023,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | |||
985 | return __kpti_forced > 0; | 1023 | return __kpti_forced > 0; |
986 | } | 1024 | } |
987 | 1025 | ||
988 | /* Useful for KASLR robustness */ | 1026 | return !meltdown_safe; |
989 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) | ||
990 | return kaslr_offset() > 0; | ||
991 | |||
992 | /* Don't force KPTI for CPUs that are not vulnerable */ | ||
993 | if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) | ||
994 | return false; | ||
995 | |||
996 | /* Defer to CPU feature registers */ | ||
997 | return !has_cpuid_feature(entry, scope); | ||
998 | } | 1027 | } |
999 | 1028 | ||
1029 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
1000 | static void | 1030 | static void |
1001 | kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) | 1031 | kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) |
1002 | { | 1032 | { |
@@ -1026,6 +1056,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) | |||
1026 | 1056 | ||
1027 | return; | 1057 | return; |
1028 | } | 1058 | } |
1059 | #else | ||
1060 | static void | ||
1061 | kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) | ||
1062 | { | ||
1063 | } | ||
1064 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
1029 | 1065 | ||
1030 | static int __init parse_kpti(char *str) | 1066 | static int __init parse_kpti(char *str) |
1031 | { | 1067 | { |
@@ -1039,7 +1075,6 @@ static int __init parse_kpti(char *str) | |||
1039 | return 0; | 1075 | return 0; |
1040 | } | 1076 | } |
1041 | early_param("kpti", parse_kpti); | 1077 | early_param("kpti", parse_kpti); |
1042 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
1043 | 1078 | ||
1044 | #ifdef CONFIG_ARM64_HW_AFDBM | 1079 | #ifdef CONFIG_ARM64_HW_AFDBM |
1045 | static inline void __cpu_enable_hw_dbm(void) | 1080 | static inline void __cpu_enable_hw_dbm(void) |
@@ -1306,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
1306 | .field_pos = ID_AA64PFR0_EL0_SHIFT, | 1341 | .field_pos = ID_AA64PFR0_EL0_SHIFT, |
1307 | .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, | 1342 | .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, |
1308 | }, | 1343 | }, |
1309 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
1310 | { | 1344 | { |
1311 | .desc = "Kernel page table isolation (KPTI)", | 1345 | .desc = "Kernel page table isolation (KPTI)", |
1312 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, | 1346 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
@@ -1322,7 +1356,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
1322 | .matches = unmap_kernel_at_el0, | 1356 | .matches = unmap_kernel_at_el0, |
1323 | .cpu_enable = kpti_install_ng_mappings, | 1357 | .cpu_enable = kpti_install_ng_mappings, |
1324 | }, | 1358 | }, |
1325 | #endif | ||
1326 | { | 1359 | { |
1327 | /* FP/SIMD is not implemented */ | 1360 | /* FP/SIMD is not implemented */ |
1328 | .capability = ARM64_HAS_NO_FPSIMD, | 1361 | .capability = ARM64_HAS_NO_FPSIMD, |
@@ -1340,6 +1373,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
1340 | .field_pos = ID_AA64ISAR1_DPB_SHIFT, | 1373 | .field_pos = ID_AA64ISAR1_DPB_SHIFT, |
1341 | .min_field_value = 1, | 1374 | .min_field_value = 1, |
1342 | }, | 1375 | }, |
1376 | { | ||
1377 | .desc = "Data cache clean to Point of Deep Persistence", | ||
1378 | .capability = ARM64_HAS_DCPODP, | ||
1379 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, | ||
1380 | .matches = has_cpuid_feature, | ||
1381 | .sys_reg = SYS_ID_AA64ISAR1_EL1, | ||
1382 | .sign = FTR_UNSIGNED, | ||
1383 | .field_pos = ID_AA64ISAR1_DPB_SHIFT, | ||
1384 | .min_field_value = 2, | ||
1385 | }, | ||
1343 | #endif | 1386 | #endif |
1344 | #ifdef CONFIG_ARM64_SVE | 1387 | #ifdef CONFIG_ARM64_SVE |
1345 | { | 1388 | { |
@@ -1571,39 +1614,46 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { | |||
1571 | #endif | 1614 | #endif |
1572 | 1615 | ||
1573 | static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { | 1616 | static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { |
1574 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL), | 1617 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), |
1575 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES), | 1618 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), |
1576 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1), | 1619 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), |
1577 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2), | 1620 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), |
1578 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512), | 1621 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), |
1579 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32), | 1622 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), |
1580 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS), | 1623 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), |
1581 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM), | 1624 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), |
1582 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3), | 1625 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), |
1583 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3), | 1626 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), |
1584 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), | 1627 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), |
1585 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP), | 1628 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), |
1586 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM), | 1629 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), |
1587 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM), | 1630 | HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), |
1588 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), | 1631 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), |
1589 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), | 1632 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), |
1590 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), | 1633 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), |
1591 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP), | 1634 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), |
1592 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT), | 1635 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), |
1593 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP), | 1636 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), |
1594 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), | 1637 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), |
1595 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), | 1638 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), |
1596 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), | 1639 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA), |
1597 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC), | 1640 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC), |
1598 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB), | 1641 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), |
1599 | HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT), | 1642 | HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB), |
1643 | HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), | ||
1600 | #ifdef CONFIG_ARM64_SVE | 1644 | #ifdef CONFIG_ARM64_SVE |
1601 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), | 1645 | HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), |
1646 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), | ||
1647 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES), | ||
1648 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), | ||
1649 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), | ||
1650 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), | ||
1651 | HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4), | ||
1602 | #endif | 1652 | #endif |
1603 | HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS), | 1653 | HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), |
1604 | #ifdef CONFIG_ARM64_PTR_AUTH | 1654 | #ifdef CONFIG_ARM64_PTR_AUTH |
1605 | HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA), | 1655 | HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), |
1606 | HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG), | 1656 | HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), |
1607 | #endif | 1657 | #endif |
1608 | {}, | 1658 | {}, |
1609 | }; | 1659 | }; |
@@ -1623,7 +1673,7 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) | |||
1623 | { | 1673 | { |
1624 | switch (cap->hwcap_type) { | 1674 | switch (cap->hwcap_type) { |
1625 | case CAP_HWCAP: | 1675 | case CAP_HWCAP: |
1626 | elf_hwcap |= cap->hwcap; | 1676 | cpu_set_feature(cap->hwcap); |
1627 | break; | 1677 | break; |
1628 | #ifdef CONFIG_COMPAT | 1678 | #ifdef CONFIG_COMPAT |
1629 | case CAP_COMPAT_HWCAP: | 1679 | case CAP_COMPAT_HWCAP: |
@@ -1646,7 +1696,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) | |||
1646 | 1696 | ||
1647 | switch (cap->hwcap_type) { | 1697 | switch (cap->hwcap_type) { |
1648 | case CAP_HWCAP: | 1698 | case CAP_HWCAP: |
1649 | rc = (elf_hwcap & cap->hwcap) != 0; | 1699 | rc = cpu_have_feature(cap->hwcap); |
1650 | break; | 1700 | break; |
1651 | #ifdef CONFIG_COMPAT | 1701 | #ifdef CONFIG_COMPAT |
1652 | case CAP_COMPAT_HWCAP: | 1702 | case CAP_COMPAT_HWCAP: |
@@ -1667,7 +1717,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) | |||
1667 | static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) | 1717 | static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) |
1668 | { | 1718 | { |
1669 | /* We support emulation of accesses to CPU ID feature registers */ | 1719 | /* We support emulation of accesses to CPU ID feature registers */ |
1670 | elf_hwcap |= HWCAP_CPUID; | 1720 | cpu_set_named_feature(CPUID); |
1671 | for (; hwcaps->matches; hwcaps++) | 1721 | for (; hwcaps->matches; hwcaps++) |
1672 | if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) | 1722 | if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) |
1673 | cap_set_elf_hwcap(hwcaps); | 1723 | cap_set_elf_hwcap(hwcaps); |
@@ -1947,6 +1997,35 @@ bool this_cpu_has_cap(unsigned int n) | |||
1947 | return false; | 1997 | return false; |
1948 | } | 1998 | } |
1949 | 1999 | ||
2000 | void cpu_set_feature(unsigned int num) | ||
2001 | { | ||
2002 | WARN_ON(num >= MAX_CPU_FEATURES); | ||
2003 | elf_hwcap |= BIT(num); | ||
2004 | } | ||
2005 | EXPORT_SYMBOL_GPL(cpu_set_feature); | ||
2006 | |||
2007 | bool cpu_have_feature(unsigned int num) | ||
2008 | { | ||
2009 | WARN_ON(num >= MAX_CPU_FEATURES); | ||
2010 | return elf_hwcap & BIT(num); | ||
2011 | } | ||
2012 | EXPORT_SYMBOL_GPL(cpu_have_feature); | ||
2013 | |||
2014 | unsigned long cpu_get_elf_hwcap(void) | ||
2015 | { | ||
2016 | /* | ||
2017 | * We currently only populate the first 32 bits of AT_HWCAP. Please | ||
2018 | * note that for userspace compatibility we guarantee that bits 62 | ||
2019 | * and 63 will always be returned as 0. | ||
2020 | */ | ||
2021 | return lower_32_bits(elf_hwcap); | ||
2022 | } | ||
2023 | |||
2024 | unsigned long cpu_get_elf_hwcap2(void) | ||
2025 | { | ||
2026 | return upper_32_bits(elf_hwcap); | ||
2027 | } | ||
2028 | |||
1950 | static void __init setup_system_capabilities(void) | 2029 | static void __init setup_system_capabilities(void) |
1951 | { | 2030 | { |
1952 | /* | 2031 | /* |
@@ -2101,3 +2180,15 @@ static int __init enable_mrs_emulation(void) | |||
2101 | } | 2180 | } |
2102 | 2181 | ||
2103 | core_initcall(enable_mrs_emulation); | 2182 | core_initcall(enable_mrs_emulation); |
2183 | |||
2184 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, | ||
2185 | char *buf) | ||
2186 | { | ||
2187 | if (__meltdown_safe) | ||
2188 | return sprintf(buf, "Not affected\n"); | ||
2189 | |||
2190 | if (arm64_kernel_unmapped_at_el0()) | ||
2191 | return sprintf(buf, "Mitigation: PTI\n"); | ||
2192 | |||
2193 | return sprintf(buf, "Vulnerable\n"); | ||
2194 | } | ||
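
With elf_hwcap now private to cpufeature.c and exported through cpu_get_elf_hwcap()/cpu_get_elf_hwcap2(), the second 32-bit word reaches userspace via the new AT_HWCAP2 auxiliary vector entry. A hedged userspace sketch; the HWCAP2_SVE2 and AT_HWCAP2 fallback values are taken from the arm64 uapi header added by this series and from elf.h, for builds against older headers:

```c
/* Hedged sketch: probe an AT_HWCAP2 bit fed by cpu_get_elf_hwcap2(). */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP2
#define AT_HWCAP2	26
#endif
#ifndef HWCAP2_SVE2
#define HWCAP2_SVE2	(1 << 1)	/* from arm64 uapi <asm/hwcap.h> */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);	/* 0 if absent */

	printf("SVE2 %s\n", (hwcap2 & HWCAP2_SVE2) ? "present" : "absent");
	return 0;
}
```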
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index ca0685f33900..f6f7936be6e7 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -85,6 +85,13 @@ static const char *const hwcap_str[] = { | |||
85 | "sb", | 85 | "sb", |
86 | "paca", | 86 | "paca", |
87 | "pacg", | 87 | "pacg", |
88 | "dcpodp", | ||
89 | "sve2", | ||
90 | "sveaes", | ||
91 | "svepmull", | ||
92 | "svebitperm", | ||
93 | "svesha3", | ||
94 | "svesm4", | ||
88 | NULL | 95 | NULL |
89 | }; | 96 | }; |
90 | 97 | ||
@@ -167,7 +174,7 @@ static int c_show(struct seq_file *m, void *v) | |||
167 | #endif /* CONFIG_COMPAT */ | 174 | #endif /* CONFIG_COMPAT */ |
168 | } else { | 175 | } else { |
169 | for (j = 0; hwcap_str[j]; j++) | 176 | for (j = 0; hwcap_str[j]; j++) |
170 | if (elf_hwcap & (1 << j)) | 177 | if (cpu_have_feature(j)) |
171 | seq_printf(m, " %s", hwcap_str[j]); | 178 | seq_printf(m, " %s", hwcap_str[j]); |
172 | } | 179 | } |
173 | seq_puts(m, "\n"); | 180 | seq_puts(m, "\n"); |
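
The hwcap_str[] additions above also surface as tokens on the Features line of /proc/cpuinfo, indexed by the same feature numbers that cpu_have_feature() now checks. A hedged sketch of a naive token scan (illustrative only; a robust parser would split the line on whitespace):

```c
/* Hedged sketch: look for the new "sve2" token in /proc/cpuinfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[512];
	int found = 0;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* " sve2" only ever prefixes the sve2 token itself */
		if (!strncmp(line, "Features", 8) && strstr(line, " sve2"))
			found = 1;
	}
	fclose(f);
	printf("sve2: %s\n", found ? "yes" : "no");
	return 0;
}
```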
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index d7bb6aefae0a..555b6bd2f3d6 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors); | |||
135 | */ | 135 | */ |
136 | static int clear_os_lock(unsigned int cpu) | 136 | static int clear_os_lock(unsigned int cpu) |
137 | { | 137 | { |
138 | write_sysreg(0, osdlr_el1); | ||
138 | write_sysreg(0, oslar_el1); | 139 | write_sysreg(0, oslar_el1); |
139 | isb(); | 140 | isb(); |
140 | return 0; | 141 | return 0; |
@@ -163,25 +164,46 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) | |||
163 | } | 164 | } |
164 | NOKPROBE_SYMBOL(clear_regs_spsr_ss); | 165 | NOKPROBE_SYMBOL(clear_regs_spsr_ss); |
165 | 166 | ||
166 | /* EL1 Single Step Handler hooks */ | 167 | static DEFINE_SPINLOCK(debug_hook_lock); |
167 | static LIST_HEAD(step_hook); | 168 | static LIST_HEAD(user_step_hook); |
168 | static DEFINE_SPINLOCK(step_hook_lock); | 169 | static LIST_HEAD(kernel_step_hook); |
169 | 170 | ||
170 | void register_step_hook(struct step_hook *hook) | 171 | static void register_debug_hook(struct list_head *node, struct list_head *list) |
171 | { | 172 | { |
172 | spin_lock(&step_hook_lock); | 173 | spin_lock(&debug_hook_lock); |
173 | list_add_rcu(&hook->node, &step_hook); | 174 | list_add_rcu(node, list); |
174 | spin_unlock(&step_hook_lock); | 175 | spin_unlock(&debug_hook_lock); |
176 | |||
175 | } | 177 | } |
176 | 178 | ||
177 | void unregister_step_hook(struct step_hook *hook) | 179 | static void unregister_debug_hook(struct list_head *node) |
178 | { | 180 | { |
179 | spin_lock(&step_hook_lock); | 181 | spin_lock(&debug_hook_lock); |
180 | list_del_rcu(&hook->node); | 182 | list_del_rcu(node); |
181 | spin_unlock(&step_hook_lock); | 183 | spin_unlock(&debug_hook_lock); |
182 | synchronize_rcu(); | 184 | synchronize_rcu(); |
183 | } | 185 | } |
184 | 186 | ||
187 | void register_user_step_hook(struct step_hook *hook) | ||
188 | { | ||
189 | register_debug_hook(&hook->node, &user_step_hook); | ||
190 | } | ||
191 | |||
192 | void unregister_user_step_hook(struct step_hook *hook) | ||
193 | { | ||
194 | unregister_debug_hook(&hook->node); | ||
195 | } | ||
196 | |||
197 | void register_kernel_step_hook(struct step_hook *hook) | ||
198 | { | ||
199 | register_debug_hook(&hook->node, &kernel_step_hook); | ||
200 | } | ||
201 | |||
202 | void unregister_kernel_step_hook(struct step_hook *hook) | ||
203 | { | ||
204 | unregister_debug_hook(&hook->node); | ||
205 | } | ||
206 | |||
185 | /* | 207 | /* |
186 | * Call registered single step handlers | 208 | * Call registered single step handlers |
187 | * There is no Syndrome info to check for determining the handler. | 209 | * There is no Syndrome info to check for determining the handler. |
@@ -191,11 +213,14 @@ void unregister_step_hook(struct step_hook *hook) | |||
191 | static int call_step_hook(struct pt_regs *regs, unsigned int esr) | 213 | static int call_step_hook(struct pt_regs *regs, unsigned int esr) |
192 | { | 214 | { |
193 | struct step_hook *hook; | 215 | struct step_hook *hook; |
216 | struct list_head *list; | ||
194 | int retval = DBG_HOOK_ERROR; | 217 | int retval = DBG_HOOK_ERROR; |
195 | 218 | ||
219 | list = user_mode(regs) ? &user_step_hook : &kernel_step_hook; | ||
220 | |||
196 | rcu_read_lock(); | 221 | rcu_read_lock(); |
197 | 222 | ||
198 | list_for_each_entry_rcu(hook, &step_hook, node) { | 223 | list_for_each_entry_rcu(hook, list, node) { |
199 | retval = hook->fn(regs, esr); | 224 | retval = hook->fn(regs, esr); |
200 | if (retval == DBG_HOOK_HANDLED) | 225 | if (retval == DBG_HOOK_HANDLED) |
201 | break; | 226 | break; |
@@ -222,7 +247,7 @@ static void send_user_sigtrap(int si_code) | |||
222 | "User debug trap"); | 247 | "User debug trap"); |
223 | } | 248 | } |
224 | 249 | ||
225 | static int single_step_handler(unsigned long addr, unsigned int esr, | 250 | static int single_step_handler(unsigned long unused, unsigned int esr, |
226 | struct pt_regs *regs) | 251 | struct pt_regs *regs) |
227 | { | 252 | { |
228 | bool handler_found = false; | 253 | bool handler_found = false; |
@@ -234,10 +259,6 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
234 | if (!reinstall_suspended_bps(regs)) | 259 | if (!reinstall_suspended_bps(regs)) |
235 | return 0; | 260 | return 0; |
236 | 261 | ||
237 | #ifdef CONFIG_KPROBES | ||
238 | if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED) | ||
239 | handler_found = true; | ||
240 | #endif | ||
241 | if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED) | 262 | if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED) |
242 | handler_found = true; | 263 | handler_found = true; |
243 | 264 | ||
@@ -264,61 +285,59 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
264 | } | 285 | } |
265 | NOKPROBE_SYMBOL(single_step_handler); | 286 | NOKPROBE_SYMBOL(single_step_handler); |
266 | 287 | ||
267 | /* | 288 | static LIST_HEAD(user_break_hook); |
268 | * Breakpoint handler is re-entrant as another breakpoint can | 289 | static LIST_HEAD(kernel_break_hook); |
269 | * hit within breakpoint handler, especially in kprobes. | ||
270 | * Use reader/writer locks instead of plain spinlock. | ||
271 | */ | ||
272 | static LIST_HEAD(break_hook); | ||
273 | static DEFINE_SPINLOCK(break_hook_lock); | ||
274 | 290 | ||
275 | void register_break_hook(struct break_hook *hook) | 291 | void register_user_break_hook(struct break_hook *hook) |
276 | { | 292 | { |
277 | spin_lock(&break_hook_lock); | 293 | register_debug_hook(&hook->node, &user_break_hook); |
278 | list_add_rcu(&hook->node, &break_hook); | ||
279 | spin_unlock(&break_hook_lock); | ||
280 | } | 294 | } |
281 | 295 | ||
282 | void unregister_break_hook(struct break_hook *hook) | 296 | void unregister_user_break_hook(struct break_hook *hook) |
283 | { | 297 | { |
284 | spin_lock(&break_hook_lock); | 298 | unregister_debug_hook(&hook->node); |
285 | list_del_rcu(&hook->node); | 299 | } |
286 | spin_unlock(&break_hook_lock); | 300 | |
287 | synchronize_rcu(); | 301 | void register_kernel_break_hook(struct break_hook *hook) |
302 | { | ||
303 | register_debug_hook(&hook->node, &kernel_break_hook); | ||
304 | } | ||
305 | |||
306 | void unregister_kernel_break_hook(struct break_hook *hook) | ||
307 | { | ||
308 | unregister_debug_hook(&hook->node); | ||
288 | } | 309 | } |
289 | 310 | ||
290 | static int call_break_hook(struct pt_regs *regs, unsigned int esr) | 311 | static int call_break_hook(struct pt_regs *regs, unsigned int esr) |
291 | { | 312 | { |
292 | struct break_hook *hook; | 313 | struct break_hook *hook; |
314 | struct list_head *list; | ||
293 | int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; | 315 | int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; |
294 | 316 | ||
317 | list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; | ||
318 | |||
295 | rcu_read_lock(); | 319 | rcu_read_lock(); |
296 | list_for_each_entry_rcu(hook, &break_hook, node) | 320 | list_for_each_entry_rcu(hook, list, node) { |
297 | if ((esr & hook->esr_mask) == hook->esr_val) | 321 | unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
322 | |||
323 | if ((comment & ~hook->mask) == hook->imm) | ||
298 | fn = hook->fn; | 324 | fn = hook->fn; |
325 | } | ||
299 | rcu_read_unlock(); | 326 | rcu_read_unlock(); |
300 | 327 | ||
301 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; | 328 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; |
302 | } | 329 | } |
303 | NOKPROBE_SYMBOL(call_break_hook); | 330 | NOKPROBE_SYMBOL(call_break_hook); |
304 | 331 | ||
305 | static int brk_handler(unsigned long addr, unsigned int esr, | 332 | static int brk_handler(unsigned long unused, unsigned int esr, |
306 | struct pt_regs *regs) | 333 | struct pt_regs *regs) |
307 | { | 334 | { |
308 | bool handler_found = false; | 335 | if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) |
309 | 336 | return 0; | |
310 | #ifdef CONFIG_KPROBES | ||
311 | if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) { | ||
312 | if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED) | ||
313 | handler_found = true; | ||
314 | } | ||
315 | #endif | ||
316 | if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED) | ||
317 | handler_found = true; | ||
318 | 337 | ||
319 | if (!handler_found && user_mode(regs)) { | 338 | if (user_mode(regs)) { |
320 | send_user_sigtrap(TRAP_BRKPT); | 339 | send_user_sigtrap(TRAP_BRKPT); |
321 | } else if (!handler_found) { | 340 | } else { |
322 | pr_warn("Unexpected kernel BRK exception at EL1\n"); | 341 | pr_warn("Unexpected kernel BRK exception at EL1\n"); |
323 | return -EFAULT; | 342 | return -EFAULT; |
324 | } | 343 | } |
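
After the rework above, BRK hooks match on the 16-bit ESR comment immediate (.imm, optionally widened by .mask) and register on explicit user or kernel lists; that split is what lets the kgdb, kprobes and uprobes hunks further down drop their user_mode() checks. A hedged sketch of a kernel-side client of the new interface; MY_BRK_IMM is hypothetical (real users reserve an immediate in asm/brk-imm.h):

```c
#define MY_BRK_IMM	0x7ff	/* hypothetical; not a reserved immediate */

static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_info("BRK #%u at %pS\n", MY_BRK_IMM,
		(void *)instruction_pointer(regs));
	return DBG_HOOK_HANDLED;	/* consumed: no EL1 BRK warning */
}

static struct break_hook my_break_hook = {
	.fn	= my_brk_handler,
	.imm	= MY_BRK_IMM,	/* .mask left 0: exact immediate match */
};

static int __init my_hook_init(void)
{
	register_kernel_break_hook(&my_break_hook);
	return 0;
}
core_initcall(my_hook_init);
```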
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index c50a7a75f2e0..1a7811b7e3c4 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -336,6 +336,21 @@ alternative_if ARM64_WORKAROUND_845719 | |||
336 | alternative_else_nop_endif | 336 | alternative_else_nop_endif |
337 | #endif | 337 | #endif |
338 | 3: | 338 | 3: |
339 | #ifdef CONFIG_ARM64_ERRATUM_1188873 | ||
340 | alternative_if_not ARM64_WORKAROUND_1188873 | ||
341 | b 4f | ||
342 | alternative_else_nop_endif | ||
343 | /* | ||
344 | * if (x22.mode32 == cntkctl_el1.el0vcten) | ||
345 | * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten | ||
346 | */ | ||
347 | mrs x1, cntkctl_el1 | ||
348 | eon x0, x1, x22, lsr #3 | ||
349 | tbz x0, #1, 4f | ||
350 | eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN | ||
351 | msr cntkctl_el1, x1 | ||
352 | 4: | ||
353 | #endif | ||
339 | apply_ssbd 0, x0, x1 | 354 | apply_ssbd 0, x0, x1 |
340 | .endif | 355 | .endif |
341 | 356 | ||
@@ -362,11 +377,11 @@ alternative_else_nop_endif | |||
362 | .if \el == 0 | 377 | .if \el == 0 |
363 | alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 | 378 | alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 |
364 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | 379 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
365 | bne 4f | 380 | bne 5f |
366 | msr far_el1, x30 | 381 | msr far_el1, x30 |
367 | tramp_alias x30, tramp_exit_native | 382 | tramp_alias x30, tramp_exit_native |
368 | br x30 | 383 | br x30 |
369 | 4: | 384 | 5: |
370 | tramp_alias x30, tramp_exit_compat | 385 | tramp_alias x30, tramp_exit_compat |
371 | br x30 | 386 | br x30 |
372 | #endif | 387 | #endif |
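
For reference, a hedged C restatement of the eon/tbz sequence above (illustration only, not kernel code): x22 holds the saved SPSR, whose M[4] bit flags a return to AArch32, and the LSR #3 lines that bit up with CNTKCTL_EL1.EL0VCTEN (bit 1); EON computes the XNOR, so the branch is taken exactly when the two bits already agree and the counter enable must be flipped:

```c
static inline u64 erratum_1188873_cntkctl(u64 cntkctl, u64 spsr)
{
	bool mode32 = spsr & PSR_MODE32_BIT;			/* M[4] */
	bool vcten  = cntkctl & ARCH_TIMER_USR_VCT_ACCESS_EN;	/* bit 1 */

	/* Trap the EL0 virtual counter for 32-bit tasks so the erratum
	 * workaround can emulate reads; expose it for 64-bit tasks. */
	if (mode32 == vcten)
		cntkctl ^= ARCH_TIMER_USR_VCT_ACCESS_EN;
	return cntkctl;
}
```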
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5ebe73b69961..735cf1f8b109 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
@@ -1258,14 +1258,14 @@ static inline void fpsimd_hotplug_init(void) { } | |||
1258 | */ | 1258 | */ |
1259 | static int __init fpsimd_init(void) | 1259 | static int __init fpsimd_init(void) |
1260 | { | 1260 | { |
1261 | if (elf_hwcap & HWCAP_FP) { | 1261 | if (cpu_have_named_feature(FP)) { |
1262 | fpsimd_pm_init(); | 1262 | fpsimd_pm_init(); |
1263 | fpsimd_hotplug_init(); | 1263 | fpsimd_hotplug_init(); |
1264 | } else { | 1264 | } else { |
1265 | pr_notice("Floating-point is not implemented\n"); | 1265 | pr_notice("Floating-point is not implemented\n"); |
1266 | } | 1266 | } |
1267 | 1267 | ||
1268 | if (!(elf_hwcap & HWCAP_ASIMD)) | 1268 | if (!cpu_have_named_feature(ASIMD)) |
1269 | pr_notice("Advanced SIMD is not implemented\n"); | 1269 | pr_notice("Advanced SIMD is not implemented\n"); |
1270 | 1270 | ||
1271 | return sve_sysctl_init(); | 1271 | return sve_sysctl_init(); |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index eecf7927dab0..fcae3f85c6cd 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -505,7 +505,7 @@ ENTRY(el2_setup) | |||
505 | * kernel is intended to run at EL2. | 505 | * kernel is intended to run at EL2. |
506 | */ | 506 | */ |
507 | mrs x2, id_aa64mmfr1_el1 | 507 | mrs x2, id_aa64mmfr1_el1 |
508 | ubfx x2, x2, #8, #4 | 508 | ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4 |
509 | #else | 509 | #else |
510 | mov x2, xzr | 510 | mov x2, xzr |
511 | #endif | 511 | #endif |
@@ -538,7 +538,7 @@ set_hcr: | |||
538 | #ifdef CONFIG_ARM_GIC_V3 | 538 | #ifdef CONFIG_ARM_GIC_V3 |
539 | /* GICv3 system register access */ | 539 | /* GICv3 system register access */ |
540 | mrs x0, id_aa64pfr0_el1 | 540 | mrs x0, id_aa64pfr0_el1 |
541 | ubfx x0, x0, #24, #4 | 541 | ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 |
542 | cbz x0, 3f | 542 | cbz x0, 3f |
543 | 543 | ||
544 | mrs_s x0, SYS_ICC_SRE_EL2 | 544 | mrs_s x0, SYS_ICC_SRE_EL2 |
@@ -564,8 +564,8 @@ set_hcr: | |||
564 | #endif | 564 | #endif |
565 | 565 | ||
566 | /* EL2 debug */ | 566 | /* EL2 debug */ |
567 | mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer | 567 | mrs x1, id_aa64dfr0_el1 |
568 | sbfx x0, x1, #8, #4 | 568 | sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 |
569 | cmp x0, #1 | 569 | cmp x0, #1 |
570 | b.lt 4f // Skip if no PMU present | 570 | b.lt 4f // Skip if no PMU present |
571 | mrs x0, pmcr_el0 // Disable debug access traps | 571 | mrs x0, pmcr_el0 // Disable debug access traps |
@@ -574,7 +574,7 @@ set_hcr: | |||
574 | csel x3, xzr, x0, lt // all PMU counters from EL1 | 574 | csel x3, xzr, x0, lt // all PMU counters from EL1 |
575 | 575 | ||
576 | /* Statistical profiling */ | 576 | /* Statistical profiling */ |
577 | ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer | 577 | ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 |
578 | cbz x0, 7f // Skip if SPE not present | 578 | cbz x0, 7f // Skip if SPE not present |
579 | cbnz x2, 6f // VHE? | 579 | cbnz x2, 6f // VHE? |
580 | mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, | 580 | mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, |
@@ -684,7 +684,7 @@ ENTRY(__boot_cpu_mode) | |||
684 | * with MMU turned off. | 684 | * with MMU turned off. |
685 | */ | 685 | */ |
686 | ENTRY(__early_cpu_boot_status) | 686 | ENTRY(__early_cpu_boot_status) |
687 | .long 0 | 687 | .quad 0 |
688 | 688 | ||
689 | .popsection | 689 | .popsection |
690 | 690 | ||
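
The head.S hunks above replace magic bit positions with the named ID-register shifts, and widen __early_cpu_boot_status from .long to .quad so its size matches the 64-bit stores made to it during early boot. In C terms, each ubfx is an unsigned 4-bit field extract at a named shift; a hedged sketch mirroring the existing cpuid_feature_extract_unsigned_field() helper:

```c
/* Hedged C equivalent of the ubfx instructions above: pull an unsigned
 * 4-bit ID-register field out at a named shift. */
static inline unsigned int id_field(u64 reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

/* e.g. id_field(read_sysreg(id_aa64mmfr1_el1), ID_AA64MMFR1_VHE_SHIFT) */
```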
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 691854b77c7f..30853d5b7859 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c | |||
@@ -244,9 +244,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, | |||
244 | 244 | ||
245 | static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) | 245 | static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) |
246 | { | 246 | { |
247 | if (user_mode(regs)) | ||
248 | return DBG_HOOK_ERROR; | ||
249 | |||
250 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 247 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
251 | return DBG_HOOK_HANDLED; | 248 | return DBG_HOOK_HANDLED; |
252 | } | 249 | } |
@@ -254,9 +251,6 @@ NOKPROBE_SYMBOL(kgdb_brk_fn) | |||
254 | 251 | ||
255 | static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) | 252 | static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) |
256 | { | 253 | { |
257 | if (user_mode(regs)) | ||
258 | return DBG_HOOK_ERROR; | ||
259 | |||
260 | compiled_break = 1; | 254 | compiled_break = 1; |
261 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 255 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
262 | 256 | ||
@@ -266,7 +260,7 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); | |||
266 | 260 | ||
267 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) | 261 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) |
268 | { | 262 | { |
269 | if (user_mode(regs) || !kgdb_single_step) | 263 | if (!kgdb_single_step) |
270 | return DBG_HOOK_ERROR; | 264 | return DBG_HOOK_ERROR; |
271 | 265 | ||
272 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 266 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
@@ -275,15 +269,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) | |||
275 | NOKPROBE_SYMBOL(kgdb_step_brk_fn); | 269 | NOKPROBE_SYMBOL(kgdb_step_brk_fn); |
276 | 270 | ||
277 | static struct break_hook kgdb_brkpt_hook = { | 271 | static struct break_hook kgdb_brkpt_hook = { |
278 | .esr_mask = 0xffffffff, | 272 | .fn = kgdb_brk_fn, |
279 | .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM), | 273 | .imm = KGDB_DYN_DBG_BRK_IMM, |
280 | .fn = kgdb_brk_fn | ||
281 | }; | 274 | }; |
282 | 275 | ||
283 | static struct break_hook kgdb_compiled_brkpt_hook = { | 276 | static struct break_hook kgdb_compiled_brkpt_hook = { |
284 | .esr_mask = 0xffffffff, | 277 | .fn = kgdb_compiled_brk_fn, |
285 | .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM), | 278 | .imm = KGDB_COMPILED_DBG_BRK_IMM, |
286 | .fn = kgdb_compiled_brk_fn | ||
287 | }; | 279 | }; |
288 | 280 | ||
289 | static struct step_hook kgdb_step_hook = { | 281 | static struct step_hook kgdb_step_hook = { |
@@ -332,9 +324,9 @@ int kgdb_arch_init(void) | |||
332 | if (ret != 0) | 324 | if (ret != 0) |
333 | return ret; | 325 | return ret; |
334 | 326 | ||
335 | register_break_hook(&kgdb_brkpt_hook); | 327 | register_kernel_break_hook(&kgdb_brkpt_hook); |
336 | register_break_hook(&kgdb_compiled_brkpt_hook); | 328 | register_kernel_break_hook(&kgdb_compiled_brkpt_hook); |
337 | register_step_hook(&kgdb_step_hook); | 329 | register_kernel_step_hook(&kgdb_step_hook); |
338 | return 0; | 330 | return 0; |
339 | } | 331 | } |
340 | 332 | ||
@@ -345,9 +337,9 @@ int kgdb_arch_init(void) | |||
345 | */ | 337 | */ |
346 | void kgdb_arch_exit(void) | 338 | void kgdb_arch_exit(void) |
347 | { | 339 | { |
348 | unregister_break_hook(&kgdb_brkpt_hook); | 340 | unregister_kernel_break_hook(&kgdb_brkpt_hook); |
349 | unregister_break_hook(&kgdb_compiled_brkpt_hook); | 341 | unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook); |
350 | unregister_step_hook(&kgdb_step_hook); | 342 | unregister_kernel_step_hook(&kgdb_step_hook); |
351 | unregister_die_notifier(&kgdb_notifier); | 343 | unregister_die_notifier(&kgdb_notifier); |
352 | } | 344 | } |
353 | 345 | ||
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 997e6b27ff6a..49825e9e421e 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S | |||
@@ -1,29 +1,14 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Low-level user helpers placed in the vectors page for AArch32. | 3 | * AArch32 user helpers. |
3 | * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. | 4 | * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. |
4 | * | 5 | * |
5 | * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> | 6 | * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> |
6 | * Copyright (C) 2012 ARM Ltd. | 7 | * Copyright (C) 2012-2018 ARM Ltd. |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | 8 | * |
17 | * You should have received a copy of the GNU General Public License | 9 | * The kuser helpers below are mapped at a fixed address by |
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 10 | * aarch32_setup_additional_pages() and are provided for compatibility |
19 | * | 11 | reasons with 32-bit (aarch32) applications that need them. |
20 | * | ||
21 | * AArch32 user helpers. | ||
22 | * | ||
23 | * Each segment is 32-byte aligned and will be moved to the top of the high | ||
24 | * vector page. New segments (if ever needed) must be added in front of | ||
25 | * existing ones. This mechanism should be used only for things that are | ||
26 | * really small and justified, and not be abused freely. | ||
27 | * | 12 | * |
28 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. | 13 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. |
29 | */ | 14 | */ |
@@ -77,42 +62,3 @@ __kuser_helper_version: // 0xffff0ffc | |||
77 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) | 62 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) |
78 | .globl __kuser_helper_end | 63 | .globl __kuser_helper_end |
79 | __kuser_helper_end: | 64 | __kuser_helper_end: |
80 | |||
81 | /* | ||
82 | * AArch32 sigreturn code | ||
83 | * | ||
84 | * For ARM syscalls, the syscall number has to be loaded into r7. | ||
85 | * We do not support an OABI userspace. | ||
86 | * | ||
87 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore | ||
88 | * need two 16-bit instructions. | ||
89 | */ | ||
90 | .globl __aarch32_sigret_code_start | ||
91 | __aarch32_sigret_code_start: | ||
92 | |||
93 | /* | ||
94 | * ARM Code | ||
95 | */ | ||
96 | .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn | ||
97 | .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn | ||
98 | |||
99 | /* | ||
100 | * Thumb code | ||
101 | */ | ||
102 | .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn | ||
103 | .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn | ||
104 | |||
105 | /* | ||
106 | * ARM code | ||
107 | */ | ||
108 | .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn | ||
109 | .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn | ||
110 | |||
111 | /* | ||
112 | * Thumb code | ||
113 | */ | ||
114 | .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn | ||
115 | .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn | ||
116 | |||
117 | .globl __aarch32_sigret_code_end | ||
118 | __aarch32_sigret_code_end: | ||
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 4addb38bc250..6164d389eed6 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -431,7 +431,7 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) | |||
431 | return val; | 431 | return val; |
432 | } | 432 | } |
433 | 433 | ||
434 | static inline u64 armv8pmu_read_counter(struct perf_event *event) | 434 | static u64 armv8pmu_read_counter(struct perf_event *event) |
435 | { | 435 | { |
436 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | 436 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
437 | struct hw_perf_event *hwc = &event->hw; | 437 | struct hw_perf_event *hwc = &event->hw; |
@@ -468,7 +468,7 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event, | |||
468 | } | 468 | } |
469 | } | 469 | } |
470 | 470 | ||
471 | static inline void armv8pmu_write_counter(struct perf_event *event, u64 value) | 471 | static void armv8pmu_write_counter(struct perf_event *event, u64 value) |
472 | { | 472 | { |
473 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | 473 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
474 | struct hw_perf_event *hwc = &event->hw; | 474 | struct hw_perf_event *hwc = &event->hw; |
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 7a679caf4585..2509fcb6d404 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c | |||
@@ -439,15 +439,12 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr) | |||
439 | return DBG_HOOK_ERROR; | 439 | return DBG_HOOK_ERROR; |
440 | } | 440 | } |
441 | 441 | ||
442 | int __kprobes | 442 | static int __kprobes |
443 | kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) | 443 | kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) |
444 | { | 444 | { |
445 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 445 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
446 | int retval; | 446 | int retval; |
447 | 447 | ||
448 | if (user_mode(regs)) | ||
449 | return DBG_HOOK_ERROR; | ||
450 | |||
451 | /* return error if this is not our step */ | 448 | /* return error if this is not our step */ |
452 | retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); | 449 | retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); |
453 | 450 | ||
@@ -461,16 +458,22 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) | |||
461 | return retval; | 458 | return retval; |
462 | } | 459 | } |
463 | 460 | ||
464 | int __kprobes | 461 | static struct step_hook kprobes_step_hook = { |
462 | .fn = kprobe_single_step_handler, | ||
463 | }; | ||
464 | |||
465 | static int __kprobes | ||
465 | kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) | 466 | kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) |
466 | { | 467 | { |
467 | if (user_mode(regs)) | ||
468 | return DBG_HOOK_ERROR; | ||
469 | |||
470 | kprobe_handler(regs); | 468 | kprobe_handler(regs); |
471 | return DBG_HOOK_HANDLED; | 469 | return DBG_HOOK_HANDLED; |
472 | } | 470 | } |
473 | 471 | ||
472 | static struct break_hook kprobes_break_hook = { | ||
473 | .imm = KPROBES_BRK_IMM, | ||
474 | .fn = kprobe_breakpoint_handler, | ||
475 | }; | ||
476 | |||
474 | /* | 477 | /* |
475 | * Provide a blacklist of symbols identifying ranges which cannot be kprobed. | 478 | * Provide a blacklist of symbols identifying ranges which cannot be kprobed. |
476 | * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). | 479 | * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). |
@@ -599,5 +602,8 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) | |||
599 | 602 | ||
600 | int __init arch_init_kprobes(void) | 603 | int __init arch_init_kprobes(void) |
601 | { | 604 | { |
605 | register_kernel_break_hook(&kprobes_break_hook); | ||
606 | register_kernel_step_hook(&kprobes_step_hook); | ||
607 | |||
602 | return 0; | 608 | return 0; |
603 | } | 609 | } |
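The kprobes conversion above is the template for the reworked debug-exception API: rather than each handler open-coding a user_mode() check, a handler is wrapped in a break_hook or step_hook and registered through the kernel-only variants, so it is only ever invoked for kernel-mode exceptions. A minimal sketch of a hypothetical consumer of that API (MY_BRK_IMM and the printed message are invented for illustration; the hook structure and registration call are the ones this series introduces):

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <asm/debug-monitors.h>
    #include <asm/insn.h>
    #include <asm/traps.h>

    #define MY_BRK_IMM	0x7ff	/* hypothetical immediate, must not clash */

    static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
    {
	    /* No user_mode(regs) check: kernel hooks only see kernel-mode BRKs. */
	    pr_info("hit BRK #%#x at %pS\n", MY_BRK_IMM, (void *)regs->pc);
	    /* Consume the exception and step over the 4-byte BRK instruction. */
	    arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	    return DBG_HOOK_HANDLED;
    }

    static struct break_hook my_break_hook = {
	    .fn  = my_brk_handler,
	    .imm = MY_BRK_IMM,
    };

    static int __init my_brk_init(void)
    {
	    register_kernel_break_hook(&my_break_hook);
	    return 0;
    }
    arch_initcall(my_brk_init);

A user-side hook would use register_user_break_hook() instead, as the uprobes conversion below does.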
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 636ca0119c0e..605945eac1f8 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c | |||
@@ -171,7 +171,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, | |||
171 | static int uprobe_breakpoint_handler(struct pt_regs *regs, | 171 | static int uprobe_breakpoint_handler(struct pt_regs *regs, |
172 | unsigned int esr) | 172 | unsigned int esr) |
173 | { | 173 | { |
174 | if (user_mode(regs) && uprobe_pre_sstep_notifier(regs)) | 174 | if (uprobe_pre_sstep_notifier(regs)) |
175 | return DBG_HOOK_HANDLED; | 175 | return DBG_HOOK_HANDLED; |
176 | 176 | ||
177 | return DBG_HOOK_ERROR; | 177 | return DBG_HOOK_ERROR; |
@@ -182,21 +182,16 @@ static int uprobe_single_step_handler(struct pt_regs *regs, | |||
182 | { | 182 | { |
183 | struct uprobe_task *utask = current->utask; | 183 | struct uprobe_task *utask = current->utask; |
184 | 184 | ||
185 | if (user_mode(regs)) { | 185 | WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4)); |
186 | WARN_ON(utask && | 186 | if (uprobe_post_sstep_notifier(regs)) |
187 | (instruction_pointer(regs) != utask->xol_vaddr + 4)); | 187 | return DBG_HOOK_HANDLED; |
188 | |||
189 | if (uprobe_post_sstep_notifier(regs)) | ||
190 | return DBG_HOOK_HANDLED; | ||
191 | } | ||
192 | 188 | ||
193 | return DBG_HOOK_ERROR; | 189 | return DBG_HOOK_ERROR; |
194 | } | 190 | } |
195 | 191 | ||
196 | /* uprobe breakpoint handler hook */ | 192 | /* uprobe breakpoint handler hook */ |
197 | static struct break_hook uprobes_break_hook = { | 193 | static struct break_hook uprobes_break_hook = { |
198 | .esr_mask = BRK64_ESR_MASK, | 194 | .imm = UPROBES_BRK_IMM, |
199 | .esr_val = BRK64_ESR_UPROBES, | ||
200 | .fn = uprobe_breakpoint_handler, | 195 | .fn = uprobe_breakpoint_handler, |
201 | }; | 196 | }; |
202 | 197 | ||
@@ -207,8 +202,8 @@ static struct step_hook uprobes_step_hook = { | |||
207 | 202 | ||
208 | static int __init arch_init_uprobes(void) | 203 | static int __init arch_init_uprobes(void) |
209 | { | 204 | { |
210 | register_break_hook(&uprobes_break_hook); | 205 | register_user_break_hook(&uprobes_break_hook); |
211 | register_step_hook(&uprobes_step_hook); | 206 | register_user_step_hook(&uprobes_step_hook); |
212 | 207 | ||
213 | return 0; | 208 | return 0; |
214 | } | 209 | } |
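The .esr_mask/.esr_val pair collapses to a single .imm because the rest of the ESR for a BRK64 exception is fixed: the exception class (0x3c) and the IL bit contribute 0xf2000000, and the BRK immediate lands in the low 16 ISS "comment" bits. A userspace sketch of that arithmetic (the 0x005 value for UPROBES_BRK_IMM is an assumption for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define ESR_ELx_EC_BRK64	0x3cu		/* exception class: 64-bit BRK */
    #define ESR_ELx_IL		(1u << 25)	/* 32-bit instruction length */
    #define UPROBES_BRK_IMM	0x005u		/* assumed value */

    int main(void)
    {
	    uint32_t esr = (ESR_ELx_EC_BRK64 << 26) | ESR_ELx_IL | UPROBES_BRK_IMM;

	    /* Prints 0xf2000005: 0xf2000000 is the fixed EC+IL part the old
	     * .esr_val fields open-coded; the hook now stores only the low bits. */
	    printf("BRK64 ESR for imm %#x: %#x\n", UPROBES_BRK_IMM, esr);
	    return 0;
    }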
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index cb7800acd19f..caea6e25db2a 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -403,8 +403,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
403 | if (ka->sa.sa_flags & SA_SIGINFO) | 403 | if (ka->sa.sa_flags & SA_SIGINFO) |
404 | idx += 3; | 404 | idx += 3; |
405 | 405 | ||
406 | retcode = AARCH32_VECTORS_BASE + | 406 | retcode = (unsigned long)current->mm->context.vdso + |
407 | AARCH32_KERN_SIGRET_CODE_OFFSET + | ||
408 | (idx << 2) + thumb; | 407 | (idx << 2) + thumb; |
409 | } | 408 | } |
410 | 409 | ||
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S new file mode 100644 index 000000000000..475d30d471ac --- /dev/null +++ b/arch/arm64/kernel/sigreturn32.S | |||
@@ -0,0 +1,46 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * AArch32 sigreturn code. | ||
4 | * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. | ||
5 | * | ||
6 | * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> | ||
7 | * Copyright (C) 2012-2018 ARM Ltd. | ||
8 | * | ||
9 | * For ARM syscalls, the syscall number has to be loaded into r7. | ||
10 | * We do not support an OABI userspace. | ||
11 | * | ||
12 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore | ||
13 | * need two 16-bit instructions. | ||
14 | */ | ||
15 | |||
16 | #include <asm/unistd.h> | ||
17 | |||
18 | .globl __aarch32_sigret_code_start | ||
19 | __aarch32_sigret_code_start: | ||
20 | |||
21 | /* | ||
22 | * ARM code | ||
23 | */ | ||
24 | .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn | ||
25 | .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn | ||
26 | |||
27 | /* | ||
28 | * Thumb code | ||
29 | */ | ||
30 | .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn | ||
31 | .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn | ||
32 | |||
33 | /* | ||
34 | * ARM code | ||
35 | */ | ||
36 | .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn | ||
37 | .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn | ||
38 | |||
39 | /* | ||
40 | * Thumb code | ||
41 | */ | ||
42 | .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn | ||
43 | .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn | ||
44 | |||
45 | .globl __aarch32_sigret_code_end | ||
46 | __aarch32_sigret_code_end: | ||
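The leading byte of each .byte line doubles as the instruction's immediate: A32 instructions are stored little-endian, so mov r7, #imm8 (0xe3a070xx) and svc #imm8 (0xef0000xx) both begin in memory with the immediate byte, as do the 16-bit Thumb encodings movs r7, #imm8 (0x27xx) and svc #imm8 (0xdfxx). compat_setup_return() (in the signal32.c hunk above) selects one of the four two-instruction entries at byte offsets 0, 8, 12 and 20 of this page via (idx << 2) + thumb, the low bit switching the CPU into Thumb state. A worked userspace sketch, assuming the AArch32 sigreturn syscall number 119:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
	    uint8_t nr = 119;	/* assumed __NR_compat_sigreturn */

	    /* A32: the immediate is the low byte, hence emitted first. */
	    uint32_t mov = 0xe3a07000u | nr;	/* mov r7, #119 -> 0xe3a07077 */
	    uint32_t svc = 0xef000000u | nr;	/* svc #119     -> 0xef000077 */
	    printf("A32: %08x %08x\n", mov, svc);

	    /* T16: movs r7 is 0x2700|imm8 and svc is 0xdf00|imm8, which is
	     * why the 0x27 line is the mov and the 0xdf line is the svc. */
	    printf("T16: %04x %04x\n", 0x2700 | nr, 0xdf00 | nr);
	    return 0;
    }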
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index b44065fb1616..6f91e8116514 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | 32 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, |
33 | unsigned long, prot, unsigned long, flags, | 33 | unsigned long, prot, unsigned long, flags, |
34 | unsigned long, fd, off_t, off) | 34 | unsigned long, fd, unsigned long, off) |
35 | { | 35 | { |
36 | if (offset_in_page(off) != 0) | 36 | if (offset_in_page(off) != 0) |
37 | return -EINVAL; | 37 | return -EINVAL; |
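The off_t to unsigned long change matters because sys_mmap() scales the byte offset to a page frame number with off >> PAGE_SHIFT: shifting a signed offset whose top bit is set smears the sign into the resulting pgoff, whereas the logical shift of an unsigned value yields what the generic mmap code expects. A quick userspace illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
	    int64_t  soff = INT64_MIN;	/* page-aligned offset, top bit set */
	    uint64_t uoff = (uint64_t)soff;

	    /* Right-shifting a negative value is implementation-defined in C;
	     * arm64 compilers emit an arithmetic shift, i.e. -2^51 here. */
	    printf("signed   off >> 12 = %lld\n", (long long)(soff >> 12));
	    /* The unsigned shift gives the intended pgoff, 2^51. */
	    printf("unsigned off >> 12 = %#llx\n",
		   (unsigned long long)(uoff >> 12));
	    return 0;
    }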
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 29755989f616..ade32046f3fe 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -462,6 +462,9 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) | |||
462 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ | 462 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ |
463 | __user_cache_maint("dc civac", address, ret); | 463 | __user_cache_maint("dc civac", address, ret); |
464 | break; | 464 | break; |
465 | case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */ | ||
466 | __user_cache_maint("sys 3, c7, c13, 1", address, ret); | ||
467 | break; | ||
465 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */ | 468 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */ |
466 | __user_cache_maint("sys 3, c7, c12, 1", address, ret); | 469 | __user_cache_maint("sys 3, c7, c12, 1", address, ret); |
467 | break; | 470 | break; |
@@ -496,7 +499,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) | |||
496 | { | 499 | { |
497 | int rt = ESR_ELx_SYS64_ISS_RT(esr); | 500 | int rt = ESR_ELx_SYS64_ISS_RT(esr); |
498 | 501 | ||
499 | pt_regs_write_reg(regs, rt, arch_counter_get_cntvct()); | 502 | pt_regs_write_reg(regs, rt, arch_timer_read_counter()); |
500 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); | 503 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
501 | } | 504 | } |
502 | 505 | ||
@@ -668,7 +671,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) | |||
668 | { | 671 | { |
669 | int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; | 672 | int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; |
670 | int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; | 673 | int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; |
671 | u64 val = arch_counter_get_cntvct(); | 674 | u64 val = arch_timer_read_counter(); |
672 | 675 | ||
673 | pt_regs_write_reg(regs, rt, lower_32_bits(val)); | 676 | pt_regs_write_reg(regs, rt, lower_32_bits(val)); |
674 | pt_regs_write_reg(regs, rt2, upper_32_bits(val)); | 677 | pt_regs_write_reg(regs, rt2, upper_32_bits(val)); |
@@ -950,9 +953,6 @@ int is_valid_bugaddr(unsigned long addr) | |||
950 | 953 | ||
951 | static int bug_handler(struct pt_regs *regs, unsigned int esr) | 954 | static int bug_handler(struct pt_regs *regs, unsigned int esr) |
952 | { | 955 | { |
953 | if (user_mode(regs)) | ||
954 | return DBG_HOOK_ERROR; | ||
955 | |||
956 | switch (report_bug(regs->pc, regs)) { | 956 | switch (report_bug(regs->pc, regs)) { |
957 | case BUG_TRAP_TYPE_BUG: | 957 | case BUG_TRAP_TYPE_BUG: |
958 | die("Oops - BUG", regs, 0); | 958 | die("Oops - BUG", regs, 0); |
@@ -972,9 +972,8 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr) | |||
972 | } | 972 | } |
973 | 973 | ||
974 | static struct break_hook bug_break_hook = { | 974 | static struct break_hook bug_break_hook = { |
975 | .esr_val = 0xf2000000 | BUG_BRK_IMM, | ||
976 | .esr_mask = 0xffffffff, | ||
977 | .fn = bug_handler, | 975 | .fn = bug_handler, |
976 | .imm = BUG_BRK_IMM, | ||
978 | }; | 977 | }; |
979 | 978 | ||
980 | #ifdef CONFIG_KASAN_SW_TAGS | 979 | #ifdef CONFIG_KASAN_SW_TAGS |
@@ -992,9 +991,6 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr) | |||
992 | u64 addr = regs->regs[0]; | 991 | u64 addr = regs->regs[0]; |
993 | u64 pc = regs->pc; | 992 | u64 pc = regs->pc; |
994 | 993 | ||
995 | if (user_mode(regs)) | ||
996 | return DBG_HOOK_ERROR; | ||
997 | |||
998 | kasan_report(addr, size, write, pc); | 994 | kasan_report(addr, size, write, pc); |
999 | 995 | ||
1000 | /* | 996 | /* |
@@ -1019,13 +1015,10 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr) | |||
1019 | return DBG_HOOK_HANDLED; | 1015 | return DBG_HOOK_HANDLED; |
1020 | } | 1016 | } |
1021 | 1017 | ||
1022 | #define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM) | ||
1023 | #define KASAN_ESR_MASK 0xffffff00 | ||
1024 | |||
1025 | static struct break_hook kasan_break_hook = { | 1018 | static struct break_hook kasan_break_hook = { |
1026 | .esr_val = KASAN_ESR_VAL, | 1019 | .fn = kasan_handler, |
1027 | .esr_mask = KASAN_ESR_MASK, | 1020 | .imm = KASAN_BRK_IMM, |
1028 | .fn = kasan_handler, | 1021 | .mask = KASAN_BRK_MASK, |
1029 | }; | 1022 | }; |
1030 | #endif | 1023 | #endif |
1031 | 1024 | ||
@@ -1037,7 +1030,9 @@ int __init early_brk64(unsigned long addr, unsigned int esr, | |||
1037 | struct pt_regs *regs) | 1030 | struct pt_regs *regs) |
1038 | { | 1031 | { |
1039 | #ifdef CONFIG_KASAN_SW_TAGS | 1032 | #ifdef CONFIG_KASAN_SW_TAGS |
1040 | if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL) | 1033 | unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
1034 | |||
1035 | if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) | ||
1041 | return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; | 1036 | return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; |
1042 | #endif | 1037 | #endif |
1043 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; | 1038 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; |
@@ -1046,8 +1041,8 @@ int __init early_brk64(unsigned long addr, unsigned int esr, | |||
1046 | /* This registration must happen early, before debug_traps_init(). */ | 1041 | /* This registration must happen early, before debug_traps_init(). */ |
1047 | void __init trap_init(void) | 1042 | void __init trap_init(void) |
1048 | { | 1043 | { |
1049 | register_break_hook(&bug_break_hook); | 1044 | register_kernel_break_hook(&bug_break_hook); |
1050 | #ifdef CONFIG_KASAN_SW_TAGS | 1045 | #ifdef CONFIG_KASAN_SW_TAGS |
1051 | register_break_hook(&kasan_break_hook); | 1046 | register_kernel_break_hook(&kasan_break_hook); |
1052 | #endif | 1047 | #endif |
1053 | } | 1048 | } |
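The BUG and KASAN hooks now match on the BRK immediate ("comment") field rather than on full ESR values, and the KASAN hook matches a whole range, since software tag-based KASAN encodes the access size and read/write mode in the low bits of the immediate. A sketch of the early_brk64() predicate above, taking KASAN_BRK_IMM = 0x900 and KASAN_BRK_MASK = 0xff as assumed values:

    #include <stdio.h>

    #define KASAN_BRK_IMM	0x900
    #define KASAN_BRK_MASK	0xff

    static int is_kasan_brk(unsigned int comment)
    {
	    return (comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM;
    }

    int main(void)
    {
	    /* Prints "1 1 0": 0x900..0x9ff match, 0x800 (BUG_BRK_IMM) does not. */
	    printf("%d %d %d\n", is_kasan_brk(0x900), is_kasan_brk(0x9ff),
		   is_kasan_brk(0x800));
	    return 0;
    }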
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 2d419006ad43..8074cbd3a3a8 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * VDSO implementation for AArch64 and vector page setup for AArch32. | 2 | * VDSO implementations. |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ARM Limited | 4 | * Copyright (C) 2012 ARM Limited |
5 | * | 5 | * |
@@ -53,61 +53,129 @@ struct vdso_data *vdso_data = &vdso_data_store.data; | |||
53 | /* | 53 | /* |
54 | * Create and map the vectors page for AArch32 tasks. | 54 | * Create and map the vectors page for AArch32 tasks. |
55 | */ | 55 | */ |
56 | static struct page *vectors_page[1] __ro_after_init; | 56 | #define C_VECTORS 0 |
57 | #define C_SIGPAGE 1 | ||
58 | #define C_PAGES (C_SIGPAGE + 1) | ||
59 | static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init; | ||
60 | static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = { | ||
61 | { | ||
62 | .name = "[vectors]", /* ABI */ | ||
63 | .pages = &aarch32_vdso_pages[C_VECTORS], | ||
64 | }, | ||
65 | { | ||
66 | .name = "[sigpage]", /* ABI */ | ||
67 | .pages = &aarch32_vdso_pages[C_SIGPAGE], | ||
68 | }, | ||
69 | }; | ||
57 | 70 | ||
58 | static int __init alloc_vectors_page(void) | 71 | static int aarch32_alloc_kuser_vdso_page(void) |
59 | { | 72 | { |
60 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 73 | extern char __kuser_helper_start[], __kuser_helper_end[]; |
61 | extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; | ||
62 | |||
63 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | 74 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; |
64 | int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; | 75 | unsigned long vdso_page; |
65 | unsigned long vpage; | ||
66 | 76 | ||
67 | vpage = get_zeroed_page(GFP_ATOMIC); | 77 | if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) |
78 | return 0; | ||
68 | 79 | ||
69 | if (!vpage) | 80 | vdso_page = get_zeroed_page(GFP_ATOMIC); |
81 | if (!vdso_page) | ||
70 | return -ENOMEM; | 82 | return -ENOMEM; |
71 | 83 | ||
72 | /* kuser helpers */ | 84 | memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, |
73 | memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start, | 85 | kuser_sz); |
74 | kuser_sz); | 86 | aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page); |
87 | flush_dcache_page(aarch32_vdso_pages[C_VECTORS]); | ||
88 | return 0; | ||
89 | } | ||
75 | 90 | ||
76 | /* sigreturn code */ | 91 | static int __init aarch32_alloc_vdso_pages(void) |
77 | memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, | 92 | { |
78 | __aarch32_sigret_code_start, sigret_sz); | 93 | extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; |
94 | int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; | ||
95 | unsigned long sigpage; | ||
96 | int ret; | ||
79 | 97 | ||
80 | flush_icache_range(vpage, vpage + PAGE_SIZE); | 98 | sigpage = get_zeroed_page(GFP_ATOMIC); |
81 | vectors_page[0] = virt_to_page(vpage); | 99 | if (!sigpage) |
100 | return -ENOMEM; | ||
82 | 101 | ||
83 | return 0; | 102 | memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz); |
103 | aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage); | ||
104 | flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]); | ||
105 | |||
106 | ret = aarch32_alloc_kuser_vdso_page(); | ||
107 | if (ret) | ||
108 | free_page(sigpage); | ||
109 | |||
110 | return ret; | ||
84 | } | 111 | } |
85 | arch_initcall(alloc_vectors_page); | 112 | arch_initcall(aarch32_alloc_vdso_pages); |
86 | 113 | ||
87 | int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) | 114 | static int aarch32_kuser_helpers_setup(struct mm_struct *mm) |
88 | { | 115 | { |
89 | struct mm_struct *mm = current->mm; | 116 | void *ret; |
90 | unsigned long addr = AARCH32_VECTORS_BASE; | 117 | |
91 | static const struct vm_special_mapping spec = { | 118 | if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) |
92 | .name = "[vectors]", | 119 | return 0; |
93 | .pages = vectors_page, | 120 | |
121 | /* | ||
122 | * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's | ||
123 | * not safe to CoW the page containing the CPU exception vectors. | ||
124 | */ | ||
125 | ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE, | ||
126 | VM_READ | VM_EXEC | | ||
127 | VM_MAYREAD | VM_MAYEXEC, | ||
128 | &aarch32_vdso_spec[C_VECTORS]); | ||
94 | 129 | ||
95 | }; | 130 | return PTR_ERR_OR_ZERO(ret); |
131 | } | ||
132 | |||
133 | static int aarch32_sigreturn_setup(struct mm_struct *mm) | ||
134 | { | ||
135 | unsigned long addr; | ||
96 | void *ret; | 136 | void *ret; |
97 | 137 | ||
98 | if (down_write_killable(&mm->mmap_sem)) | 138 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); |
99 | return -EINTR; | 139 | if (IS_ERR_VALUE(addr)) { |
100 | current->mm->context.vdso = (void *)addr; | 140 | ret = ERR_PTR(addr); |
141 | goto out; | ||
142 | } | ||
101 | 143 | ||
102 | /* Map vectors page at the high address. */ | 144 | /* |
145 | * VM_MAYWRITE is required to allow gdb to Copy-on-Write and | ||
146 | * set breakpoints. | ||
147 | */ | ||
103 | ret = _install_special_mapping(mm, addr, PAGE_SIZE, | 148 | ret = _install_special_mapping(mm, addr, PAGE_SIZE, |
104 | VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, | 149 | VM_READ | VM_EXEC | VM_MAYREAD | |
105 | &spec); | 150 | VM_MAYWRITE | VM_MAYEXEC, |
151 | &aarch32_vdso_spec[C_SIGPAGE]); | ||
152 | if (IS_ERR(ret)) | ||
153 | goto out; | ||
106 | 154 | ||
107 | up_write(&mm->mmap_sem); | 155 | mm->context.vdso = (void *)addr; |
108 | 156 | ||
157 | out: | ||
109 | return PTR_ERR_OR_ZERO(ret); | 158 | return PTR_ERR_OR_ZERO(ret); |
110 | } | 159 | } |
160 | |||
161 | int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
162 | { | ||
163 | struct mm_struct *mm = current->mm; | ||
164 | int ret; | ||
165 | |||
166 | if (down_write_killable(&mm->mmap_sem)) | ||
167 | return -EINTR; | ||
168 | |||
169 | ret = aarch32_kuser_helpers_setup(mm); | ||
170 | if (ret) | ||
171 | goto out; | ||
172 | |||
173 | ret = aarch32_sigreturn_setup(mm); | ||
174 | |||
175 | out: | ||
176 | up_write(&mm->mmap_sem); | ||
177 | return ret; | ||
178 | } | ||
111 | #endif /* CONFIG_COMPAT */ | 179 | #endif /* CONFIG_COMPAT */ |
112 | 180 | ||
113 | static int vdso_mremap(const struct vm_special_mapping *sm, | 181 | static int vdso_mremap(const struct vm_special_mapping *sm, |
@@ -146,8 +214,6 @@ static int __init vdso_init(void) | |||
146 | } | 214 | } |
147 | 215 | ||
148 | vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; | 216 | vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; |
149 | pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", | ||
150 | vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data); | ||
151 | 217 | ||
152 | /* Allocate the vDSO pagelist, plus a page for the data. */ | 218 | /* Allocate the vDSO pagelist, plus a page for the data. */ |
153 | vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), | 219 | vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), |
@@ -232,6 +298,9 @@ void update_vsyscall(struct timekeeper *tk) | |||
232 | vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; | 298 | vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; |
233 | vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; | 299 | vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; |
234 | 300 | ||
301 | /* Read without the seqlock held by clock_getres() */ | ||
302 | WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution); | ||
303 | |||
235 | if (!use_syscall) { | 304 | if (!use_syscall) { |
236 | /* tkr_mono.cycle_last == tkr_raw.cycle_last */ | 305 | /* tkr_mono.cycle_last == tkr_raw.cycle_last */ |
237 | vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; | 306 | vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; |
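From userspace, the effect of the split mappings above is visible in /proc: a 32-bit task on an arm64 kernel gets a "[sigpage]" VMA at a randomized address, plus a "[vectors]" page at the fixed AARCH32_VECTORS_BASE (0xffff0000) when CONFIG_KUSER_HELPERS is enabled. A sketch, to be built as a 32-bit ARM binary:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
	    char line[256];
	    FILE *f = fopen("/proc/self/maps", "r");

	    if (!f)
		    return 1;
	    while (fgets(line, sizeof(line), f))
		    if (strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
			    fputs(line, stdout);
	    fclose(f);
	    return 0;
    }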
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index b215c712d897..744b9dbaba03 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile | |||
@@ -12,17 +12,12 @@ obj-vdso := gettimeofday.o note.o sigreturn.o | |||
12 | targets := $(obj-vdso) vdso.so vdso.so.dbg | 12 | targets := $(obj-vdso) vdso.so vdso.so.dbg |
13 | obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) | 13 | obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) |
14 | 14 | ||
15 | ccflags-y := -shared -fno-common -fno-builtin | 15 | ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \ |
16 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ | 16 | $(call ld-option, --hash-style=sysv) -n -T |
17 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | ||
18 | 17 | ||
19 | # Disable gcov profiling for VDSO code | 18 | # Disable gcov profiling for VDSO code |
20 | GCOV_PROFILE := n | 19 | GCOV_PROFILE := n |
21 | 20 | ||
22 | # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared | ||
23 | # down to collect2, resulting in silent corruption of the vDSO image. | ||
24 | ccflags-y += -Wl,-shared | ||
25 | |||
26 | obj-y += vdso.o | 21 | obj-y += vdso.o |
27 | extra-y += vdso.lds | 22 | extra-y += vdso.lds |
28 | CPPFLAGS_vdso.lds += -P -C -U$(ARCH) | 23 | CPPFLAGS_vdso.lds += -P -C -U$(ARCH) |
@@ -31,8 +26,8 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH) | |||
31 | $(obj)/vdso.o : $(obj)/vdso.so | 26 | $(obj)/vdso.o : $(obj)/vdso.so |
32 | 27 | ||
33 | # Link rule for the .so file, .lds has to be first | 28 | # Link rule for the .so file, .lds has to be first |
34 | $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) | 29 | $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE |
35 | $(call if_changed,vdsold) | 30 | $(call if_changed,ld) |
36 | 31 | ||
37 | # Strip rule for the .so file | 32 | # Strip rule for the .so file |
38 | $(obj)/%.so: OBJCOPYFLAGS := -S | 33 | $(obj)/%.so: OBJCOPYFLAGS := -S |
@@ -42,9 +37,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
42 | # Generate VDSO offsets using helper script | 37 | # Generate VDSO offsets using helper script |
43 | gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh | 38 | gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh |
44 | quiet_cmd_vdsosym = VDSOSYM $@ | 39 | quiet_cmd_vdsosym = VDSOSYM $@ |
45 | define cmd_vdsosym | 40 | cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ |
46 | $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ | ||
47 | endef | ||
48 | 41 | ||
49 | include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE | 42 | include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE |
50 | $(call if_changed,vdsosym) | 43 | $(call if_changed,vdsosym) |
@@ -54,8 +47,6 @@ $(obj-vdso): %.o: %.S FORCE | |||
54 | $(call if_changed_dep,vdsoas) | 47 | $(call if_changed_dep,vdsoas) |
55 | 48 | ||
56 | # Actual build commands | 49 | # Actual build commands |
57 | quiet_cmd_vdsold = VDSOL $@ | ||
58 | cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ | ||
59 | quiet_cmd_vdsoas = VDSOA $@ | 50 | quiet_cmd_vdsoas = VDSOA $@ |
60 | cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< | 51 | cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< |
61 | 52 | ||
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index c39872a7b03c..856fee6d3512 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S | |||
@@ -73,6 +73,13 @@ x_tmp .req x8 | |||
73 | movn x_tmp, #0xff00, lsl #48 | 73 | movn x_tmp, #0xff00, lsl #48 |
74 | and \res, x_tmp, \res | 74 | and \res, x_tmp, \res |
75 | mul \res, \res, \mult | 75 | mul \res, \res, \mult |
76 | /* | ||
77 | * Fake address dependency from the value computed from the counter | ||
78 | * register to subsequent data page accesses so that the sequence | ||
79 | * locking also orders the read of the counter. | ||
80 | */ | ||
81 | and x_tmp, \res, xzr | ||
82 | add vdso_data, vdso_data, x_tmp | ||
76 | .endm | 83 | .endm |
77 | 84 | ||
78 | /* | 85 | /* |
@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday) | |||
147 | /* w11 = cs_mono_mult, w12 = cs_shift */ | 154 | /* w11 = cs_mono_mult, w12 = cs_shift */ |
148 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] | 155 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] |
149 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] | 156 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] |
150 | seqcnt_check fail=1b | ||
151 | 157 | ||
152 | get_nsec_per_sec res=x9 | 158 | get_nsec_per_sec res=x9 |
153 | lsl x9, x9, x12 | 159 | lsl x9, x9, x12 |
154 | 160 | ||
155 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 | 161 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 |
162 | seqcnt_check fail=1b | ||
156 | get_ts_realtime res_sec=x10, res_nsec=x11, \ | 163 | get_ts_realtime res_sec=x10, res_nsec=x11, \ |
157 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 | 164 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 |
158 | 165 | ||
@@ -211,13 +218,13 @@ realtime: | |||
211 | /* w11 = cs_mono_mult, w12 = cs_shift */ | 218 | /* w11 = cs_mono_mult, w12 = cs_shift */ |
212 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] | 219 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] |
213 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] | 220 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] |
214 | seqcnt_check fail=realtime | ||
215 | 221 | ||
216 | /* All computations are done with left-shifted nsecs. */ | 222 | /* All computations are done with left-shifted nsecs. */ |
217 | get_nsec_per_sec res=x9 | 223 | get_nsec_per_sec res=x9 |
218 | lsl x9, x9, x12 | 224 | lsl x9, x9, x12 |
219 | 225 | ||
220 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 | 226 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 |
227 | seqcnt_check fail=realtime | ||
221 | get_ts_realtime res_sec=x10, res_nsec=x11, \ | 228 | get_ts_realtime res_sec=x10, res_nsec=x11, \ |
222 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 | 229 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 |
223 | clock_gettime_return, shift=1 | 230 | clock_gettime_return, shift=1 |
@@ -231,7 +238,6 @@ monotonic: | |||
231 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] | 238 | ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] |
232 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] | 239 | ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] |
233 | ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] | 240 | ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] |
234 | seqcnt_check fail=monotonic | ||
235 | 241 | ||
236 | /* All computations are done with left-shifted nsecs. */ | 242 | /* All computations are done with left-shifted nsecs. */ |
237 | lsl x4, x4, x12 | 243 | lsl x4, x4, x12 |
@@ -239,6 +245,7 @@ monotonic: | |||
239 | lsl x9, x9, x12 | 245 | lsl x9, x9, x12 |
240 | 246 | ||
241 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 | 247 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 |
248 | seqcnt_check fail=monotonic | ||
242 | get_ts_realtime res_sec=x10, res_nsec=x11, \ | 249 | get_ts_realtime res_sec=x10, res_nsec=x11, \ |
243 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 | 250 | clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 |
244 | 251 | ||
@@ -253,13 +260,13 @@ monotonic_raw: | |||
253 | /* w11 = cs_raw_mult, w12 = cs_shift */ | 260 | /* w11 = cs_raw_mult, w12 = cs_shift */ |
254 | ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] | 261 | ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] |
255 | ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] | 262 | ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] |
256 | seqcnt_check fail=monotonic_raw | ||
257 | 263 | ||
258 | /* All computations are done with left-shifted nsecs. */ | 264 | /* All computations are done with left-shifted nsecs. */ |
259 | get_nsec_per_sec res=x9 | 265 | get_nsec_per_sec res=x9 |
260 | lsl x9, x9, x12 | 266 | lsl x9, x9, x12 |
261 | 267 | ||
262 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 | 268 | get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 |
269 | seqcnt_check fail=monotonic_raw | ||
263 | get_ts_clock_raw res_sec=x10, res_nsec=x11, \ | 270 | get_ts_clock_raw res_sec=x10, res_nsec=x11, \ |
264 | clock_nsec=x15, nsec_to_sec=x9 | 271 | clock_nsec=x15, nsec_to_sec=x9 |
265 | 272 | ||
@@ -301,13 +308,14 @@ ENTRY(__kernel_clock_getres) | |||
301 | ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne | 308 | ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne |
302 | b.ne 1f | 309 | b.ne 1f |
303 | 310 | ||
304 | ldr x2, 5f | 311 | adr vdso_data, _vdso_data |
312 | ldr w2, [vdso_data, #CLOCK_REALTIME_RES] | ||
305 | b 2f | 313 | b 2f |
306 | 1: | 314 | 1: |
307 | cmp w0, #CLOCK_REALTIME_COARSE | 315 | cmp w0, #CLOCK_REALTIME_COARSE |
308 | ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne | 316 | ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne |
309 | b.ne 4f | 317 | b.ne 4f |
310 | ldr x2, 6f | 318 | ldr x2, 5f |
311 | 2: | 319 | 2: |
312 | cbz x1, 3f | 320 | cbz x1, 3f |
313 | stp xzr, x2, [x1] | 321 | stp xzr, x2, [x1] |
@@ -321,8 +329,6 @@ ENTRY(__kernel_clock_getres) | |||
321 | svc #0 | 329 | svc #0 |
322 | ret | 330 | ret |
323 | 5: | 331 | 5: |
324 | .quad CLOCK_REALTIME_RES | ||
325 | 6: | ||
326 | .quad CLOCK_COARSE_RES | 332 | .quad CLOCK_COARSE_RES |
327 | .cfi_endproc | 333 | .cfi_endproc |
328 | ENDPROC(__kernel_clock_getres) | 334 | ENDPROC(__kernel_clock_getres) |
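The two instructions added to get_clock_shifted_nsec are the core of the timekeeping change: and x_tmp, \res, xzr always produces zero, but it is computed from the counter value and folded into the vdso_data base register, so every later data-page access -- including the seqcnt_check re-read, deliberately moved after the counter read -- carries a register dependency on CNTVCT and cannot be satisfied ahead of it. A C-with-inline-asm sketch of the same idiom (load_after_counter is an invented helper, not kernel code):

    #include <stdint.h>

    /* Returns *p, but forces the load to depend on "cnt": cnt AND xzr is
     * always 0, yet it is produced in a register that feeds the load
     * address, so the load cannot be reordered before the counter read.
     * A plain C "p + (cnt & 0)" would be constant-folded away, which is
     * why the kernel does this at the assembly level. */
    static inline uint64_t load_after_counter(const uint64_t *p, uint64_t cnt)
    {
	    uint64_t zero;

	    asm volatile("and %0, %1, xzr" : "=r"(zero) : "r"(cnt));
	    return *(const uint64_t *)((const char *)p + zero);
    }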
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 5540a1638baf..33c2a4abda04 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile | |||
@@ -24,7 +24,7 @@ CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \ | |||
24 | -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ | 24 | -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ |
25 | -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ | 25 | -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ |
26 | -fcall-saved-x18 -fomit-frame-pointer | 26 | -fcall-saved-x18 -fomit-frame-pointer |
27 | CFLAGS_REMOVE_atomic_ll_sc.o := -pg | 27 | CFLAGS_REMOVE_atomic_ll_sc.o := $(CC_FLAGS_FTRACE) |
28 | GCOV_PROFILE_atomic_ll_sc.o := n | 28 | GCOV_PROFILE_atomic_ll_sc.o := n |
29 | KASAN_SANITIZE_atomic_ll_sc.o := n | 29 | KASAN_SANITIZE_atomic_ll_sc.o := n |
30 | KCOV_INSTRUMENT_atomic_ll_sc.o := n | 30 | KCOV_INSTRUMENT_atomic_ll_sc.o := n |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1a7e92ab69eb..0cb0e09995e1 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -148,7 +148,7 @@ static inline bool is_ttbr1_addr(unsigned long addr) | |||
148 | /* | 148 | /* |
149 | * Dump out the page tables associated with 'addr' in the currently active mm. | 149 | * Dump out the page tables associated with 'addr' in the currently active mm. |
150 | */ | 150 | */ |
151 | void show_pte(unsigned long addr) | 151 | static void show_pte(unsigned long addr) |
152 | { | 152 | { |
153 | struct mm_struct *mm; | 153 | struct mm_struct *mm; |
154 | pgd_t *pgdp; | 154 | pgd_t *pgdp; |
@@ -810,13 +810,12 @@ void __init hook_debug_fault_code(int nr, | |||
810 | debug_fault_info[nr].name = name; | 810 | debug_fault_info[nr].name = name; |
811 | } | 811 | } |
812 | 812 | ||
813 | asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, | 813 | asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, |
814 | unsigned int esr, | 814 | unsigned int esr, |
815 | struct pt_regs *regs) | 815 | struct pt_regs *regs) |
816 | { | 816 | { |
817 | const struct fault_info *inf = esr_to_debug_fault_info(esr); | 817 | const struct fault_info *inf = esr_to_debug_fault_info(esr); |
818 | unsigned long pc = instruction_pointer(regs); | 818 | unsigned long pc = instruction_pointer(regs); |
819 | int rv; | ||
820 | 819 | ||
821 | /* | 820 | /* |
822 | * Tell lockdep we disabled irqs in entry.S. Do nothing if they were | 821 | * Tell lockdep we disabled irqs in entry.S. Do nothing if they were |
@@ -828,17 +827,12 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, | |||
828 | if (user_mode(regs) && !is_ttbr0_addr(pc)) | 827 | if (user_mode(regs) && !is_ttbr0_addr(pc)) |
829 | arm64_apply_bp_hardening(); | 828 | arm64_apply_bp_hardening(); |
830 | 829 | ||
831 | if (!inf->fn(addr_if_watchpoint, esr, regs)) { | 830 | if (inf->fn(addr_if_watchpoint, esr, regs)) { |
832 | rv = 1; | ||
833 | } else { | ||
834 | arm64_notify_die(inf->name, regs, | 831 | arm64_notify_die(inf->name, regs, |
835 | inf->sig, inf->code, (void __user *)pc, esr); | 832 | inf->sig, inf->code, (void __user *)pc, esr); |
836 | rv = 0; | ||
837 | } | 833 | } |
838 | 834 | ||
839 | if (interrupts_enabled(regs)) | 835 | if (interrupts_enabled(regs)) |
840 | trace_hardirqs_on(); | 836 | trace_hardirqs_on(); |
841 | |||
842 | return rv; | ||
843 | } | 837 | } |
844 | NOKPROBE_SYMBOL(do_debug_exception); | 838 | NOKPROBE_SYMBOL(do_debug_exception); |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7cae155e81a5..40e2d7e5efcb 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -377,7 +377,7 @@ void __init arm64_memblock_init(void) | |||
377 | base + size > memblock_start_of_DRAM() + | 377 | base + size > memblock_start_of_DRAM() + |
378 | linear_region_size, | 378 | linear_region_size, |
379 | "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { | 379 | "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { |
380 | initrd_start = 0; | 380 | phys_initrd_size = 0; |
381 | } else { | 381 | } else { |
382 | memblock_remove(base, size); /* clear MEMBLOCK_ flags */ | 382 | memblock_remove(base, size); /* clear MEMBLOCK_ flags */ |
383 | memblock_add(base, size); | 383 | memblock_add(base, size); |
@@ -440,6 +440,7 @@ void __init bootmem_init(void) | |||
440 | early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); | 440 | early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); |
441 | 441 | ||
442 | max_pfn = max_low_pfn = max; | 442 | max_pfn = max_low_pfn = max; |
443 | min_low_pfn = min; | ||
443 | 444 | ||
444 | arm64_numa_init(); | 445 | arm64_numa_init(); |
445 | /* | 446 | /* |
@@ -535,7 +536,7 @@ void __init mem_init(void) | |||
535 | else | 536 | else |
536 | swiotlb_force = SWIOTLB_NO_FORCE; | 537 | swiotlb_force = SWIOTLB_NO_FORCE; |
537 | 538 | ||
538 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); | 539 | set_max_mapnr(max_pfn - PHYS_PFN_OFFSET); |
539 | 540 | ||
540 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 541 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
541 | free_unused_memmap(); | 542 | free_unused_memmap(); |
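set_max_mapnr() now works purely in PFN arithmetic: max_pfn - PHYS_PFN_OFFSET counts pages from the start of RAM without dereferencing mem_map, which the old pfn_to_page()-based expression did even on CONFIG_SPARSEMEM_VMEMMAP configurations where mem_map is not a flat array. A worked example, assuming a platform with 2 GiB of RAM starting at physical 0x80000000 and 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
	    unsigned long long phys_pfn_offset = 0x80000000ull >> 12; /* 0x80000 */
	    unsigned long long max_pfn = 0x100000000ull >> 12;	       /* RAM end */

	    /* 0x80000 pages, i.e. exactly the 2 GiB of fitted RAM. */
	    printf("max_mapnr = %#llx pages\n", max_pfn - phys_pfn_offset);
	    return 0;
    }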
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e97f018ff740..ef82312860ac 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -97,7 +97,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |||
97 | } | 97 | } |
98 | EXPORT_SYMBOL(phys_mem_access_prot); | 98 | EXPORT_SYMBOL(phys_mem_access_prot); |
99 | 99 | ||
100 | static phys_addr_t __init early_pgtable_alloc(void) | 100 | static phys_addr_t __init early_pgtable_alloc(int shift) |
101 | { | 101 | { |
102 | phys_addr_t phys; | 102 | phys_addr_t phys; |
103 | void *ptr; | 103 | void *ptr; |
@@ -174,7 +174,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, | |||
174 | static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, | 174 | static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, |
175 | unsigned long end, phys_addr_t phys, | 175 | unsigned long end, phys_addr_t phys, |
176 | pgprot_t prot, | 176 | pgprot_t prot, |
177 | phys_addr_t (*pgtable_alloc)(void), | 177 | phys_addr_t (*pgtable_alloc)(int), |
178 | int flags) | 178 | int flags) |
179 | { | 179 | { |
180 | unsigned long next; | 180 | unsigned long next; |
@@ -184,7 +184,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, | |||
184 | if (pmd_none(pmd)) { | 184 | if (pmd_none(pmd)) { |
185 | phys_addr_t pte_phys; | 185 | phys_addr_t pte_phys; |
186 | BUG_ON(!pgtable_alloc); | 186 | BUG_ON(!pgtable_alloc); |
187 | pte_phys = pgtable_alloc(); | 187 | pte_phys = pgtable_alloc(PAGE_SHIFT); |
188 | __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); | 188 | __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); |
189 | pmd = READ_ONCE(*pmdp); | 189 | pmd = READ_ONCE(*pmdp); |
190 | } | 190 | } |
@@ -208,7 +208,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, | |||
208 | 208 | ||
209 | static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, | 209 | static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, |
210 | phys_addr_t phys, pgprot_t prot, | 210 | phys_addr_t phys, pgprot_t prot, |
211 | phys_addr_t (*pgtable_alloc)(void), int flags) | 211 | phys_addr_t (*pgtable_alloc)(int), int flags) |
212 | { | 212 | { |
213 | unsigned long next; | 213 | unsigned long next; |
214 | pmd_t *pmdp; | 214 | pmd_t *pmdp; |
@@ -246,7 +246,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, | |||
246 | static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, | 246 | static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, |
247 | unsigned long end, phys_addr_t phys, | 247 | unsigned long end, phys_addr_t phys, |
248 | pgprot_t prot, | 248 | pgprot_t prot, |
249 | phys_addr_t (*pgtable_alloc)(void), int flags) | 249 | phys_addr_t (*pgtable_alloc)(int), int flags) |
250 | { | 250 | { |
251 | unsigned long next; | 251 | unsigned long next; |
252 | pud_t pud = READ_ONCE(*pudp); | 252 | pud_t pud = READ_ONCE(*pudp); |
@@ -258,7 +258,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, | |||
258 | if (pud_none(pud)) { | 258 | if (pud_none(pud)) { |
259 | phys_addr_t pmd_phys; | 259 | phys_addr_t pmd_phys; |
260 | BUG_ON(!pgtable_alloc); | 260 | BUG_ON(!pgtable_alloc); |
261 | pmd_phys = pgtable_alloc(); | 261 | pmd_phys = pgtable_alloc(PMD_SHIFT); |
262 | __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); | 262 | __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); |
263 | pud = READ_ONCE(*pudp); | 263 | pud = READ_ONCE(*pudp); |
264 | } | 264 | } |
@@ -294,7 +294,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, | |||
294 | 294 | ||
295 | static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, | 295 | static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, |
296 | phys_addr_t phys, pgprot_t prot, | 296 | phys_addr_t phys, pgprot_t prot, |
297 | phys_addr_t (*pgtable_alloc)(void), | 297 | phys_addr_t (*pgtable_alloc)(int), |
298 | int flags) | 298 | int flags) |
299 | { | 299 | { |
300 | unsigned long next; | 300 | unsigned long next; |
@@ -304,7 +304,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, | |||
304 | if (pgd_none(pgd)) { | 304 | if (pgd_none(pgd)) { |
305 | phys_addr_t pud_phys; | 305 | phys_addr_t pud_phys; |
306 | BUG_ON(!pgtable_alloc); | 306 | BUG_ON(!pgtable_alloc); |
307 | pud_phys = pgtable_alloc(); | 307 | pud_phys = pgtable_alloc(PUD_SHIFT); |
308 | __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE); | 308 | __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE); |
309 | pgd = READ_ONCE(*pgdp); | 309 | pgd = READ_ONCE(*pgdp); |
310 | } | 310 | } |
@@ -345,7 +345,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, | |||
345 | static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, | 345 | static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, |
346 | unsigned long virt, phys_addr_t size, | 346 | unsigned long virt, phys_addr_t size, |
347 | pgprot_t prot, | 347 | pgprot_t prot, |
348 | phys_addr_t (*pgtable_alloc)(void), | 348 | phys_addr_t (*pgtable_alloc)(int), |
349 | int flags) | 349 | int flags) |
350 | { | 350 | { |
351 | unsigned long addr, length, end, next; | 351 | unsigned long addr, length, end, next; |
@@ -371,17 +371,36 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, | |||
371 | } while (pgdp++, addr = next, addr != end); | 371 | } while (pgdp++, addr = next, addr != end); |
372 | } | 372 | } |
373 | 373 | ||
374 | static phys_addr_t pgd_pgtable_alloc(void) | 374 | static phys_addr_t __pgd_pgtable_alloc(int shift) |
375 | { | 375 | { |
376 | void *ptr = (void *)__get_free_page(PGALLOC_GFP); | 376 | void *ptr = (void *)__get_free_page(PGALLOC_GFP); |
377 | if (!ptr || !pgtable_page_ctor(virt_to_page(ptr))) | 377 | BUG_ON(!ptr); |
378 | BUG(); | ||
379 | 378 | ||
380 | /* Ensure the zeroed page is visible to the page table walker */ | 379 | /* Ensure the zeroed page is visible to the page table walker */ |
381 | dsb(ishst); | 380 | dsb(ishst); |
382 | return __pa(ptr); | 381 | return __pa(ptr); |
383 | } | 382 | } |
384 | 383 | ||
384 | static phys_addr_t pgd_pgtable_alloc(int shift) | ||
385 | { | ||
386 | phys_addr_t pa = __pgd_pgtable_alloc(shift); | ||
387 | |||
388 | /* | ||
389 | * Call proper page table ctor in case later we need to | ||
390 | * call core mm functions like apply_to_page_range() on | ||
391 | * this pre-allocated page table. | ||
392 | * | ||
393 | * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is | ||
394 | * folded, and if so pgtable_pmd_page_ctor() becomes a nop. | ||
395 | */ | ||
396 | if (shift == PAGE_SHIFT) | ||
397 | BUG_ON(!pgtable_page_ctor(phys_to_page(pa))); | ||
398 | else if (shift == PMD_SHIFT) | ||
399 | BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa))); | ||
400 | |||
401 | return pa; | ||
402 | } | ||
403 | |||
385 | /* | 404 | /* |
386 | * This function can only be used to modify existing table entries, | 405 | * This function can only be used to modify existing table entries, |
387 | * without allocating new levels of table. Note that this permits the | 406 | * without allocating new levels of table. Note that this permits the |
@@ -583,7 +602,7 @@ static int __init map_entry_trampoline(void) | |||
583 | /* Map only the text into the trampoline page table */ | 602 | /* Map only the text into the trampoline page table */ |
584 | memset(tramp_pg_dir, 0, PGD_SIZE); | 603 | memset(tramp_pg_dir, 0, PGD_SIZE); |
585 | __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, | 604 | __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, |
586 | prot, pgd_pgtable_alloc, 0); | 605 | prot, __pgd_pgtable_alloc, 0); |
587 | 606 | ||
588 | /* Map both the text and data into the kernel page table */ | 607 | /* Map both the text and data into the kernel page table */ |
589 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); | 608 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); |
@@ -1055,7 +1074,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, | |||
1055 | flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; | 1074 | flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; |
1056 | 1075 | ||
1057 | __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), | 1076 | __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), |
1058 | size, PAGE_KERNEL, pgd_pgtable_alloc, flags); | 1077 | size, PAGE_KERNEL, __pgd_pgtable_alloc, flags); |
1059 | 1078 | ||
1060 | return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, | 1079 | return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, |
1061 | altmap, want_memblock); | 1080 | altmap, want_memblock); |
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 06a6f264f2dd..5202f63c29c9 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c | |||
@@ -124,7 +124,7 @@ static void __init setup_node_to_cpumask_map(void) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * Set the cpu to node and mem mapping | 127 | * Set the cpu to node and mem mapping |
128 | */ | 128 | */ |
129 | void numa_store_cpu_info(unsigned int cpu) | 129 | void numa_store_cpu_info(unsigned int cpu) |
130 | { | 130 | { |
@@ -200,7 +200,7 @@ void __init setup_per_cpu_areas(void) | |||
200 | #endif | 200 | #endif |
201 | 201 | ||
202 | /** | 202 | /** |
203 | * numa_add_memblk - Set node id to memblk | 203 | * numa_add_memblk() - Set node id to memblk |
204 | * @nid: NUMA node ID of the new memblk | 204 | * @nid: NUMA node ID of the new memblk |
205 | * @start: Start address of the new memblk | 205 | * @start: Start address of the new memblk |
206 | * @end: End address of the new memblk | 206 | * @end: End address of the new memblk |
@@ -223,7 +223,7 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) | |||
223 | return ret; | 223 | return ret; |
224 | } | 224 | } |
225 | 225 | ||
226 | /** | 226 | /* |
227 | * Initialize NODE_DATA for a node on the local memory | 227 | * Initialize NODE_DATA for a node on the local memory |
228 | */ | 228 | */ |
229 | static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) | 229 | static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) |
@@ -257,7 +257,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) | |||
257 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | 257 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; |
258 | } | 258 | } |
259 | 259 | ||
260 | /** | 260 | /* |
261 | * numa_free_distance | 261 | * numa_free_distance |
262 | * | 262 | * |
263 | * The current table is freed. | 263 | * The current table is freed. |
@@ -277,10 +277,8 @@ void __init numa_free_distance(void) | |||
277 | numa_distance = NULL; | 277 | numa_distance = NULL; |
278 | } | 278 | } |
279 | 279 | ||
280 | /** | 280 | /* |
281 | * | ||
282 | * Create a new NUMA distance table. | 281 | * Create a new NUMA distance table. |
283 | * | ||
284 | */ | 282 | */ |
285 | static int __init numa_alloc_distance(void) | 283 | static int __init numa_alloc_distance(void) |
286 | { | 284 | { |
@@ -311,7 +309,7 @@ static int __init numa_alloc_distance(void) | |||
311 | } | 309 | } |
312 | 310 | ||
313 | /** | 311 | /** |
314 | * numa_set_distance - Set inter node NUMA distance from node to node. | 312 | * numa_set_distance() - Set inter node NUMA distance from node to node. |
315 | * @from: the 'from' node to set distance | 313 | * @from: the 'from' node to set distance |
316 | * @to: the 'to' node to set distance | 314 | * @to: the 'to' node to set distance |
317 | * @distance: NUMA distance | 315 | * @distance: NUMA distance |
@@ -321,7 +319,6 @@ static int __init numa_alloc_distance(void) | |||
321 | * | 319 | * |
322 | * If @from or @to is higher than the highest known node or lower than zero | 320 | * If @from or @to is higher than the highest known node or lower than zero |
323 | * or @distance doesn't make sense, the call is ignored. | 321 | * or @distance doesn't make sense, the call is ignored. |
324 | * | ||
325 | */ | 322 | */ |
326 | void __init numa_set_distance(int from, int to, int distance) | 323 | void __init numa_set_distance(int from, int to, int distance) |
327 | { | 324 | { |
@@ -347,7 +344,7 @@ void __init numa_set_distance(int from, int to, int distance) | |||
347 | numa_distance[from * numa_distance_cnt + to] = distance; | 344 | numa_distance[from * numa_distance_cnt + to] = distance; |
348 | } | 345 | } |
349 | 346 | ||
350 | /** | 347 | /* |
351 | * Return NUMA distance @from to @to | 348 | * Return NUMA distance @from to @to |
352 | */ | 349 | */ |
353 | int __node_distance(int from, int to) | 350 | int __node_distance(int from, int to) |
@@ -422,13 +419,15 @@ out_free_distance: | |||
422 | } | 419 | } |
423 | 420 | ||
424 | /** | 421 | /** |
425 | * dummy_numa_init - Fallback dummy NUMA init | 422 | * dummy_numa_init() - Fallback dummy NUMA init |
426 | * | 423 | * |
427 | * Used if there's no underlying NUMA architecture, NUMA initialization | 424 | * Used if there's no underlying NUMA architecture, NUMA initialization |
428 | * fails, or NUMA is disabled on the command line. | 425 | * fails, or NUMA is disabled on the command line. |
429 | * | 426 | * |
430 | * Must online at least one node (node 0) and add memory blocks that cover all | 427 | * Must online at least one node (node 0) and add memory blocks that cover all |
431 | * allowed memory. It is unlikely that this function fails. | 428 | * allowed memory. It is unlikely that this function fails. |
429 | * | ||
430 | * Return: 0 on success, -errno on failure. | ||
432 | */ | 431 | */ |
433 | static int __init dummy_numa_init(void) | 432 | static int __init dummy_numa_init(void) |
434 | { | 433 | { |
@@ -454,9 +453,9 @@ static int __init dummy_numa_init(void) | |||
454 | } | 453 | } |
455 | 454 | ||
456 | /** | 455 | /** |
457 | * arm64_numa_init - Initialize NUMA | 456 | * arm64_numa_init() - Initialize NUMA |
458 | * | 457 | * |
459 | * Try each configured NUMA initialization method until one succeeds. The | 458 | * Try each configured NUMA initialization method until one succeeds. The |
460 | * last fallback is dummy single node config encompassing whole memory. | 459 | * last fallback is dummy single node config encompassing whole memory. |
461 | */ | 460 | */ |
462 | void __init arm64_numa_init(void) | 461 | void __init arm64_numa_init(void) |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aa0817c9c4c3..fdd626d34274 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -65,24 +65,25 @@ ENTRY(cpu_do_suspend) | |||
65 | mrs x2, tpidr_el0 | 65 | mrs x2, tpidr_el0 |
66 | mrs x3, tpidrro_el0 | 66 | mrs x3, tpidrro_el0 |
67 | mrs x4, contextidr_el1 | 67 | mrs x4, contextidr_el1 |
68 | mrs x5, cpacr_el1 | 68 | mrs x5, osdlr_el1 |
69 | mrs x6, tcr_el1 | 69 | mrs x6, cpacr_el1 |
70 | mrs x7, vbar_el1 | 70 | mrs x7, tcr_el1 |
71 | mrs x8, mdscr_el1 | 71 | mrs x8, vbar_el1 |
72 | mrs x9, oslsr_el1 | 72 | mrs x9, mdscr_el1 |
73 | mrs x10, sctlr_el1 | 73 | mrs x10, oslsr_el1 |
74 | mrs x11, sctlr_el1 | ||
74 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN | 75 | alternative_if_not ARM64_HAS_VIRT_HOST_EXTN |
75 | mrs x11, tpidr_el1 | 76 | mrs x12, tpidr_el1 |
76 | alternative_else | 77 | alternative_else |
77 | mrs x11, tpidr_el2 | 78 | mrs x12, tpidr_el2 |
78 | alternative_endif | 79 | alternative_endif |
79 | mrs x12, sp_el0 | 80 | mrs x13, sp_el0 |
80 | stp x2, x3, [x0] | 81 | stp x2, x3, [x0] |
81 | stp x4, xzr, [x0, #16] | 82 | stp x4, x5, [x0, #16] |
82 | stp x5, x6, [x0, #32] | 83 | stp x6, x7, [x0, #32] |
83 | stp x7, x8, [x0, #48] | 84 | stp x8, x9, [x0, #48] |
84 | stp x9, x10, [x0, #64] | 85 | stp x10, x11, [x0, #64] |
85 | stp x11, x12, [x0, #80] | 86 | stp x12, x13, [x0, #80] |
86 | ret | 87 | ret |
87 | ENDPROC(cpu_do_suspend) | 88 | ENDPROC(cpu_do_suspend) |
88 | 89 | ||
@@ -105,8 +106,8 @@ ENTRY(cpu_do_resume) | |||
105 | msr cpacr_el1, x6 | 106 | msr cpacr_el1, x6 |
106 | 107 | ||
107 | /* Don't change t0sz here, mask those bits when restoring */ | 108 | /* Don't change t0sz here, mask those bits when restoring */ |
108 | mrs x5, tcr_el1 | 109 | mrs x7, tcr_el1 |
109 | bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH | 110 | bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH |
110 | 111 | ||
111 | msr tcr_el1, x8 | 112 | msr tcr_el1, x8 |
112 | msr vbar_el1, x9 | 113 | msr vbar_el1, x9 |
@@ -130,6 +131,7 @@ alternative_endif | |||
130 | /* | 131 | /* |
131 | * Restore oslsr_el1 by writing oslar_el1 | 132 | * Restore oslsr_el1 by writing oslar_el1 |
132 | */ | 133 | */ |
134 | msr osdlr_el1, x5 | ||
133 | ubfx x11, x11, #1, #1 | 135 | ubfx x11, x11, #1, #1 |
134 | msr oslar_el1, x11 | 136 | msr oslar_el1, x11 |
135 | reset_pmuserenr_el0 x0 // Disable PMU access from EL0 | 137 | reset_pmuserenr_el0 x0 // Disable PMU access from EL0 |
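The wholesale register renumbering in cpu_do_suspend exists only to make room for osdlr_el1: the suspend context grows from ten to twelve 64-bit slots, filled by the six stp pairs above, and cpu_do_resume writes osdlr_el1 back just before re-deriving oslar_el1 so both OS lock registers leave resume in a consistent state. The resulting save-area layout, as a sketch:

    #include <stdio.h>

    /* Byte offsets into the buffer at x0, per the stp sequence above. */
    static const char *slots[] = {
	    "tpidr_el0",      "tpidrro_el0",	/*  0,  8 */
	    "contextidr_el1", "osdlr_el1",	/* 16, 24 -- the new entry */
	    "cpacr_el1",      "tcr_el1",	/* 32, 40 */
	    "vbar_el1",       "mdscr_el1",	/* 48, 56 */
	    "oslsr_el1",      "sctlr_el1",	/* 64, 72 */
	    "tpidr_el1/el2",  "sp_el0",		/* 80, 88 */
    };

    int main(void)
    {
	    for (int i = 0; i < 12; i++)
		    printf("%2d: %s\n", i * 8, slots[i]);
	    return 0;
    }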
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e48894e002ba..adbf7cbedf80 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, | |||
356 | if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { | 356 | if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { |
357 | if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || | 357 | if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || |
358 | node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || | 358 | node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || |
359 | node->type == ACPI_IORT_NODE_SMMU_V3) { | 359 | node->type == ACPI_IORT_NODE_SMMU_V3 || |
360 | node->type == ACPI_IORT_NODE_PMCG) { | ||
360 | *id_out = map->output_base; | 361 | *id_out = map->output_base; |
361 | return parent; | 362 | return parent; |
362 | } | 363 | } |
@@ -394,6 +395,8 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) | |||
394 | } | 395 | } |
395 | 396 | ||
396 | return smmu->id_mapping_index; | 397 | return smmu->id_mapping_index; |
398 | case ACPI_IORT_NODE_PMCG: | ||
399 | return 0; | ||
397 | default: | 400 | default: |
398 | return -EINVAL; | 401 | return -EINVAL; |
399 | } | 402 | } |
@@ -1218,32 +1221,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, | |||
1218 | } | 1221 | } |
1219 | } | 1222 | } |
1220 | 1223 | ||
1221 | static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) | 1224 | static void __init arm_smmu_v3_dma_configure(struct device *dev, |
1225 | struct acpi_iort_node *node) | ||
1222 | { | 1226 | { |
1223 | struct acpi_iort_smmu_v3 *smmu; | 1227 | struct acpi_iort_smmu_v3 *smmu; |
1228 | enum dev_dma_attr attr; | ||
1224 | 1229 | ||
1225 | /* Retrieve SMMUv3 specific data */ | 1230 | /* Retrieve SMMUv3 specific data */ |
1226 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; | 1231 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; |
1227 | 1232 | ||
1228 | return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; | 1233 | attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? |
1234 | DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; | ||
1235 | |||
1236 | /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ | ||
1237 | dev->dma_mask = &dev->coherent_dma_mask; | ||
1238 | |||
1239 | /* Configure DMA for the page table walker */ | ||
1240 | acpi_dma_configure(dev, attr); | ||
1229 | } | 1241 | } |
1230 | 1242 | ||
1231 | #if defined(CONFIG_ACPI_NUMA) | 1243 | #if defined(CONFIG_ACPI_NUMA) |
1232 | /* | 1244 | /* |
1233 | * set numa proximity domain for smmuv3 device | 1245 | * set numa proximity domain for smmuv3 device |
1234 | */ | 1246 | */ |
1235 | static void __init arm_smmu_v3_set_proximity(struct device *dev, | 1247 | static int __init arm_smmu_v3_set_proximity(struct device *dev, |
1236 | struct acpi_iort_node *node) | 1248 | struct acpi_iort_node *node) |
1237 | { | 1249 | { |
1238 | struct acpi_iort_smmu_v3 *smmu; | 1250 | struct acpi_iort_smmu_v3 *smmu; |
1239 | 1251 | ||
1240 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; | 1252 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; |
1241 | if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { | 1253 | if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { |
1242 | set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm)); | 1254 | int node = acpi_map_pxm_to_node(smmu->pxm); |
1255 | |||
1256 | if (node != NUMA_NO_NODE && !node_online(node)) | ||
1257 | return -EINVAL; | ||
1258 | |||
1259 | set_dev_node(dev, node); | ||
1243 | pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", | 1260 | pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", |
1244 | smmu->base_address, | 1261 | smmu->base_address, |
1245 | smmu->pxm); | 1262 | smmu->pxm); |
1246 | } | 1263 | } |
1264 | return 0; | ||
1247 | } | 1265 | } |
1248 | #else | 1266 | #else |
1249 | #define arm_smmu_v3_set_proximity NULL | 1267 | #define arm_smmu_v3_set_proximity NULL |
@@ -1301,30 +1319,96 @@ static void __init arm_smmu_init_resources(struct resource *res, | |||
1301 | } | 1319 | } |
1302 | } | 1320 | } |
1303 | 1321 | ||
1304 | static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) | 1322 | static void __init arm_smmu_dma_configure(struct device *dev, |
1323 | struct acpi_iort_node *node) | ||
1305 | { | 1324 | { |
1306 | struct acpi_iort_smmu *smmu; | 1325 | struct acpi_iort_smmu *smmu; |
1326 | enum dev_dma_attr attr; | ||
1307 | 1327 | ||
1308 | /* Retrieve SMMU specific data */ | 1328 | /* Retrieve SMMU specific data */ |
1309 | smmu = (struct acpi_iort_smmu *)node->node_data; | 1329 | smmu = (struct acpi_iort_smmu *)node->node_data; |
1310 | 1330 | ||
1311 | return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; | 1331 | attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? |
1332 | DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; | ||
1333 | |||
1334 | /* We expect the dma masks to be equivalent for SMMU set-ups */ | ||
1335 | dev->dma_mask = &dev->coherent_dma_mask; | ||
1336 | |||
1337 | /* Configure DMA for the page table walker */ | ||
1338 | acpi_dma_configure(dev, attr); | ||
1339 | } | ||
1340 | |||
1341 | static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) | ||
1342 | { | ||
1343 | struct acpi_iort_pmcg *pmcg; | ||
1344 | |||
1345 | /* Retrieve PMCG specific data */ | ||
1346 | pmcg = (struct acpi_iort_pmcg *)node->node_data; | ||
1347 | |||
1348 | /* | ||
1349 | * There are always 2 memory resources. | ||
1350 | * If the overflow_gsiv is present then add that for a total of 3. | ||
1351 | */ | ||
1352 | return pmcg->overflow_gsiv ? 3 : 2; | ||
1353 | } | ||
1354 | |||
1355 | static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, | ||
1356 | struct acpi_iort_node *node) | ||
1357 | { | ||
1358 | struct acpi_iort_pmcg *pmcg; | ||
1359 | |||
1360 | /* Retrieve PMCG specific data */ | ||
1361 | pmcg = (struct acpi_iort_pmcg *)node->node_data; | ||
1362 | |||
1363 | res[0].start = pmcg->page0_base_address; | ||
1364 | res[0].end = pmcg->page0_base_address + SZ_4K - 1; | ||
1365 | res[0].flags = IORESOURCE_MEM; | ||
1366 | res[1].start = pmcg->page1_base_address; | ||
1367 | res[1].end = pmcg->page1_base_address + SZ_4K - 1; | ||
1368 | res[1].flags = IORESOURCE_MEM; | ||
1369 | |||
1370 | if (pmcg->overflow_gsiv) | ||
1371 | acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", | ||
1372 | ACPI_EDGE_SENSITIVE, &res[2]); | ||
1373 | } | ||
1374 | |||
1375 | static struct acpi_platform_list pmcg_plat_info[] __initdata = { | ||
1376 | /* HiSilicon Hip08 Platform */ | ||
1377 | {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, | ||
1378 | "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08}, | ||
1379 | { } | ||
1380 | }; | ||
1381 | |||
1382 | static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) | ||
1383 | { | ||
1384 | u32 model; | ||
1385 | int idx; | ||
1386 | |||
1387 | idx = acpi_match_platform_list(pmcg_plat_info); | ||
1388 | if (idx >= 0) | ||
1389 | model = pmcg_plat_info[idx].data; | ||
1390 | else | ||
1391 | model = IORT_SMMU_V3_PMCG_GENERIC; | ||
1392 | |||
1393 | return platform_device_add_data(pdev, &model, sizeof(model)); | ||
1312 | } | 1394 | } |
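
On the consumer side, the PMCG driver can pull the model back out with dev_get_platdata(). A minimal sketch of that pattern (the function below is illustrative, not the driver's actual probe path):

	/* Illustrative consumer: read the model written by
	 * arm_smmu_v3_pmcg_add_platdata() above and apply quirks. */
	static void example_pmcg_get_options(struct device *dev, u32 *options)
	{
		u32 model = *(u32 *)dev_get_platdata(dev);

		if (model == IORT_SMMU_V3_PMCG_HISI_HIP08)
			*options |= SMMU_PMCG_EVCNTR_RDONLY; /* Erratum #162001800 */
	}
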
1313 | 1395 | ||
1314 | struct iort_dev_config { | 1396 | struct iort_dev_config { |
1315 | const char *name; | 1397 | const char *name; |
1316 | int (*dev_init)(struct acpi_iort_node *node); | 1398 | int (*dev_init)(struct acpi_iort_node *node); |
1317 | bool (*dev_is_coherent)(struct acpi_iort_node *node); | 1399 | void (*dev_dma_configure)(struct device *dev, |
1400 | struct acpi_iort_node *node); | ||
1318 | int (*dev_count_resources)(struct acpi_iort_node *node); | 1401 | int (*dev_count_resources)(struct acpi_iort_node *node); |
1319 | void (*dev_init_resources)(struct resource *res, | 1402 | void (*dev_init_resources)(struct resource *res, |
1320 | struct acpi_iort_node *node); | 1403 | struct acpi_iort_node *node); |
1321 | void (*dev_set_proximity)(struct device *dev, | 1404 | int (*dev_set_proximity)(struct device *dev, |
1322 | struct acpi_iort_node *node); | 1405 | struct acpi_iort_node *node); |
1406 | int (*dev_add_platdata)(struct platform_device *pdev); | ||
1323 | }; | 1407 | }; |
1324 | 1408 | ||
1325 | static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { | 1409 | static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { |
1326 | .name = "arm-smmu-v3", | 1410 | .name = "arm-smmu-v3", |
1327 | .dev_is_coherent = arm_smmu_v3_is_coherent, | 1411 | .dev_dma_configure = arm_smmu_v3_dma_configure, |
1328 | .dev_count_resources = arm_smmu_v3_count_resources, | 1412 | .dev_count_resources = arm_smmu_v3_count_resources, |
1329 | .dev_init_resources = arm_smmu_v3_init_resources, | 1413 | .dev_init_resources = arm_smmu_v3_init_resources, |
1330 | .dev_set_proximity = arm_smmu_v3_set_proximity, | 1414 | .dev_set_proximity = arm_smmu_v3_set_proximity, |
@@ -1332,9 +1416,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { | |||
1332 | 1416 | ||
1333 | static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { | 1417 | static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { |
1334 | .name = "arm-smmu", | 1418 | .name = "arm-smmu", |
1335 | .dev_is_coherent = arm_smmu_is_coherent, | 1419 | .dev_dma_configure = arm_smmu_dma_configure, |
1336 | .dev_count_resources = arm_smmu_count_resources, | 1420 | .dev_count_resources = arm_smmu_count_resources, |
1337 | .dev_init_resources = arm_smmu_init_resources | 1421 | .dev_init_resources = arm_smmu_init_resources, |
1422 | }; | ||
1423 | |||
1424 | static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { | ||
1425 | .name = "arm-smmu-v3-pmcg", | ||
1426 | .dev_count_resources = arm_smmu_v3_pmcg_count_resources, | ||
1427 | .dev_init_resources = arm_smmu_v3_pmcg_init_resources, | ||
1428 | .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, | ||
1338 | }; | 1429 | }; |
1339 | 1430 | ||
1340 | static __init const struct iort_dev_config *iort_get_dev_cfg( | 1431 | static __init const struct iort_dev_config *iort_get_dev_cfg( |
@@ -1345,6 +1436,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg( | |||
1345 | return &iort_arm_smmu_v3_cfg; | 1436 | return &iort_arm_smmu_v3_cfg; |
1346 | case ACPI_IORT_NODE_SMMU: | 1437 | case ACPI_IORT_NODE_SMMU: |
1347 | return &iort_arm_smmu_cfg; | 1438 | return &iort_arm_smmu_cfg; |
1439 | case ACPI_IORT_NODE_PMCG: | ||
1440 | return &iort_arm_smmu_v3_pmcg_cfg; | ||
1348 | default: | 1441 | default: |
1349 | return NULL; | 1442 | return NULL; |
1350 | } | 1443 | } |
@@ -1362,15 +1455,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, | |||
1362 | struct fwnode_handle *fwnode; | 1455 | struct fwnode_handle *fwnode; |
1363 | struct platform_device *pdev; | 1456 | struct platform_device *pdev; |
1364 | struct resource *r; | 1457 | struct resource *r; |
1365 | enum dev_dma_attr attr; | ||
1366 | int ret, count; | 1458 | int ret, count; |
1367 | 1459 | ||
1368 | pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); | 1460 | pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); |
1369 | if (!pdev) | 1461 | if (!pdev) |
1370 | return -ENOMEM; | 1462 | return -ENOMEM; |
1371 | 1463 | ||
1372 | if (ops->dev_set_proximity) | 1464 | if (ops->dev_set_proximity) { |
1373 | ops->dev_set_proximity(&pdev->dev, node); | 1465 | ret = ops->dev_set_proximity(&pdev->dev, node); |
1466 | if (ret) | ||
1467 | goto dev_put; | ||
1468 | } | ||
1374 | 1469 | ||
1375 | count = ops->dev_count_resources(node); | 1470 | count = ops->dev_count_resources(node); |
1376 | 1471 | ||
@@ -1393,19 +1488,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, | |||
1393 | goto dev_put; | 1488 | goto dev_put; |
1394 | 1489 | ||
1395 | /* | 1490 | /* |
1396 | * Add a copy of IORT node pointer to platform_data to | 1491 | * Platform devices based on PMCG nodes use platform_data to |
1397 | * be used to retrieve IORT data information. | 1492 | * pass the hardware model info to the driver. For others, add |
1493 | * a copy of IORT node pointer to platform_data to be used to | ||
1494 | * retrieve IORT data information. | ||
1398 | */ | 1495 | */ |
1399 | ret = platform_device_add_data(pdev, &node, sizeof(node)); | 1496 | if (ops->dev_add_platdata) |
1497 | ret = ops->dev_add_platdata(pdev); | ||
1498 | else | ||
1499 | ret = platform_device_add_data(pdev, &node, sizeof(node)); | ||
1500 | |||
1400 | if (ret) | 1501 | if (ret) |
1401 | goto dev_put; | 1502 | goto dev_put; |
1402 | 1503 | ||
1403 | /* | ||
1404 | * We expect the dma masks to be equivalent for | ||
1405 | * all SMMUs set-ups | ||
1406 | */ | ||
1407 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | ||
1408 | |||
1409 | fwnode = iort_get_fwnode(node); | 1504 | fwnode = iort_get_fwnode(node); |
1410 | 1505 | ||
1411 | if (!fwnode) { | 1506 | if (!fwnode) { |
@@ -1415,11 +1510,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, | |||
1415 | 1510 | ||
1416 | pdev->dev.fwnode = fwnode; | 1511 | pdev->dev.fwnode = fwnode; |
1417 | 1512 | ||
1418 | attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ? | 1513 | if (ops->dev_dma_configure) |
1419 | DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; | 1514 | ops->dev_dma_configure(&pdev->dev, node); |
1420 | |||
1421 | /* Configure DMA for the page table walker */ | ||
1422 | acpi_dma_configure(&pdev->dev, attr); | ||
1423 | 1515 | ||
1424 | iort_set_device_domain(&pdev->dev, node); | 1516 | iort_set_device_domain(&pdev->dev, node); |
1425 | 1517 | ||
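
The net effect of the iort.c changes above is that every IORT node type now describes itself through an iort_dev_config op table, and iort_add_platform_device() only calls the hooks a given type populates. A sketch of how a hypothetical further node type would slot in (all example_* names are invented for illustration):

	static const struct iort_dev_config iort_example_cfg __initconst = {
		.name			= "example-dev",
		.dev_dma_configure	= example_dma_configure,	/* optional */
		.dev_count_resources	= example_count_resources,
		.dev_init_resources	= example_init_resources,
		.dev_set_proximity	= example_set_proximity,	/* optional, may fail */
		.dev_add_platdata	= example_add_platdata,		/* optional; without it the
								 * IORT node pointer becomes
								 * the platform_data */
	};

iort_get_dev_cfg() would then return this table for the matching ACPI_IORT_NODE_* type, exactly as it does for the PMCG.
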
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index ea373cfbcecb..b2a951a798e2 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -149,6 +149,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, | |||
149 | return val; | 149 | return val; |
150 | } | 150 | } |
151 | 151 | ||
152 | static u64 arch_counter_get_cntpct_stable(void) | ||
153 | { | ||
154 | return __arch_counter_get_cntpct_stable(); | ||
155 | } | ||
156 | |||
157 | static u64 arch_counter_get_cntpct(void) | ||
158 | { | ||
159 | return __arch_counter_get_cntpct(); | ||
160 | } | ||
161 | |||
162 | static u64 arch_counter_get_cntvct_stable(void) | ||
163 | { | ||
164 | return __arch_counter_get_cntvct_stable(); | ||
165 | } | ||
166 | |||
167 | static u64 arch_counter_get_cntvct(void) | ||
168 | { | ||
169 | return __arch_counter_get_cntvct(); | ||
170 | } | ||
171 | |||
152 | /* | 172 | /* |
153 | * Default to cp15 based access because arm64 uses this function for | 173 | * Default to cp15 based access because arm64 uses this function for |
154 | * sched_clock() before DT is probed and the cp15 method is guaranteed | 174 | * sched_clock() before DT is probed and the cp15 method is guaranteed |
@@ -316,13 +336,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void) | |||
316 | } | 336 | } |
317 | #endif | 337 | #endif |
318 | 338 | ||
319 | #ifdef CONFIG_ARM64_ERRATUM_1188873 | ||
320 | static u64 notrace arm64_1188873_read_cntvct_el0(void) | ||
321 | { | ||
322 | return read_sysreg(cntvct_el0); | ||
323 | } | ||
324 | #endif | ||
325 | |||
326 | #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 | 339 | #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 |
327 | /* | 340 | /* |
328 | * The low bits of the counter registers are indeterminate while bit 10 or | 341 | * The low bits of the counter registers are indeterminate while bit 10 or |
@@ -369,8 +382,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void) | |||
369 | DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); | 382 | DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); |
370 | EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); | 383 | EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); |
371 | 384 | ||
372 | DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); | 385 | static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0); |
373 | EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled); | ||
374 | 386 | ||
375 | static void erratum_set_next_event_tval_generic(const int access, unsigned long evt, | 387 | static void erratum_set_next_event_tval_generic(const int access, unsigned long evt, |
376 | struct clock_event_device *clk) | 388 | struct clock_event_device *clk) |
@@ -454,14 +466,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { | |||
454 | .read_cntvct_el0 = arm64_858921_read_cntvct_el0, | 466 | .read_cntvct_el0 = arm64_858921_read_cntvct_el0, |
455 | }, | 467 | }, |
456 | #endif | 468 | #endif |
457 | #ifdef CONFIG_ARM64_ERRATUM_1188873 | ||
458 | { | ||
459 | .match_type = ate_match_local_cap_id, | ||
460 | .id = (void *)ARM64_WORKAROUND_1188873, | ||
461 | .desc = "ARM erratum 1188873", | ||
462 | .read_cntvct_el0 = arm64_1188873_read_cntvct_el0, | ||
463 | }, | ||
464 | #endif | ||
465 | #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 | 469 | #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 |
466 | { | 470 | { |
467 | .match_type = ate_match_dt, | 471 | .match_type = ate_match_dt, |
@@ -549,11 +553,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa | |||
549 | per_cpu(timer_unstable_counter_workaround, i) = wa; | 553 | per_cpu(timer_unstable_counter_workaround, i) = wa; |
550 | } | 554 | } |
551 | 555 | ||
552 | /* | 556 | if (wa->read_cntvct_el0 || wa->read_cntpct_el0) |
553 | * Use the locked version, as we're called from the CPU | 557 | atomic_set(&timer_unstable_counter_workaround_in_use, 1); |
554 | * hotplug framework. Otherwise, we end-up in deadlock-land. | ||
555 | */ | ||
556 | static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled); | ||
557 | 558 | ||
558 | /* | 559 | /* |
559 | * Don't use the vdso fastpath if errata require using the | 560 | * Don't use the vdso fastpath if errata require using the |
@@ -570,7 +571,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa | |||
570 | static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type, | 571 | static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type, |
571 | void *arg) | 572 | void *arg) |
572 | { | 573 | { |
573 | const struct arch_timer_erratum_workaround *wa; | 574 | const struct arch_timer_erratum_workaround *wa, *__wa; |
574 | ate_match_fn_t match_fn = NULL; | 575 | ate_match_fn_t match_fn = NULL; |
575 | bool local = false; | 576 | bool local = false; |
576 | 577 | ||
@@ -594,53 +595,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t | |||
594 | if (!wa) | 595 | if (!wa) |
595 | return; | 596 | return; |
596 | 597 | ||
597 | if (needs_unstable_timer_counter_workaround()) { | 598 | __wa = __this_cpu_read(timer_unstable_counter_workaround); |
598 | const struct arch_timer_erratum_workaround *__wa; | 599 | if (__wa && wa != __wa) |
599 | __wa = __this_cpu_read(timer_unstable_counter_workaround); | 600 | pr_warn("Can't enable workaround for %s (clashes with %s)\n", |
600 | if (__wa && wa != __wa) | 601 | wa->desc, __wa->desc); |
601 | pr_warn("Can't enable workaround for %s (clashes with %s\n)", | ||
602 | wa->desc, __wa->desc); | ||
603 | 602 | ||
604 | if (__wa) | 603 | if (__wa) |
605 | return; | 604 | return; |
606 | } | ||
607 | 605 | ||
608 | arch_timer_enable_workaround(wa, local); | 606 | arch_timer_enable_workaround(wa, local); |
609 | pr_info("Enabling %s workaround for %s\n", | 607 | pr_info("Enabling %s workaround for %s\n", |
610 | local ? "local" : "global", wa->desc); | 608 | local ? "local" : "global", wa->desc); |
611 | } | 609 | } |
612 | 610 | ||
613 | #define erratum_handler(fn, r, ...) \ | ||
614 | ({ \ | ||
615 | bool __val; \ | ||
616 | if (needs_unstable_timer_counter_workaround()) { \ | ||
617 | const struct arch_timer_erratum_workaround *__wa; \ | ||
618 | __wa = __this_cpu_read(timer_unstable_counter_workaround); \ | ||
619 | if (__wa && __wa->fn) { \ | ||
620 | r = __wa->fn(__VA_ARGS__); \ | ||
621 | __val = true; \ | ||
622 | } else { \ | ||
623 | __val = false; \ | ||
624 | } \ | ||
625 | } else { \ | ||
626 | __val = false; \ | ||
627 | } \ | ||
628 | __val; \ | ||
629 | }) | ||
630 | |||
631 | static bool arch_timer_this_cpu_has_cntvct_wa(void) | 611 | static bool arch_timer_this_cpu_has_cntvct_wa(void) |
632 | { | 612 | { |
633 | const struct arch_timer_erratum_workaround *wa; | 613 | return has_erratum_handler(read_cntvct_el0); |
614 | } | ||
634 | 615 | ||
635 | wa = __this_cpu_read(timer_unstable_counter_workaround); | 616 | static bool arch_timer_counter_has_wa(void) |
636 | return wa && wa->read_cntvct_el0; | 617 | { |
618 | return atomic_read(&timer_unstable_counter_workaround_in_use); | ||
637 | } | 619 | } |
638 | #else | 620 | #else |
639 | #define arch_timer_check_ool_workaround(t,a) do { } while(0) | 621 | #define arch_timer_check_ool_workaround(t,a) do { } while(0) |
640 | #define erratum_set_next_event_tval_virt(...) ({BUG(); 0;}) | ||
641 | #define erratum_set_next_event_tval_phys(...) ({BUG(); 0;}) | ||
642 | #define erratum_handler(fn, r, ...) ({false;}) | ||
643 | #define arch_timer_this_cpu_has_cntvct_wa() ({false;}) | 622 | #define arch_timer_this_cpu_has_cntvct_wa() ({false;}) |
623 | #define arch_timer_counter_has_wa() ({false;}) | ||
644 | #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ | 624 | #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ |
645 | 625 | ||
646 | static __always_inline irqreturn_t timer_handler(const int access, | 626 | static __always_inline irqreturn_t timer_handler(const int access, |
@@ -733,11 +713,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt, | |||
733 | static int arch_timer_set_next_event_virt(unsigned long evt, | 713 | static int arch_timer_set_next_event_virt(unsigned long evt, |
734 | struct clock_event_device *clk) | 714 | struct clock_event_device *clk) |
735 | { | 715 | { |
736 | int ret; | ||
737 | |||
738 | if (erratum_handler(set_next_event_virt, ret, evt, clk)) | ||
739 | return ret; | ||
740 | |||
741 | set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); | 716 | set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); |
742 | return 0; | 717 | return 0; |
743 | } | 718 | } |
@@ -745,11 +720,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt, | |||
745 | static int arch_timer_set_next_event_phys(unsigned long evt, | 720 | static int arch_timer_set_next_event_phys(unsigned long evt, |
746 | struct clock_event_device *clk) | 721 | struct clock_event_device *clk) |
747 | { | 722 | { |
748 | int ret; | ||
749 | |||
750 | if (erratum_handler(set_next_event_phys, ret, evt, clk)) | ||
751 | return ret; | ||
752 | |||
753 | set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); | 723 | set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); |
754 | return 0; | 724 | return 0; |
755 | } | 725 | } |
@@ -774,6 +744,10 @@ static void __arch_timer_setup(unsigned type, | |||
774 | clk->features = CLOCK_EVT_FEAT_ONESHOT; | 744 | clk->features = CLOCK_EVT_FEAT_ONESHOT; |
775 | 745 | ||
776 | if (type == ARCH_TIMER_TYPE_CP15) { | 746 | if (type == ARCH_TIMER_TYPE_CP15) { |
747 | typeof(clk->set_next_event) sne; | ||
748 | |||
749 | arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); | ||
750 | |||
777 | if (arch_timer_c3stop) | 751 | if (arch_timer_c3stop) |
778 | clk->features |= CLOCK_EVT_FEAT_C3STOP; | 752 | clk->features |= CLOCK_EVT_FEAT_C3STOP; |
779 | clk->name = "arch_sys_timer"; | 753 | clk->name = "arch_sys_timer"; |
@@ -784,20 +758,20 @@ static void __arch_timer_setup(unsigned type, | |||
784 | case ARCH_TIMER_VIRT_PPI: | 758 | case ARCH_TIMER_VIRT_PPI: |
785 | clk->set_state_shutdown = arch_timer_shutdown_virt; | 759 | clk->set_state_shutdown = arch_timer_shutdown_virt; |
786 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; | 760 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; |
787 | clk->set_next_event = arch_timer_set_next_event_virt; | 761 | sne = erratum_handler(set_next_event_virt); |
788 | break; | 762 | break; |
789 | case ARCH_TIMER_PHYS_SECURE_PPI: | 763 | case ARCH_TIMER_PHYS_SECURE_PPI: |
790 | case ARCH_TIMER_PHYS_NONSECURE_PPI: | 764 | case ARCH_TIMER_PHYS_NONSECURE_PPI: |
791 | case ARCH_TIMER_HYP_PPI: | 765 | case ARCH_TIMER_HYP_PPI: |
792 | clk->set_state_shutdown = arch_timer_shutdown_phys; | 766 | clk->set_state_shutdown = arch_timer_shutdown_phys; |
793 | clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; | 767 | clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; |
794 | clk->set_next_event = arch_timer_set_next_event_phys; | 768 | sne = erratum_handler(set_next_event_phys); |
795 | break; | 769 | break; |
796 | default: | 770 | default: |
797 | BUG(); | 771 | BUG(); |
798 | } | 772 | } |
799 | 773 | ||
800 | arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); | 774 | clk->set_next_event = sne; |
801 | } else { | 775 | } else { |
802 | clk->features |= CLOCK_EVT_FEAT_DYNIRQ; | 776 | clk->features |= CLOCK_EVT_FEAT_DYNIRQ; |
803 | clk->name = "arch_mem_timer"; | 777 | clk->name = "arch_mem_timer"; |
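
In the hunk above, erratum_handler() now evaluates to a function pointer (either the per-cpu workaround hook or the default arch_timer_* implementation) instead of being an inline fallback check. The reworked macros live in arch/arm64/include/asm/arch_timer.h, outside this diff; roughly, they take this shape (a sketch for orientation, not the authoritative definition):

	#define has_erratum_handler(h)						\
	({									\
		const struct arch_timer_erratum_workaround *__wa;		\
		__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
		(__wa && __wa->h);						\
	})

	#define erratum_handler(h)						\
	({									\
		const struct arch_timer_erratum_workaround *__wa;		\
		__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
		(__wa && __wa->h) ? __wa->h : arch_timer_##h;			\
	})
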
@@ -830,7 +804,11 @@ static void arch_timer_evtstrm_enable(int divider) | |||
830 | cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) | 804 | cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) |
831 | | ARCH_TIMER_VIRT_EVT_EN; | 805 | | ARCH_TIMER_VIRT_EVT_EN; |
832 | arch_timer_set_cntkctl(cntkctl); | 806 | arch_timer_set_cntkctl(cntkctl); |
807 | #ifdef CONFIG_ARM64 | ||
808 | cpu_set_named_feature(EVTSTRM); | ||
809 | #else | ||
833 | elf_hwcap |= HWCAP_EVTSTRM; | 810 | elf_hwcap |= HWCAP_EVTSTRM; |
811 | #endif | ||
834 | #ifdef CONFIG_COMPAT | 812 | #ifdef CONFIG_COMPAT |
835 | compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; | 813 | compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; |
836 | #endif | 814 | #endif |
@@ -995,12 +973,22 @@ static void __init arch_counter_register(unsigned type) | |||
995 | 973 | ||
996 | /* Register the CP15 based counter if we have one */ | 974 | /* Register the CP15 based counter if we have one */ |
997 | if (type & ARCH_TIMER_TYPE_CP15) { | 975 | if (type & ARCH_TIMER_TYPE_CP15) { |
976 | u64 (*rd)(void); | ||
977 | |||
998 | if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || | 978 | if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || |
999 | arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) | 979 | arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { |
1000 | arch_timer_read_counter = arch_counter_get_cntvct; | 980 | if (arch_timer_counter_has_wa()) |
1001 | else | 981 | rd = arch_counter_get_cntvct_stable; |
1002 | arch_timer_read_counter = arch_counter_get_cntpct; | 982 | else |
983 | rd = arch_counter_get_cntvct; | ||
984 | } else { | ||
985 | if (arch_timer_counter_has_wa()) | ||
986 | rd = arch_counter_get_cntpct_stable; | ||
987 | else | ||
988 | rd = arch_counter_get_cntpct; | ||
989 | } | ||
1003 | 990 | ||
991 | arch_timer_read_counter = rd; | ||
1004 | clocksource_counter.archdata.vdso_direct = vdso_default; | 992 | clocksource_counter.archdata.vdso_direct = vdso_default; |
1005 | } else { | 993 | } else { |
1006 | arch_timer_read_counter = arch_counter_get_cntvct_mem; | 994 | arch_timer_read_counter = arch_counter_get_cntvct_mem; |
@@ -1052,7 +1040,11 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self, | |||
1052 | } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { | 1040 | } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { |
1053 | arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); | 1041 | arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); |
1054 | 1042 | ||
1043 | #ifdef CONFIG_ARM64 | ||
1044 | if (cpu_have_named_feature(EVTSTRM)) | ||
1045 | #else | ||
1055 | if (elf_hwcap & HWCAP_EVTSTRM) | 1046 | if (elf_hwcap & HWCAP_EVTSTRM) |
1047 | #endif | ||
1056 | cpumask_set_cpu(smp_processor_id(), &evtstrm_available); | 1048 | cpumask_set_cpu(smp_processor_id(), &evtstrm_available); |
1057 | } | 1049 | } |
1058 | return NOTIFY_OK; | 1050 | return NOTIFY_OK; |
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index e6376f985ef7..9cd70d1a5622 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c | |||
@@ -165,6 +165,7 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0, | |||
165 | 165 | ||
166 | return err; | 166 | return err; |
167 | } | 167 | } |
168 | NOKPROBE_SYMBOL(invoke_sdei_fn); | ||
168 | 169 | ||
169 | static struct sdei_event *sdei_event_find(u32 event_num) | 170 | static struct sdei_event *sdei_event_find(u32 event_num) |
170 | { | 171 | { |
@@ -879,6 +880,7 @@ static void sdei_smccc_smc(unsigned long function_id, | |||
879 | { | 880 | { |
880 | arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); | 881 | arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); |
881 | } | 882 | } |
883 | NOKPROBE_SYMBOL(sdei_smccc_smc); | ||
882 | 884 | ||
883 | static void sdei_smccc_hvc(unsigned long function_id, | 885 | static void sdei_smccc_hvc(unsigned long function_id, |
884 | unsigned long arg0, unsigned long arg1, | 886 | unsigned long arg0, unsigned long arg1, |
@@ -887,6 +889,7 @@ static void sdei_smccc_hvc(unsigned long function_id, | |||
887 | { | 889 | { |
888 | arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); | 890 | arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); |
889 | } | 891 | } |
892 | NOKPROBE_SYMBOL(sdei_smccc_hvc); | ||
890 | 893 | ||
891 | int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, | 894 | int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, |
892 | sdei_event_callback *critical_cb) | 895 | sdei_event_callback *critical_cb) |
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index b1f7b64652db..0460c7581220 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile | |||
@@ -16,9 +16,9 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ | |||
16 | 16 | ||
17 | # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly | 17 | # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly |
18 | # disable the stackleak plugin | 18 | # disable the stackleak plugin |
19 | cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \ | 19 | cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ |
20 | $(DISABLE_STACKLEAK_PLUGIN) | 20 | -fpie $(DISABLE_STACKLEAK_PLUGIN) |
21 | cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ | 21 | cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ |
22 | -fno-builtin -fpic \ | 22 | -fno-builtin -fpic \ |
23 | $(call cc-option,-mno-single-pic-base) | 23 | $(call cc-option,-mno-single-pic-base) |
24 | 24 | ||
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index af9bc178495d..a94e586a58b2 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig | |||
@@ -52,6 +52,15 @@ config ARM_PMU_ACPI | |||
52 | depends on ARM_PMU && ACPI | 52 | depends on ARM_PMU && ACPI |
53 | def_bool y | 53 | def_bool y |
54 | 54 | ||
55 | config ARM_SMMU_V3_PMU | ||
56 | tristate "ARM SMMUv3 Performance Monitors Extension" | ||
57 | depends on ARM64 && ACPI && ARM_SMMU_V3 | ||
58 | help | ||
59 | Provides support for the ARM SMMUv3 Performance Monitor Counter | ||
60 | Groups (PMCG), which provide monitoring of transactions passing | ||
61 | through the SMMU and allow the resulting information to be filtered | ||
62 | based on the Stream ID of the corresponding master. | ||
63 | |||
55 | config ARM_DSU_PMU | 64 | config ARM_DSU_PMU |
56 | tristate "ARM DynamIQ Shared Unit (DSU) PMU" | 65 | tristate "ARM DynamIQ Shared Unit (DSU) PMU" |
57 | depends on ARM64 | 66 | depends on ARM64 |
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 909f27fd9db3..30489941f3d6 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile | |||
@@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o | |||
4 | obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o | 4 | obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o |
5 | obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o | 5 | obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o |
6 | obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o | 6 | obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o |
7 | obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o | ||
7 | obj-$(CONFIG_HISI_PMU) += hisilicon/ | 8 | obj-$(CONFIG_HISI_PMU) += hisilicon/ |
8 | obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o | 9 | obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o |
9 | obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o | 10 | obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o |
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index bfd03e023308..8f8606b9bc9e 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c | |||
@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev) | |||
1684 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); | 1684 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); |
1685 | mutex_init(&cci_pmu->reserve_mutex); | 1685 | mutex_init(&cci_pmu->reserve_mutex); |
1686 | atomic_set(&cci_pmu->active_events, 0); | 1686 | atomic_set(&cci_pmu->active_events, 0); |
1687 | cci_pmu->cpu = get_cpu(); | ||
1688 | |||
1689 | ret = cci_pmu_init(cci_pmu, pdev); | ||
1690 | if (ret) { | ||
1691 | put_cpu(); | ||
1692 | return ret; | ||
1693 | } | ||
1694 | 1687 | ||
1688 | cci_pmu->cpu = raw_smp_processor_id(); | ||
1689 | g_cci_pmu = cci_pmu; | ||
1695 | cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, | 1690 | cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, |
1696 | "perf/arm/cci:online", NULL, | 1691 | "perf/arm/cci:online", NULL, |
1697 | cci_pmu_offline_cpu); | 1692 | cci_pmu_offline_cpu); |
1698 | put_cpu(); | 1693 | |
1699 | g_cci_pmu = cci_pmu; | 1694 | ret = cci_pmu_init(cci_pmu, pdev); |
1695 | if (ret) | ||
1696 | goto error_pmu_init; | ||
1697 | |||
1700 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); | 1698 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); |
1701 | return 0; | 1699 | return 0; |
1700 | |||
1701 | error_pmu_init: | ||
1702 | cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); | ||
1703 | g_cci_pmu = NULL; | ||
1704 | return ret; | ||
1702 | } | 1705 | } |
1703 | 1706 | ||
1704 | static int cci_pmu_remove(struct platform_device *pdev) | 1707 | static int cci_pmu_remove(struct platform_device *pdev) |
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 2ae76026e947..0bb52d9bdcf7 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c | |||
@@ -167,7 +167,7 @@ struct arm_ccn_dt { | |||
167 | 167 | ||
168 | struct hrtimer hrtimer; | 168 | struct hrtimer hrtimer; |
169 | 169 | ||
170 | cpumask_t cpu; | 170 | unsigned int cpu; |
171 | struct hlist_node node; | 171 | struct hlist_node node; |
172 | 172 | ||
173 | struct pmu pmu; | 173 | struct pmu pmu; |
@@ -559,7 +559,7 @@ static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, | |||
559 | { | 559 | { |
560 | struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); | 560 | struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); |
561 | 561 | ||
562 | return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); | 562 | return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu)); |
563 | } | 563 | } |
564 | 564 | ||
565 | static struct device_attribute arm_ccn_pmu_cpumask_attr = | 565 | static struct device_attribute arm_ccn_pmu_cpumask_attr = |
@@ -759,7 +759,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
759 | * mitigate this, we enforce CPU assignment to one, selected | 759 | * mitigate this, we enforce CPU assignment to one, selected |
760 | * processor (the one described in the "cpumask" attribute). | 760 | * processor (the one described in the "cpumask" attribute). |
761 | */ | 761 | */ |
762 | event->cpu = cpumask_first(&ccn->dt.cpu); | 762 | event->cpu = ccn->dt.cpu; |
763 | 763 | ||
764 | node_xp = CCN_CONFIG_NODE(event->attr.config); | 764 | node_xp = CCN_CONFIG_NODE(event->attr.config); |
765 | type = CCN_CONFIG_TYPE(event->attr.config); | 765 | type = CCN_CONFIG_TYPE(event->attr.config); |
@@ -1215,15 +1215,15 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) | |||
1215 | struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); | 1215 | struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); |
1216 | unsigned int target; | 1216 | unsigned int target; |
1217 | 1217 | ||
1218 | if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) | 1218 | if (cpu != dt->cpu) |
1219 | return 0; | 1219 | return 0; |
1220 | target = cpumask_any_but(cpu_online_mask, cpu); | 1220 | target = cpumask_any_but(cpu_online_mask, cpu); |
1221 | if (target >= nr_cpu_ids) | 1221 | if (target >= nr_cpu_ids) |
1222 | return 0; | 1222 | return 0; |
1223 | perf_pmu_migrate_context(&dt->pmu, cpu, target); | 1223 | perf_pmu_migrate_context(&dt->pmu, cpu, target); |
1224 | cpumask_set_cpu(target, &dt->cpu); | 1224 | dt->cpu = target; |
1225 | if (ccn->irq) | 1225 | if (ccn->irq) |
1226 | WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); | 1226 | WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu))); |
1227 | return 0; | 1227 | return 0; |
1228 | } | 1228 | } |
1229 | 1229 | ||
@@ -1299,29 +1299,30 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) | |||
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | /* Pick one CPU which we will use to collect data from CCN... */ | 1301 | /* Pick one CPU which we will use to collect data from CCN... */ |
1302 | cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); | 1302 | ccn->dt.cpu = raw_smp_processor_id(); |
1303 | 1303 | ||
1304 | /* Also make sure that the overflow interrupt is handled by this CPU */ | 1304 | /* Also make sure that the overflow interrupt is handled by this CPU */ |
1305 | if (ccn->irq) { | 1305 | if (ccn->irq) { |
1306 | err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); | 1306 | err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu)); |
1307 | if (err) { | 1307 | if (err) { |
1308 | dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); | 1308 | dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); |
1309 | goto error_set_affinity; | 1309 | goto error_set_affinity; |
1310 | } | 1310 | } |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, | ||
1314 | &ccn->dt.node); | ||
1315 | |||
1313 | err = perf_pmu_register(&ccn->dt.pmu, name, -1); | 1316 | err = perf_pmu_register(&ccn->dt.pmu, name, -1); |
1314 | if (err) | 1317 | if (err) |
1315 | goto error_pmu_register; | 1318 | goto error_pmu_register; |
1316 | 1319 | ||
1317 | cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, | ||
1318 | &ccn->dt.node); | ||
1319 | put_cpu(); | ||
1320 | return 0; | 1320 | return 0; |
1321 | 1321 | ||
1322 | error_pmu_register: | 1322 | error_pmu_register: |
1323 | cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, | ||
1324 | &ccn->dt.node); | ||
1323 | error_set_affinity: | 1325 | error_set_affinity: |
1324 | put_cpu(); | ||
1325 | error_choose_name: | 1326 | error_choose_name: |
1326 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); | 1327 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); |
1327 | for (i = 0; i < ccn->num_xps; i++) | 1328 | for (i = 0; i < ccn->num_xps; i++) |
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c new file mode 100644 index 000000000000..da71c741cb46 --- /dev/null +++ b/drivers/perf/arm_smmuv3_pmu.c | |||
@@ -0,0 +1,865 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | /* | ||
4 | * This driver adds support for perf events to use the Performance | ||
5 | * Monitor Counter Groups (PMCG) associated with an SMMUv3 node | ||
6 | * to monitor that node. | ||
7 | * | ||
8 | * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where | ||
9 | * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped | ||
10 | * to a 4K boundary. For example, the PMCG at 0xff88840000 is named | ||
11 | * smmuv3_pmcg_ff88840 | ||
12 | * | ||
13 | * Filtering by stream id is done by specifying filtering parameters | ||
14 | * with the event. Options are: | ||
15 | * filter_enable - 0 = no filtering, 1 = filtering enabled | ||
16 | * filter_span - 0 = exact match, 1 = pattern match | ||
17 | * filter_stream_id - pattern to filter against | ||
18 | * | ||
19 | * To match a partial StreamID where the X most-significant bits must match | ||
20 | * but the Y least-significant bits might differ, STREAMID is programmed | ||
21 | * with a value that contains: | ||
22 | * STREAMID[Y - 1] == 0. | ||
23 | * STREAMID[Y - 2:0] == 1 (where Y > 1). | ||
24 | * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards) | ||
25 | * contain a value to match from the corresponding bits of event StreamID. | ||
26 | * | ||
27 | * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1, | ||
28 | * filter_span=1,filter_stream_id=0x42/ -a netperf | ||
29 | * Applies filter pattern 0x42 to transaction events, which means events | ||
30 | * matching stream ids 0x42 and 0x43 are counted. Further filtering | ||
31 | * information is available in the SMMU documentation. | ||
32 | * | ||
33 | * SMMU events are not attributable to a CPU, so task mode and sampling | ||
34 | * are not supported. | ||
35 | */ | ||
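
As a worked illustration of the STREAMID span encoding described above, a helper along these lines would build the pattern (hypothetical, not part of the driver; GENMASK is from <linux/bits.h>):

	/* Match any StreamID whose bits [31:y] equal those of 'base',
	 * with the low y bits wild (y >= 1): bit (y - 1) is 0 and
	 * bits (y - 2):0 are all 1, per the encoding above. */
	static u32 example_pmcg_span_pattern(u32 base, unsigned int y)
	{
		u32 pattern = base & ~GENMASK(y - 1, 0);	/* bits to match */

		if (y > 1)
			pattern |= GENMASK(y - 2, 0);		/* low run of 1s */
		return pattern;					/* bit (y - 1) left 0 */
	}

example_pmcg_span_pattern(0x42, 1) yields 0x42, which with filter_span=1 matches stream ids 0x42 and 0x43, exactly as in the perf stat example above.
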
36 | |||
37 | #include <linux/acpi.h> | ||
38 | #include <linux/acpi_iort.h> | ||
39 | #include <linux/bitfield.h> | ||
40 | #include <linux/bitops.h> | ||
41 | #include <linux/cpuhotplug.h> | ||
42 | #include <linux/cpumask.h> | ||
43 | #include <linux/device.h> | ||
44 | #include <linux/errno.h> | ||
45 | #include <linux/interrupt.h> | ||
46 | #include <linux/irq.h> | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/list.h> | ||
49 | #include <linux/msi.h> | ||
50 | #include <linux/perf_event.h> | ||
51 | #include <linux/platform_device.h> | ||
52 | #include <linux/smp.h> | ||
53 | #include <linux/sysfs.h> | ||
54 | #include <linux/types.h> | ||
55 | |||
56 | #define SMMU_PMCG_EVCNTR0 0x0 | ||
57 | #define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride)) | ||
58 | #define SMMU_PMCG_EVTYPER0 0x400 | ||
59 | #define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4) | ||
60 | #define SMMU_PMCG_SID_SPAN_SHIFT 29 | ||
61 | #define SMMU_PMCG_SMR0 0xA00 | ||
62 | #define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4) | ||
63 | #define SMMU_PMCG_CNTENSET0 0xC00 | ||
64 | #define SMMU_PMCG_CNTENCLR0 0xC20 | ||
65 | #define SMMU_PMCG_INTENSET0 0xC40 | ||
66 | #define SMMU_PMCG_INTENCLR0 0xC60 | ||
67 | #define SMMU_PMCG_OVSCLR0 0xC80 | ||
68 | #define SMMU_PMCG_OVSSET0 0xCC0 | ||
69 | #define SMMU_PMCG_CFGR 0xE00 | ||
70 | #define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23) | ||
71 | #define SMMU_PMCG_CFGR_MSI BIT(21) | ||
72 | #define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20) | ||
73 | #define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8) | ||
74 | #define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0) | ||
75 | #define SMMU_PMCG_CR 0xE04 | ||
76 | #define SMMU_PMCG_CR_ENABLE BIT(0) | ||
77 | #define SMMU_PMCG_CEID0 0xE20 | ||
78 | #define SMMU_PMCG_CEID1 0xE28 | ||
79 | #define SMMU_PMCG_IRQ_CTRL 0xE50 | ||
80 | #define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0) | ||
81 | #define SMMU_PMCG_IRQ_CFG0 0xE58 | ||
82 | #define SMMU_PMCG_IRQ_CFG1 0xE60 | ||
83 | #define SMMU_PMCG_IRQ_CFG2 0xE64 | ||
84 | |||
85 | /* MSI config fields */ | ||
86 | #define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) | ||
87 | #define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1 | ||
88 | |||
89 | #define SMMU_PMCG_DEFAULT_FILTER_SPAN 1 | ||
90 | #define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0) | ||
91 | |||
92 | #define SMMU_PMCG_MAX_COUNTERS 64 | ||
93 | #define SMMU_PMCG_ARCH_MAX_EVENTS 128 | ||
94 | |||
95 | #define SMMU_PMCG_PA_SHIFT 12 | ||
96 | |||
97 | #define SMMU_PMCG_EVCNTR_RDONLY BIT(0) | ||
98 | |||
99 | static int cpuhp_state_num; | ||
100 | |||
101 | struct smmu_pmu { | ||
102 | struct hlist_node node; | ||
103 | struct perf_event *events[SMMU_PMCG_MAX_COUNTERS]; | ||
104 | DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS); | ||
105 | DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS); | ||
106 | unsigned int irq; | ||
107 | unsigned int on_cpu; | ||
108 | struct pmu pmu; | ||
109 | unsigned int num_counters; | ||
110 | struct device *dev; | ||
111 | void __iomem *reg_base; | ||
112 | void __iomem *reloc_base; | ||
113 | u64 counter_mask; | ||
114 | u32 options; | ||
115 | bool global_filter; | ||
116 | u32 global_filter_span; | ||
117 | u32 global_filter_sid; | ||
118 | }; | ||
119 | |||
120 | #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) | ||
121 | |||
122 | #define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \ | ||
123 | static inline u32 get_##_name(struct perf_event *event) \ | ||
124 | { \ | ||
125 | return FIELD_GET(GENMASK_ULL(_end, _start), \ | ||
126 | event->attr._config); \ | ||
127 | } | ||
128 | |||
129 | SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15); | ||
130 | SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31); | ||
131 | SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32); | ||
132 | SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33); | ||
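
These extractors decode the attr bitfields advertised by the format group further down. For the perf stat example in the header comment, the values would be assembled as (shown for illustration):

	u64 config  = 0x1;					/* event: transaction */
	u64 config1 = (1ULL << 33) | (1ULL << 32) | 0x42;	/* 0x300000042 */

so get_event() returns 1, get_filter_enable() and get_filter_span() both return 1, and get_filter_stream_id() returns 0x42.
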
133 | |||
134 | static inline void smmu_pmu_enable(struct pmu *pmu) | ||
135 | { | ||
136 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); | ||
137 | |||
138 | writel(SMMU_PMCG_IRQ_CTRL_IRQEN, | ||
139 | smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); | ||
140 | writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); | ||
141 | } | ||
142 | |||
143 | static inline void smmu_pmu_disable(struct pmu *pmu) | ||
144 | { | ||
145 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); | ||
146 | |||
147 | writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); | ||
148 | writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); | ||
149 | } | ||
150 | |||
151 | static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu, | ||
152 | u32 idx, u64 value) | ||
153 | { | ||
154 | if (smmu_pmu->counter_mask & BIT(32)) | ||
155 | writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); | ||
156 | else | ||
157 | writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); | ||
158 | } | ||
159 | |||
160 | static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx) | ||
161 | { | ||
162 | u64 value; | ||
163 | |||
164 | if (smmu_pmu->counter_mask & BIT(32)) | ||
165 | value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); | ||
166 | else | ||
167 | value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); | ||
168 | |||
169 | return value; | ||
170 | } | ||
171 | |||
172 | static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx) | ||
173 | { | ||
174 | writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); | ||
175 | } | ||
176 | |||
177 | static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx) | ||
178 | { | ||
179 | writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); | ||
180 | } | ||
181 | |||
182 | static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx) | ||
183 | { | ||
184 | writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); | ||
185 | } | ||
186 | |||
187 | static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu, | ||
188 | u32 idx) | ||
189 | { | ||
190 | writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); | ||
191 | } | ||
192 | |||
193 | static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx, | ||
194 | u32 val) | ||
195 | { | ||
196 | writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); | ||
197 | } | ||
198 | |||
199 | static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val) | ||
200 | { | ||
201 | writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); | ||
202 | } | ||
203 | |||
204 | static void smmu_pmu_event_update(struct perf_event *event) | ||
205 | { | ||
206 | struct hw_perf_event *hwc = &event->hw; | ||
207 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
208 | u64 delta, prev, now; | ||
209 | u32 idx = hwc->idx; | ||
210 | |||
211 | do { | ||
212 | prev = local64_read(&hwc->prev_count); | ||
213 | now = smmu_pmu_counter_get_value(smmu_pmu, idx); | ||
214 | } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); | ||
215 | |||
216 | /* handle overflow. */ | ||
217 | delta = now - prev; | ||
218 | delta &= smmu_pmu->counter_mask; | ||
219 | |||
220 | local64_add(delta, &event->count); | ||
221 | } | ||
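
The masked subtraction above keeps the delta correct across a counter wrap. A worked example, assuming a 32-bit counter (counter_mask == 0xffffffff):

	u64 prev  = 0xfffffffe;
	u64 now   = 0x00000001;			/* counter wrapped */
	u64 delta = (now - prev) & 0xffffffff;	/* = 0x3: three events */

Combined with the half-maximum start value programmed by smmu_pmu_set_period() below, the counter would have to advance through more than half its range between interrupts before a double wrap could fool this arithmetic.
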
222 | |||
223 | static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu, | ||
224 | struct hw_perf_event *hwc) | ||
225 | { | ||
226 | u32 idx = hwc->idx; | ||
227 | u64 new; | ||
228 | |||
229 | if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { | ||
230 | /* | ||
231 | * On platforms that require this quirk, if the counter starts | ||
232 | * at less than half its maximum value and wraps, the current | ||
233 | * logic of handling the overflow may not work. It is expected | ||
234 | * that those platforms will have all 64 counter bits implemented, | ||
235 | * so that such a possibility is remote (e.g. HiSilicon HIP08). | ||
236 | */ | ||
237 | new = smmu_pmu_counter_get_value(smmu_pmu, idx); | ||
238 | } else { | ||
239 | /* | ||
240 | * We limit the max period to half the maximum value of the | ||
241 | * counter, so that even in the case of extreme | ||
242 | * interrupt latency the counter will (hopefully) not wrap | ||
243 | * past its initial value. | ||
244 | */ | ||
245 | new = smmu_pmu->counter_mask >> 1; | ||
246 | smmu_pmu_counter_set_value(smmu_pmu, idx, new); | ||
247 | } | ||
248 | |||
249 | local64_set(&hwc->prev_count, new); | ||
250 | } | ||
251 | |||
252 | static void smmu_pmu_set_event_filter(struct perf_event *event, | ||
253 | int idx, u32 span, u32 sid) | ||
254 | { | ||
255 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
256 | u32 evtyper; | ||
257 | |||
258 | evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT; | ||
259 | smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper); | ||
260 | smmu_pmu_set_smr(smmu_pmu, idx, sid); | ||
261 | } | ||
262 | |||
263 | static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, | ||
264 | struct perf_event *event, int idx) | ||
265 | { | ||
266 | u32 span, sid; | ||
267 | unsigned int num_ctrs = smmu_pmu->num_counters; | ||
268 | bool filter_en = !!get_filter_enable(event); | ||
269 | |||
270 | span = filter_en ? get_filter_span(event) : | ||
271 | SMMU_PMCG_DEFAULT_FILTER_SPAN; | ||
272 | sid = filter_en ? get_filter_stream_id(event) : | ||
273 | SMMU_PMCG_DEFAULT_FILTER_SID; | ||
274 | |||
275 | /* Support individual filter settings */ | ||
276 | if (!smmu_pmu->global_filter) { | ||
277 | smmu_pmu_set_event_filter(event, idx, span, sid); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /* Requested settings are the same as the current global settings */ | ||
282 | if (span == smmu_pmu->global_filter_span && | ||
283 | sid == smmu_pmu->global_filter_sid) | ||
284 | return 0; | ||
285 | |||
286 | if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs)) | ||
287 | return -EAGAIN; | ||
288 | |||
289 | smmu_pmu_set_event_filter(event, 0, span, sid); | ||
290 | smmu_pmu->global_filter_span = span; | ||
291 | smmu_pmu->global_filter_sid = sid; | ||
292 | return 0; | ||
293 | } | ||
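
Note the implication for implementations with a single global filter: the first active event establishes the span/sid pair, and a later event requesting a different pair gets -EAGAIN from smmu_pmu_get_event_idx(), so it simply fails to schedule while the first is in flight rather than being silently mis-filtered. Two concurrent events such as transaction,filter_enable=1,filter_stream_id=0x42 and tlb_miss,filter_enable=1,filter_stream_id=0x99 therefore cannot be counted at the same time on such hardware.
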
294 | |||
295 | static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, | ||
296 | struct perf_event *event) | ||
297 | { | ||
298 | int idx, err; | ||
299 | unsigned int num_ctrs = smmu_pmu->num_counters; | ||
300 | |||
301 | idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); | ||
302 | if (idx == num_ctrs) | ||
303 | /* The counters are all in use. */ | ||
304 | return -EAGAIN; | ||
305 | |||
306 | err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx); | ||
307 | if (err) | ||
308 | return err; | ||
309 | |||
310 | set_bit(idx, smmu_pmu->used_counters); | ||
311 | |||
312 | return idx; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Implementation of abstract pmu functionality required by | ||
317 | * the core perf events code. | ||
318 | */ | ||
319 | |||
320 | static int smmu_pmu_event_init(struct perf_event *event) | ||
321 | { | ||
322 | struct hw_perf_event *hwc = &event->hw; | ||
323 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
324 | struct device *dev = smmu_pmu->dev; | ||
325 | struct perf_event *sibling; | ||
326 | u16 event_id; | ||
327 | |||
328 | if (event->attr.type != event->pmu->type) | ||
329 | return -ENOENT; | ||
330 | |||
331 | if (hwc->sample_period) { | ||
332 | dev_dbg(dev, "Sampling not supported\n"); | ||
333 | return -EOPNOTSUPP; | ||
334 | } | ||
335 | |||
336 | if (event->cpu < 0) { | ||
337 | dev_dbg(dev, "Per-task mode not supported\n"); | ||
338 | return -EOPNOTSUPP; | ||
339 | } | ||
340 | |||
341 | /* Verify specified event is supported on this PMU */ | ||
342 | event_id = get_event(event); | ||
343 | if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS && | ||
344 | (!test_bit(event_id, smmu_pmu->supported_events))) { | ||
345 | dev_dbg(dev, "Invalid event %d for this PMU\n", event_id); | ||
346 | return -EINVAL; | ||
347 | } | ||
348 | |||
349 | /* Don't allow groups with mixed PMUs, except for s/w events */ | ||
350 | if (event->group_leader->pmu != event->pmu && | ||
351 | !is_software_event(event->group_leader)) { | ||
352 | dev_dbg(dev, "Can't create mixed PMU group\n"); | ||
353 | return -EINVAL; | ||
354 | } | ||
355 | |||
356 | for_each_sibling_event(sibling, event->group_leader) { | ||
357 | if (sibling->pmu != event->pmu && | ||
358 | !is_software_event(sibling)) { | ||
359 | dev_dbg(dev, "Can't create mixed PMU group\n"); | ||
360 | return -EINVAL; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | hwc->idx = -1; | ||
365 | |||
366 | /* | ||
367 | * Ensure all events are on the same cpu so all events are in the | ||
368 | * same cpu context, to avoid races on pmu_enable etc. | ||
369 | */ | ||
370 | event->cpu = smmu_pmu->on_cpu; | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static void smmu_pmu_event_start(struct perf_event *event, int flags) | ||
376 | { | ||
377 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
378 | struct hw_perf_event *hwc = &event->hw; | ||
379 | int idx = hwc->idx; | ||
380 | |||
381 | hwc->state = 0; | ||
382 | |||
383 | smmu_pmu_set_period(smmu_pmu, hwc); | ||
384 | |||
385 | smmu_pmu_counter_enable(smmu_pmu, idx); | ||
386 | } | ||
387 | |||
388 | static void smmu_pmu_event_stop(struct perf_event *event, int flags) | ||
389 | { | ||
390 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
391 | struct hw_perf_event *hwc = &event->hw; | ||
392 | int idx = hwc->idx; | ||
393 | |||
394 | if (hwc->state & PERF_HES_STOPPED) | ||
395 | return; | ||
396 | |||
397 | smmu_pmu_counter_disable(smmu_pmu, idx); | ||
398 | /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */ | ||
399 | smmu_pmu_event_update(event); | ||
400 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
401 | } | ||
402 | |||
403 | static int smmu_pmu_event_add(struct perf_event *event, int flags) | ||
404 | { | ||
405 | struct hw_perf_event *hwc = &event->hw; | ||
406 | int idx; | ||
407 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
408 | |||
409 | idx = smmu_pmu_get_event_idx(smmu_pmu, event); | ||
410 | if (idx < 0) | ||
411 | return idx; | ||
412 | |||
413 | hwc->idx = idx; | ||
414 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
415 | smmu_pmu->events[idx] = event; | ||
416 | local64_set(&hwc->prev_count, 0); | ||
417 | |||
418 | smmu_pmu_interrupt_enable(smmu_pmu, idx); | ||
419 | |||
420 | if (flags & PERF_EF_START) | ||
421 | smmu_pmu_event_start(event, flags); | ||
422 | |||
423 | /* Propagate changes to the userspace mapping. */ | ||
424 | perf_event_update_userpage(event); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static void smmu_pmu_event_del(struct perf_event *event, int flags) | ||
430 | { | ||
431 | struct hw_perf_event *hwc = &event->hw; | ||
432 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | ||
433 | int idx = hwc->idx; | ||
434 | |||
435 | smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE); | ||
436 | smmu_pmu_interrupt_disable(smmu_pmu, idx); | ||
437 | smmu_pmu->events[idx] = NULL; | ||
438 | clear_bit(idx, smmu_pmu->used_counters); | ||
439 | |||
440 | perf_event_update_userpage(event); | ||
441 | } | ||
442 | |||
443 | static void smmu_pmu_event_read(struct perf_event *event) | ||
444 | { | ||
445 | smmu_pmu_event_update(event); | ||
446 | } | ||
447 | |||
448 | /* cpumask */ | ||
449 | |||
450 | static ssize_t smmu_pmu_cpumask_show(struct device *dev, | ||
451 | struct device_attribute *attr, | ||
452 | char *buf) | ||
453 | { | ||
454 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); | ||
455 | |||
456 | return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); | ||
457 | } | ||
458 | |||
459 | static struct device_attribute smmu_pmu_cpumask_attr = | ||
460 | __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL); | ||
461 | |||
462 | static struct attribute *smmu_pmu_cpumask_attrs[] = { | ||
463 | &smmu_pmu_cpumask_attr.attr, | ||
464 | NULL | ||
465 | }; | ||
466 | |||
467 | static struct attribute_group smmu_pmu_cpumask_group = { | ||
468 | .attrs = smmu_pmu_cpumask_attrs, | ||
469 | }; | ||
470 | |||
471 | /* Events */ | ||
472 | |||
473 | static ssize_t smmu_pmu_event_show(struct device *dev, | ||
474 | struct device_attribute *attr, char *page) | ||
475 | { | ||
476 | struct perf_pmu_events_attr *pmu_attr; | ||
477 | |||
478 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | ||
479 | |||
480 | return sprintf(page, "event=0x%02llx\n", pmu_attr->id); | ||
481 | } | ||
482 | |||
483 | #define SMMU_EVENT_ATTR(name, config) \ | ||
484 | PMU_EVENT_ATTR(name, smmu_event_attr_##name, \ | ||
485 | config, smmu_pmu_event_show) | ||
486 | SMMU_EVENT_ATTR(cycles, 0); | ||
487 | SMMU_EVENT_ATTR(transaction, 1); | ||
488 | SMMU_EVENT_ATTR(tlb_miss, 2); | ||
489 | SMMU_EVENT_ATTR(config_cache_miss, 3); | ||
490 | SMMU_EVENT_ATTR(trans_table_walk_access, 4); | ||
491 | SMMU_EVENT_ATTR(config_struct_access, 5); | ||
492 | SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6); | ||
493 | SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7); | ||
494 | |||
495 | static struct attribute *smmu_pmu_events[] = { | ||
496 | &smmu_event_attr_cycles.attr.attr, | ||
497 | &smmu_event_attr_transaction.attr.attr, | ||
498 | &smmu_event_attr_tlb_miss.attr.attr, | ||
499 | &smmu_event_attr_config_cache_miss.attr.attr, | ||
500 | &smmu_event_attr_trans_table_walk_access.attr.attr, | ||
501 | &smmu_event_attr_config_struct_access.attr.attr, | ||
502 | &smmu_event_attr_pcie_ats_trans_rq.attr.attr, | ||
503 | &smmu_event_attr_pcie_ats_trans_passed.attr.attr, | ||
504 | NULL | ||
505 | }; | ||
506 | |||
507 | static umode_t smmu_pmu_event_is_visible(struct kobject *kobj, | ||
508 | struct attribute *attr, int unused) | ||
509 | { | ||
510 | struct device *dev = kobj_to_dev(kobj); | ||
511 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); | ||
512 | struct perf_pmu_events_attr *pmu_attr; | ||
513 | |||
514 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); | ||
515 | |||
516 | if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) | ||
517 | return attr->mode; | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static struct attribute_group smmu_pmu_events_group = { | ||
523 | .name = "events", | ||
524 | .attrs = smmu_pmu_events, | ||
525 | .is_visible = smmu_pmu_event_is_visible, | ||
526 | }; | ||
527 | |||
528 | /* Formats */ | ||
529 | PMU_FORMAT_ATTR(event, "config:0-15"); | ||
530 | PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31"); | ||
531 | PMU_FORMAT_ATTR(filter_span, "config1:32"); | ||
532 | PMU_FORMAT_ATTR(filter_enable, "config1:33"); | ||
533 | |||
534 | static struct attribute *smmu_pmu_formats[] = { | ||
535 | &format_attr_event.attr, | ||
536 | &format_attr_filter_stream_id.attr, | ||
537 | &format_attr_filter_span.attr, | ||
538 | &format_attr_filter_enable.attr, | ||
539 | NULL | ||
540 | }; | ||
541 | |||
542 | static struct attribute_group smmu_pmu_format_group = { | ||
543 | .name = "format", | ||
544 | .attrs = smmu_pmu_formats, | ||
545 | }; | ||
546 | |||
547 | static const struct attribute_group *smmu_pmu_attr_grps[] = { | ||
548 | &smmu_pmu_cpumask_group, | ||
549 | &smmu_pmu_events_group, | ||
550 | &smmu_pmu_format_group, | ||
551 | NULL | ||
552 | }; | ||

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}
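/*
 * The handler above leans on smmu_pmu_set_period() (defined earlier in
 * this file) to rearm each overflowed counter. A simplified sketch of
 * the writable-counter case, an assumption for illustration rather
 * than a quote of the driver: park the counter at half its range so
 * the next overflow interrupt fires after counter_mask/2 more events.
 */
static inline void smmu_pmu_set_period_sketch(struct smmu_pmu *smmu_pmu,
					      struct hw_perf_event *hwc)
{
	u64 new = smmu_pmu->counter_mask >> 1;	/* half the counter range */

	smmu_pmu_counter_set_value(smmu_pmu, hwc->idx, new);
	local64_set(&hwc->prev_count, new);
}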

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0, *res_1;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= smmu_pmu_enable,
		.pmu_disable	= smmu_pmu_disable,
		.event_init	= smmu_pmu_event_init,
		.add		= smmu_pmu_event_add,
		.del		= smmu_pmu_event_del,
		.start		= smmu_pmu_event_start,
		.stop		= smmu_pmu_event_stop,
		.read		= smmu_pmu_event_read,
		.attr_groups	= smmu_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	smmu_pmu_get_acpi_options(smmu_pmu);

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
				      cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		goto out_cpuhp_err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
		 "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
out_cpuhp_err:
	put_cpu();
	return err;
}
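/*
 * For reference: the devm_kasprintf() pattern above encodes the
 * physical page frame of the PMCG's page 0 in the perf name, with
 * SMMU_PMCG_PA_SHIFT being the 4K page shift defined earlier in this
 * file. A PMCG based at, say, physical 0xff88840000 (an assumed
 * address) would register as:
 *
 *   /sys/bus/event_source/devices/smmuv3_pmcg_ff88840
 */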

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	return platform_driver_register(&smmu_pmu_driver);
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index e8bd9887c566..e221e47396ab 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -161,7 +161,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
 	timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
 
 	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
-		    arch_counter_get_cntvct();
+		    arch_timer_read_counter();
 
 	do_div(timeleft, gwdt->clk);
 
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index fcb61b4659b3..8666fe7f35d7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -23,7 +23,9 @@
  *
  * Return:
  * 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
  */
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
@@ -85,7 +87,9 @@ out_pagefault_enable:
  *
  * Return:
  * 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
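
The kerneldoc change above encodes a real contract change: an architecture's LL/SC or cmpxchg loop may now give up with -EAGAIN under contention, and callers must retry instead of treating every negative return as a fault. A minimal caller-side sketch of the new discipline (a hypothetical helper, not kernel code; the real call sites in kernel/futex.c below must additionally drop the futex hash-bucket locks before rescheduling):

	/* Hypothetical caller illustrating the -EAGAIN retry contract. */
	static int cmpxchg_user_retrying(u32 *curval, u32 __user *uaddr,
					 u32 uval, u32 newval)
	{
		int err;

		for (;;) {
			err = cmpxchg_futex_value_locked(curval, uaddr,
							 uval, newval);
			if (err != -EAGAIN)
				return err;	/* 0 or -EFAULT */
			cond_resched();		/* back off under contention */
		}
	}
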
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 38cd77b39a64..723e4dfa1c14 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -26,6 +26,14 @@
 #define IORT_IRQ_MASK(irq)		(irq & 0xffffffffULL)
 #define IORT_IRQ_TRIGGER_MASK(irq)	((irq >> 32) & 0xffffffffULL)
 
+/*
+ * PMCG model identifiers for use in smmu pmu driver. Please note
+ * that this is purely for the use of software and has nothing to
+ * do with hardware or with IORT specification.
+ */
+#define IORT_SMMU_V3_PMCG_GENERIC        0x00000000 /* Generic SMMUv3 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP08     0x00000001 /* HiSilicon HIP08 PMCG */
+
 int iort_register_domain_token(int trans_id, phys_addr_t base,
 			       struct fwnode_handle *fw_node);
 void iort_deregister_domain_token(int trans_id);
diff --git a/kernel/futex.c b/kernel/futex.c
index 9e40cf7be606..6262f1534ac9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1311,13 +1311,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
 {
+	int err;
 	u32 uninitialized_var(curval);
 
 	if (unlikely(should_fail_futex(true)))
 		return -EFAULT;
 
-	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
-		return -EFAULT;
+	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+	if (unlikely(err))
+		return err;
 
 	/* If user space value changed, let the caller retry */
 	return curval != uval ? -EAGAIN : 0;
@@ -1502,10 +1504,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 	if (unlikely(should_fail_futex(true)))
 		ret = -EFAULT;
 
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
-		ret = -EFAULT;
-
-	} else if (curval != uval) {
+	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+	if (!ret && (curval != uval)) {
 		/*
 		 * If a unconditional UNLOCK_PI operation (user space did not
 		 * try the TID->0 transition) raced with a waiter setting the
@@ -1700,32 +1700,32 @@ retry_private:
 	double_lock_hb(hb1, hb2);
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
-
 		double_unlock_hb(hb1, hb2);
 
-#ifndef CONFIG_MMU
-		/*
-		 * we don't get EFAULT from MMU faults if we don't have an MMU,
-		 * but we might get them from range checking
-		 */
-		ret = op_ret;
-		goto out_put_keys;
-#endif
-
-		if (unlikely(op_ret != -EFAULT)) {
+		if (!IS_ENABLED(CONFIG_MMU) ||
+		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
+			/*
+			 * we don't get EFAULT from MMU faults if we don't have
+			 * an MMU, but we might get them from range checking
+			 */
 			ret = op_ret;
 			goto out_put_keys;
 		}
 
-		ret = fault_in_user_writeable(uaddr2);
-		if (ret)
-			goto out_put_keys;
+		if (op_ret == -EFAULT) {
+			ret = fault_in_user_writeable(uaddr2);
+			if (ret)
+				goto out_put_keys;
+		}
 
-		if (!(flags & FLAGS_SHARED))
+		if (!(flags & FLAGS_SHARED)) {
+			cond_resched();
 			goto retry_private;
+		}
 
 		put_futex_key(&key2);
 		put_futex_key(&key1);
+		cond_resched();
 		goto retry;
 	}
 
@@ -2350,7 +2350,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	u32 uval, uninitialized_var(curval), newval;
 	struct task_struct *oldowner, *newowner;
 	u32 newtid;
-	int ret;
+	int ret, err = 0;
 
 	lockdep_assert_held(q->lock_ptr);
 
@@ -2421,14 +2421,17 @@ retry:
 	if (!pi_state->owner)
 		newtid |= FUTEX_OWNER_DIED;
 
-	if (get_futex_value_locked(&uval, uaddr))
-		goto handle_fault;
+	err = get_futex_value_locked(&uval, uaddr);
+	if (err)
+		goto handle_err;
 
 	for (;;) {
 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-			goto handle_fault;
+		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+		if (err)
+			goto handle_err;
+
 		if (curval == uval)
 			break;
 		uval = curval;
@@ -2456,23 +2459,37 @@ retry:
 	return 0;
 
 	/*
-	 * To handle the page fault we need to drop the locks here. That gives
-	 * the other task (either the highest priority waiter itself or the
-	 * task which stole the rtmutex) the chance to try the fixup of the
-	 * pi_state. So once we are back from handling the fault we need to
-	 * check the pi_state after reacquiring the locks and before trying to
-	 * do another fixup. When the fixup has been done already we simply
-	 * return.
+	 * In order to reschedule or handle a page fault, we need to drop the
+	 * locks here. In the case of a fault, this gives the other task
+	 * (either the highest priority waiter itself or the task which stole
+	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
+	 * are back from handling the fault we need to check the pi_state after
+	 * reacquiring the locks and before trying to do another fixup. When
+	 * the fixup has been done already we simply return.
 	 *
 	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
 	 * drop hb->lock since the caller owns the hb -> futex_q relation.
 	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
 	 */
-handle_fault:
+handle_err:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(q->lock_ptr);
 
-	ret = fault_in_user_writeable(uaddr);
+	switch (err) {
+	case -EFAULT:
+		ret = fault_in_user_writeable(uaddr);
+		break;
+
+	case -EAGAIN:
+		cond_resched();
+		ret = 0;
+		break;
+
+	default:
+		WARN_ON_ONCE(1);
+		ret = err;
+		break;
+	}
 
 	spin_lock(q->lock_ptr);
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@@ -3041,10 +3058,8 @@ retry:
 		 * A unconditional UNLOCK_PI op raced against a waiter
 		 * setting the FUTEX_WAITERS bit. Try again.
 		 */
-		if (ret == -EAGAIN) {
-			put_futex_key(&key);
-			goto retry;
-		}
+		if (ret == -EAGAIN)
+			goto pi_retry;
 		/*
 		 * wake_futex_pi has detected invalid state. Tell user
 		 * space.
@@ -3059,9 +3074,19 @@ retry:
 	 * preserve the WAITERS bit not the OWNER_DIED one. We are the
 	 * owner.
 	 */
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
+	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
 		spin_unlock(&hb->lock);
-		goto pi_faulted;
+		switch (ret) {
+		case -EFAULT:
+			goto pi_faulted;
+
+		case -EAGAIN:
+			goto pi_retry;
+
+		default:
+			WARN_ON_ONCE(1);
+			goto out_putkey;
+		}
 	}
 
 	/*
@@ -3075,6 +3100,11 @@ out_putkey:
 	put_futex_key(&key);
 	return ret;
 
+pi_retry:
+	put_futex_key(&key);
+	cond_resched();
+	goto retry;
+
 pi_faulted:
 	put_futex_key(&key);
 
@@ -3435,6 +3465,7 @@ err_unlock:
 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
 	u32 uval, uninitialized_var(nval), mval;
+	int err;
 
 	/* Futex address must be 32bit aligned */
 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
@@ -3444,42 +3475,57 @@ retry:
 	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
-		/*
-		 * Ok, this dying thread is truly holding a futex
-		 * of interest. Set the OWNER_DIED bit atomically
-		 * via cmpxchg, and if the value had FUTEX_WAITERS
-		 * set, wake up a waiter (if any). (We have to do a
-		 * futex_wake() even if OWNER_DIED is already set -
-		 * to handle the rare but possible case of recursive
-		 * thread-death.) The rest of the cleanup is done in
-		 * userspace.
-		 */
-		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-		/*
-		 * We are not holding a lock here, but we want to have
-		 * the pagefault_disable/enable() protection because
-		 * we want to handle the fault gracefully. If the
-		 * access fails we try to fault in the futex with R/W
-		 * verification via get_user_pages. get_user() above
-		 * does not guarantee R/W access. If that fails we
-		 * give up and leave the futex locked.
-		 */
-		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+		return 0;
+
+	/*
+	 * Ok, this dying thread is truly holding a futex
+	 * of interest. Set the OWNER_DIED bit atomically
+	 * via cmpxchg, and if the value had FUTEX_WAITERS
+	 * set, wake up a waiter (if any). (We have to do a
+	 * futex_wake() even if OWNER_DIED is already set -
+	 * to handle the rare but possible case of recursive
+	 * thread-death.) The rest of the cleanup is done in
+	 * userspace.
+	 */
+	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+
+	/*
+	 * We are not holding a lock here, but we want to have
+	 * the pagefault_disable/enable() protection because
+	 * we want to handle the fault gracefully. If the
+	 * access fails we try to fault in the futex with R/W
+	 * verification via get_user_pages. get_user() above
+	 * does not guarantee R/W access. If that fails we
+	 * give up and leave the futex locked.
+	 */
+	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
+		switch (err) {
+		case -EFAULT:
 			if (fault_in_user_writeable(uaddr))
 				return -1;
 			goto retry;
-		}
-		if (nval != uval)
+
+		case -EAGAIN:
+			cond_resched();
 			goto retry;
 
-		/*
-		 * Wake robust non-PI futexes here. The wakeup of
-		 * PI futexes happens in exit_pi_state():
-		 */
-		if (!pi && (uval & FUTEX_WAITERS))
-			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+		default:
+			WARN_ON_ONCE(1);
+			return err;
+		}
 	}
+
+	if (nval != uval)
+		goto retry;
+
+	/*
+	 * Wake robust non-PI futexes here. The wakeup of
+	 * PI futexes happens in exit_pi_state():
+	 */
+	if (!pi && (uval & FUTEX_WAITERS))
+		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+
 	return 0;
 }
 
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 613dfe681e9f..08b43de2383b 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -6,10 +6,10 @@ UBSAN_SANITIZE_generic_report.o := n
 UBSAN_SANITIZE_tags.o := n
 KCOV_INSTRUMENT := n
 
-CFLAGS_REMOVE_common.o = -pg
-CFLAGS_REMOVE_generic.o = -pg
-CFLAGS_REMOVE_generic_report.o = -pg
-CFLAGS_REMOVE_tags.o = -pg
+CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE)
 
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
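
CC_FLAGS_FTRACE is whatever the architecture hands the compiler for ftrace instrumentation; a literal -pg is only the common default. Removing the variable instead of a hard-coded -pg keeps these objects uninstrumented on every architecture, e.g. (illustrative values, which vary by arch and config):

	# x86 with fentry support:  CC_FLAGS_FTRACE = -pg -mfentry
	# common default:           CC_FLAGS_FTRACE = -pg
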
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a39dcfdbcc65..74b6582eaa3c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -189,7 +189,7 @@ static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr
 	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	pte_free_kernel(NULL, pte_table);
+	free_page((unsigned long)pte_table);
 	put_page(virt_to_page(pmd));
 }
 