author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-13 19:39:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-13 19:39:21 -0500
commit		f4000cd99750065d5177555c0a805c97174d1b9f (patch)
tree		88ab9f09e8fe1e97f34553f7964ee4598e7a0bfc
parent		2ec4584eb89b8933d1ee307f2fc9c42e745847d7 (diff)
parent		75037120e62b58c536999eb23d70cfcb6d6c0bcc (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
- struct thread_info moved off-stack (also touching
include/linux/thread_info.h and include/linux/restart_block.h)
- cpus_have_cap() reworked to avoid __builtin_constant_p() for static
key use (also touching drivers/irqchip/irq-gic-v3.c)
- uprobes support (currently only for native 64-bit tasks)
- Emulation of kernel Privileged Access Never (PAN) using TTBR0_EL1
  switching to a reserved page table (a conceptual sketch follows this
  list)
- CPU capacity information passing via DT or sysfs (used by the
scheduler)
- support for systems without FP/SIMD (IOW, the kernel avoids touching
  these registers; there is no soft-float ABI, nor kernel emulation for
  AArch64 FP/SIMD)
- handling of hardware watchpoints with unaligned addresses, varied
  lengths and offsets from the base
- use of the page table contiguous hint for kernel mappings
- hugetlb fixes for sizes involving the contiguous hint
- remove unnecessary I-cache invalidation in flush_cache_range()
- CNTHCTL_EL2 access fix for CPUs with VHE support (ARMv8.1)
- boot-time checks for writable+executable kernel mappings
- simplify asm/opcodes.h, avoiding inclusion of the 32-bit ARM
  counterpart, and make the arm64 kernel headers self-consistent (Xen
  headers patch merged separately)
- Workaround for broken .inst support in certain binutils versions
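
The SW PAN emulation can be pictured as parking TTBR0_EL1 on a reserved
zeroed page table whenever the kernel is not deliberately accessing user
memory, and restoring the task's real TTBR0_EL1 only inside the uaccess
routines. A minimal conceptual sketch in C; the helper names
current_saved_ttbr0() and reserved_ttbr0_phys() are invented here, and the
real sequence lives in the series' asm/uaccess.h and entry.S changes:

	/* Conceptual sketch only, not the series' exact implementation. */
	static inline void uaccess_enable_sketch(void)
	{
		/* restore the task's real user page table base */
		write_sysreg(current_saved_ttbr0(), ttbr0_el1);
		isb();
	}

	static inline void uaccess_disable_sketch(void)
	{
		/* park TTBR0_EL1 on the reserved table: user accesses fault */
		write_sysreg(reserved_ttbr0_phys(), ttbr0_el1);
		isb();
	}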
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (60 commits)
arm64: Disable PAN on uaccess_enable()
arm64: Work around broken .inst when defective gas is detected
arm64: Add detection code for broken .inst support in binutils
arm64: Remove reference to asm/opcodes.h
arm64: Get rid of asm/opcodes.h
arm64: smp: Prevent raw_smp_processor_id() recursion
arm64: head.S: Fix CNTHCTL_EL2 access on VHE system
arm64: Remove I-cache invalidation from flush_cache_range()
arm64: Enable HIBERNATION in defconfig
arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN
arm64: xen: Enable user access before a privcmd hvc call
arm64: Handle faults caused by inadvertent user access with PAN enabled
arm64: Disable TTBR0_EL1 during normal kernel execution
arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro
arm64: Factor out PAN enabling/disabling into separate uaccess_* macros
arm64: Update the synchronous external abort fault description
selftests: arm64: add test for unaligned/inexact watchpoint handling
arm64: Allow hw watchpoint of length 3,5,6 and 7
arm64: hw_breakpoint: Handle inexact watchpoint addresses
...
90 files changed, 2284 insertions, 525 deletions
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
new file mode 100644
index 000000000000..7809fbe0cdb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -0,0 +1,236 @@
+==========================================
+ARM CPUs capacity bindings
+==========================================
+
+==========================================
+1 - Introduction
+==========================================
+
+ARM systems may be configured to have cpus with different power/performance
+characteristics within the same chip. In this case, additional information has
+to be made available to the kernel for it to be aware of such differences and
+take decisions accordingly.
+
+==========================================
+2 - CPU capacity definition
+==========================================
+
+CPU capacity is a number that gives the scheduler information about CPU
+heterogeneity. Such heterogeneity can come from micro-architectural differences
+(e.g., ARM big.LITTLE systems) or the maximum frequency at which CPUs can run
+(e.g., SMP systems with multiple frequency domains). Heterogeneity in this
+context is about differing performance characteristics; this binding tries to
+capture a first-order approximation of the relative performance of CPUs.
+
+CPU capacities are obtained by running a suitable benchmark. This binding makes
+no guarantees on the validity or suitability of any particular benchmark; the
+final capacity should, however, be:
+
+* A "single-threaded" or CPU affine benchmark
+* Divided by the running frequency of the CPU executing the benchmark
+* Not subject to dynamic frequency scaling of the CPU
+
+For the time being, however, we advise using the Dhrystone benchmark. The
+above then becomes:
+
+CPU capacities are obtained by running the Dhrystone benchmark on each CPU at
+max frequency (with caches enabled). The obtained DMIPS score is then divided
+by the frequency (in MHz) at which the benchmark has been run, so that
+DMIPS/MHz is obtained. Such values are then normalized w.r.t. the highest
+score obtained in the system.
+
+==========================================
+3 - capacity-dmips-mhz
+==========================================
+
+capacity-dmips-mhz is an optional cpu node [1] property: a u32 value
+representing CPU capacity expressed in normalized DMIPS/MHz. At boot time, the
+maximum frequency available to the cpu is then used to calculate the capacity
+value internally used by the kernel.
+
+The capacity-dmips-mhz property is all-or-nothing: if it is specified for a
+cpu node, it has to be specified for every other cpu node, or the system will
+fall back to the default capacity value for every CPU. If cpufreq is not
+available, final capacities are calculated by directly using capacity-dmips-
+mhz values (normalized w.r.t. the highest value found while parsing the DT).
+
+===========================================
+4 - Examples
+===========================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters):
+capacity-dmips-mhz values are scaled w.r.t. 1024 (cpu@0 and cpu@1);
+supposing cluster0@max-freq=1100 and cluster1@max-freq=850,
+final capacities are 1024 for cluster0 and 446 for cluster1
+
+cpus {
+	#address-cells = <2>;
+	#size-cells = <0>;
+
+	cpu-map {
+		cluster0 {
+			core0 {
+				cpu = <&A57_0>;
+			};
+			core1 {
+				cpu = <&A57_1>;
+			};
+		};
+
+		cluster1 {
+			core0 {
+				cpu = <&A53_0>;
+			};
+			core1 {
+				cpu = <&A53_1>;
+			};
+			core2 {
+				cpu = <&A53_2>;
+			};
+			core3 {
+				cpu = <&A53_3>;
+			};
+		};
+	};
+
+	idle-states {
+		entry-method = "arm,psci";
+
+		CPU_SLEEP_0: cpu-sleep-0 {
+			compatible = "arm,idle-state";
+			arm,psci-suspend-param = <0x0010000>;
+			local-timer-stop;
+			entry-latency-us = <100>;
+			exit-latency-us = <250>;
+			min-residency-us = <150>;
+		};
+
+		CLUSTER_SLEEP_0: cluster-sleep-0 {
+			compatible = "arm,idle-state";
+			arm,psci-suspend-param = <0x1010000>;
+			local-timer-stop;
+			entry-latency-us = <800>;
+			exit-latency-us = <700>;
+			min-residency-us = <2500>;
+		};
+	};
+
+	A57_0: cpu@0 {
+		compatible = "arm,cortex-a57","arm,armv8";
+		reg = <0x0 0x0>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A57_L2>;
+		clocks = <&scpi_dvfs 0>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <1024>;
+	};
+
+	A57_1: cpu@1 {
+		compatible = "arm,cortex-a57","arm,armv8";
+		reg = <0x0 0x1>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A57_L2>;
+		clocks = <&scpi_dvfs 0>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <1024>;
+	};
+
+	A53_0: cpu@100 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x100>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_1: cpu@101 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x101>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_2: cpu@102 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x102>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_3: cpu@103 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x103>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A57_L2: l2-cache0 {
+		compatible = "cache";
+	};
+
+	A53_L2: l2-cache1 {
+		compatible = "cache";
+	};
+};
+
+Example 2 (ARM 32-bit, 4-cpu system, two clusters,
+	   cpus 0,1@1GHz, cpus 2,3@500MHz):
+capacity-dmips-mhz values are scaled w.r.t. 2 (cpu@0 and cpu@1); this means
+that cpu@0 and cpu@1 are twice as fast as cpu@2 and cpu@3 (at the same
+frequency)
+
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu0: cpu@0 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0>;
+		capacity-dmips-mhz = <2>;
+	};
+
+	cpu1: cpu@1 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <1>;
+		capacity-dmips-mhz = <2>;
+	};
+
+	cpu2: cpu@2 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0x100>;
+		capacity-dmips-mhz = <1>;
+	};
+
+	cpu3: cpu@3 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0x101>;
+		capacity-dmips-mhz = <1>;
+	};
+};
+
+===========================================
+5 - References
+===========================================
+
+[1] ARM Linux Kernel documentation - CPUs bindings
+    Documentation/devicetree/bindings/arm/cpus.txt
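
The 446 figure in Example 1 above follows from the binding's rule: scale each
capacity-dmips-mhz value by the cpu's maximum frequency, then normalize to
1024 against the largest product. A quick editorial sketch of that arithmetic
in C (not part of the committed binding document):

	#include <stdio.h>

	int main(void)
	{
		/* capacity-dmips-mhz * max frequency (MHz), per cluster */
		unsigned long c0 = 1024UL * 1100;	/* A57 cluster */
		unsigned long c1 = 578UL * 850;		/* A53 cluster */

		/* normalize w.r.t. the highest product, scaled to 1024 */
		printf("cluster0: %lu\n", c0 * 1024 / c0);	/* 1024 */
		printf("cluster1: %lu\n", c1 * 1024 / c0);	/* 446 */
		return 0;
	}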
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index e6782d50cbcd..c1dcf4cade2e 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -241,6 +241,14 @@ nodes to be present and contain the properties described below.
 			# List of phandles to idle state nodes supported
 			  by this cpu [3].
 
+	- capacity-dmips-mhz
+		Usage: Optional
+		Value type: <u32>
+		Definition:
+			# u32 value representing CPU capacity [4] in
+			  DMIPS/MHz, relative to the highest capacity-dmips-mhz
+			  in the system.
+
 	- rockchip,pmu
 		Usage: optional for systems that have an "enable-method"
 		       property value of "rockchip,rk3066-smp"
@@ -464,3 +472,5 @@ cpus {
 [2] arm/msm/qcom,kpss-acc.txt
 [3] ARM Linux kernel documentation - idle states bindings
     Documentation/devicetree/bindings/arm/idle-states.txt
+[4] ARM Linux kernel documentation - cpu capacity bindings
+    Documentation/devicetree/bindings/arm/cpu-capacity.txt
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 657be7f5014e..111742126897 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -110,6 +110,7 @@ config ARM64
 	select POWER_SUPPLY
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -239,6 +240,9 @@ config PGTABLE_LEVELS
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -791,6 +795,14 @@ config SETEND_EMULATION
 	  If unsure, say Y
 endif
 
+config ARM64_SW_TTBR0_PAN
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	help
+	  Enabling this option prevents the kernel from accessing
+	  user-space memory directly by pointing TTBR0_EL1 to a reserved
+	  zeroed area and a reserved ASID. The user access routines
+	  restore the valid TTBR0_EL1 temporarily.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe742615..d1ebd46872fd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,9 +2,13 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config ARM64_PTDUMP
+config ARM64_PTDUMP_CORE
+	def_bool n
+
+config ARM64_PTDUMP_DEBUGFS
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
+	select ARM64_PTDUMP_CORE
 	select DEBUG_FS
 	help
 	  Say Y here if you want to show the kernel pagetable layout in a
@@ -38,6 +42,35 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_WX
+	bool "Warn on W+X mappings at boot"
+	select ARM64_PTDUMP_CORE
+	---help---
+	  Generate a warning if any W+X mappings are found at boot.
+
+	  This is useful for discovering cases where the kernel is leaving
+	  W+X mappings after applying NX, as such mappings are a security risk.
+	  This check also includes UXN, which should be set on all kernel
+	  mappings.
+
+	  Look for a message in dmesg output like this:
+
+	    arm64/mm: Checked W+X mappings: passed, no W+X pages found.
+
+	  or like this, if the check failed:
+
+	    arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+	  Note that even if the check fails, your kernel is possibly
+	  still fine, as W+X mappings are not a security hole in
+	  themselves; what they do is make the exploitation of other
+	  unfixed kernel bugs easier.
+
+	  There is no runtime or memory usage effect of this option
+	  once the kernel has booted up - it's a one-time check.
+
+	  If in doubt, say "Y".
+
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
 	depends on MODULES
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3635b8662724..b9a4a934ca05 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,10 +37,16 @@ $(warning LSE atomics not supported by binutils)
   endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
+
+ifneq ($(brokengasinst),)
+$(warning Detected assembler with broken .inst; disassembly will be unreliable)
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_AFLAGS	+= $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6be08113a96d..c3caaddde6cc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,6 +82,7 @@ CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
 CONFIG_CPU_IDLE=y
+CONFIG_HIBERNATION=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPUFREQ_DT=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b4ab238a59ec..8365a84c2640 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,7 +1,6 @@
 generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 28bfe6132eb6..446f6c46d4b1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -202,14 +211,25 @@ lr	.req	x30		// link register
 	.endm
 
 /*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
  * @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
  * @tmp: scratch register
  */
-	.macro this_cpu_ptr, sym, reg, tmp
-	adr_l	\reg, \sym
+	.macro adr_this_cpu, dst, sym, tmp
+	adr_l	\dst, \sym
 	mrs	\tmp, tpidr_el1
-	add	\reg, \reg, \tmp
+	add	\dst, \dst, \tmp
+	.endm
+
+/*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+	.macro ldr_this_cpu dst, sym, tmp
+	adr_l	\dst, \sym
+	mrs	\tmp, tpidr_el1
+	ldr	\dst, [\dst, \tmp]
 	.endm
 
 /*
@@ -395,4 +415,24 @@ alternative_endif
 	movk	\reg, :abs_g0_nc:\val
 	.endm
 
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+	.macro	post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
+	ic	iallu
+	dsb	nsh
+	isb
+alternative_else_nop_endif
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 2e5fb976a572..5a2a6ee65f65 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -65,12 +65,12 @@
  *	- kaddr  - page address
  *	- size   - region size
  */
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
@@ -81,6 +81,11 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
 {
 }
 
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+}
+
 /*
  * Cache maintenance functions used by the DMA API. Not to be used directly.
  */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b446535185..4174f09678c4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -34,7 +34,8 @@
 #define ARM64_HAS_32BIT_EL0			13
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
+#define ARM64_HAS_NO_FPSIMD			16
 
-#define ARM64_NCAPS				16
+#define ARM64_NCAPS				17
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0bc0b1de90c4..b4989df48670 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -9,8 +9,6 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
-#include <linux/jump_label.h>
-
 #include <asm/cpucaps.h>
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
@@ -27,6 +25,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bug.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 
 /* CPU feature register tracking */
@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
 	return elf_hwcap & (1UL << num);
 }
 
+/* System capability check for constant caps */
+static inline bool cpus_have_const_cap(int num)
+{
+	if (num >= ARM64_NCAPS)
+		return false;
+	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
 static inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	if (__builtin_constant_p(num))
-		return static_branch_unlikely(&cpu_hwcap_keys[num]);
-	else
-		return test_bit(num, cpu_hwcaps);
+	return test_bit(num, cpu_hwcaps);
 }
 
 static inline void cpus_set_cap(unsigned int num)
@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
 
 static inline bool system_supports_32bit_el0(void)
 {
-	return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -208,6 +213,17 @@ static inline bool system_supports_mixed_endian_el0(void)
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_supports_fpsimd(void)
+{
+	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+}
+
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
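
The intent of the split above: when the capability index is a compile-time
constant, callers use cpus_have_const_cap() and get a patchable static
branch; otherwise they use cpus_have_cap() and get a plain bitmap test. A
hedged usage sketch (the two wrapper functions are invented for
illustration):

	static bool fast_path_check(void)
	{
		/* constant cap: compiles down to a static branch */
		return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
	}

	static bool dynamic_check(unsigned int cap)
	{
		/* runtime-variable cap: tests the cpu_hwcaps bitmap */
		return cpus_have_cap(cap);
	}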
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 000000000000..f2bcbe2d9889
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return (struct task_struct *)read_sysreg(sp_el0);
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index b71420a12f26..a44cf5225429 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -68,6 +68,9 @@
 #define BRK64_ESR_MASK		0xFFFF
 #define BRK64_ESR_KPROBES	0x0004
 #define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+/* uprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_UPROBES	0x0005
+#define BRK64_OPCODE_UPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
 
 /* AArch32 */
 #define DBG_ESR_EVT_BKPT	0x4
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 771b3f0bc757..0b6b1633017f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
+#include <asm/cpufeature.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
@@ -78,7 +79,30 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-	switch_mm(NULL, mm, NULL);
+	__switch_mm(mm);
+
+	if (system_uses_ttbr0_pan()) {
+		if (mm != current->active_mm) {
+			/*
+			 * Update the current thread's saved ttbr0 since it is
+			 * restored as part of a return from exception. Set
+			 * the hardware TTBR0_EL1 using cpu_switch_mm()
+			 * directly to enable potential errata workarounds.
+			 */
+			update_saved_ttbr0(current, mm);
+			cpu_switch_mm(mm->pgd, mm);
+		} else {
+			/*
+			 * Defer the switch to the current thread's TTBR0_EL1
+			 * until uaccess_enable(). Restore the current
+			 * thread's saved ttbr0 corresponding to its active_mm
+			 * (if different from init_mm).
+			 */
+			cpu_set_reserved_ttbr0();
+			if (current->active_mm != &init_mm)
+				update_saved_ttbr0(current, current->active_mm);
+		}
+	}
 }
 
 void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f4a5d7..5d1700425efe 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -138,7 +138,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->regs[0] = 0
 
-#define SET_PERSONALITY(ex)		clear_thread_flag(TIF_32BIT);
+#define SET_PERSONALITY(ex)						\
+({									\
+	clear_bit(TIF_32BIT, &current->mm->context.flags);		\
+	clear_thread_flag(TIF_32BIT);					\
+})
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO							\
@@ -183,7 +187,11 @@ typedef compat_elf_greg_t			compat_elf_gregset_t[COMPAT_ELF_NGREG];
 					 ((x)->e_flags & EF_ARM_EABI_MASK))
 
 #define compat_start_thread		compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex)	set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex)					\
+({									\
+	set_bit(TIF_32BIT, &current->mm->context.flags);		\
+	set_thread_flag(TIF_32BIT);					\
+})
 #define COMPAT_ARCH_DLINFO
 extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
 				      int uses_interp);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..85c4a8981d47 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,15 +21,12 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
 #include <asm/errno.h>
-#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
+do {									\
+	uaccess_enable();						\
 	asm volatile(							\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
@@ -44,11 +41,11 @@
 "	.popsection\n"							\
 	_ASM_EXTABLE(1b, 4b)						\
 	_ASM_EXTABLE(2b, 4b)						\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "memory")
+	: "memory");							\
+	uaccess_disable();						\
+} while (0)
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +115,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -134,10 +131,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	.popsection\n"
 	_ASM_EXTABLE(1b, 4b)
 	_ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
+	uaccess_disable();
 
 	*uval = val;
 	return ret;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9510ace570e2..b6b167ac082b 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -77,7 +77,11 @@ static inline void decode_ctrl_reg(u32 reg,
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
 #define ARM_BREAKPOINT_LEN_2	0x3
+#define ARM_BREAKPOINT_LEN_3	0x7
 #define ARM_BREAKPOINT_LEN_4	0xf
+#define ARM_BREAKPOINT_LEN_5	0x1f
+#define ARM_BREAKPOINT_LEN_6	0x3f
+#define ARM_BREAKPOINT_LEN_7	0x7f
 #define ARM_BREAKPOINT_LEN_8	0xff
 
 /* Kernel stepping */
@@ -119,7 +123,7 @@ struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
-				  int *gen_len, int *gen_type);
+				  int *gen_len, int *gen_type, int *offset);
 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b57c0c..7803343e5881 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
@@ -54,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce037b2f..47619411f0ff 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -19,6 +19,7 @@
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
+	unsigned long	flags;
 } mm_context_t;
 
 /*
@@ -34,7 +35,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
-			       pgprot_t prot, bool allow_block_mappings);
+			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
 #endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a50185375f09..0363fe80455c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,6 +23,7 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -103,7 +104,7 @@ static inline void cpu_uninstall_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
-	if (mm != &init_mm)
+	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -163,20 +164,26 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned. No registers are touched. We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
 {
-	unsigned int cpu = smp_processor_id();
+	if (system_uses_ttbr0_pan()) {
+		BUG_ON(mm->pgd == swapper_pg_dir);
+		task_thread_info(tsk)->ttbr0 =
+			virt_to_phys(mm->pgd) | ASID(mm) << 48;
+	}
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+}
+#endif
 
-	if (prev == next)
-		return;
+static inline void __switch_mm(struct mm_struct *next)
+{
+	unsigned int cpu = smp_processor_id();
 
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
@@ -190,8 +197,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	check_and_switch_context(next, cpu);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	if (prev != next)
+		__switch_mm(next);
+
+	/*
+	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+	 * value may have not been initialised yet (activate_mm caller) or the
+	 * ASID has changed since the last run (following the context switch
+	 * of another thread of the same process). Avoid setting the reserved
+	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 */
+	if (next != &init_mm)
+		update_saved_ttbr0(tsk, next);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)	switch_mm(prev, next, current)
 
 void verify_cpu_asid_bits(void);
 
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index 13ce4cc18e26..ad4cdc966c0f 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -9,8 +9,9 @@
  */
 
 #include <linux/types.h>
+#include <asm/fpsimd.h>
 
-#define cpu_has_neon()		(1)
+#define cpu_has_neon()		system_supports_fpsimd()
 
 #define kernel_neon_begin()	kernel_neon_begin_partial(32)
 
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
deleted file mode 100644
index 123f45d92cd1..000000000000
--- a/arch/arm64/include/asm/opcodes.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
-#endif
-
-#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5394c8405e66..3bd498e4de4c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/stack_pointer.h>
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -101,16 +103,16 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 
 	switch (size) {
 	case 1:
-		ret = ACCESS_ONCE(*(u8 *)ptr);
+		ret = READ_ONCE(*(u8 *)ptr);
 		break;
 	case 2:
-		ret = ACCESS_ONCE(*(u16 *)ptr);
+		ret = READ_ONCE(*(u16 *)ptr);
 		break;
 	case 4:
-		ret = ACCESS_ONCE(*(u32 *)ptr);
+		ret = READ_ONCE(*(u32 *)ptr);
 		break;
 	case 8:
-		ret = ACCESS_ONCE(*(u64 *)ptr);
+		ret = READ_ONCE(*(u64 *)ptr);
 		break;
 	default:
 		BUILD_BUG();
@@ -123,16 +125,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:
-		ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+		WRITE_ONCE(*(u8 *)ptr, (u8)val);
 		break;
 	case 2:
-		ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+		WRITE_ONCE(*(u16 *)ptr, (u16)val);
 		break;
 	case 4:
-		ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+		WRITE_ONCE(*(u32 *)ptr, (u32)val);
 		break;
 	case 8:
-		ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+		WRITE_ONCE(*(u64 *)ptr, (u64)val);
 		break;
 	default:
 		BUILD_BUG();
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 38b6a2b49d68..8d5cbec17d80 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#include <asm/stack_pointer.h>
+
 #define	ARMV8_PMU_MAX_COUNTERS	32
 #define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
 
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 5af574d632fa..6a5b28904c33 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -15,21 +15,22 @@
 #ifndef _ARM_PROBES_H
 #define _ARM_PROBES_H
 
-#include <asm/opcodes.h>
-
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
 
 /* architecture specific copy of original instruction */
-struct arch_specific_insn {
-	kprobe_opcode_t *insn;
+struct arch_probe_insn {
+	probe_opcode_t *insn;
 	pstate_check_t *pstate_cc;
-	kprobes_handler_t *handler;
+	probes_handler_t *handler;
 	/* restore address after step xol */
 	unsigned long restore;
 };
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+	struct arch_probe_insn api;
+};
+#endif
 
 #endif
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 07b8ed037dee..6afd8476c60c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -16,9 +16,10 @@
 #ifndef __ASM_PTDUMP_H
 #define __ASM_PTDUMP_H
 
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_CORE
 
 #include <linux/mm_types.h>
+#include <linux/seq_file.h>
 
 struct addr_marker {
 	unsigned long start_address;
@@ -29,16 +30,25 @@ struct ptdump_info {
 	struct mm_struct	*mm;
 	const struct addr_marker *markers;
 	unsigned long		base_addr;
-	unsigned long		max_addr;
 };
 
-int ptdump_register(struct ptdump_info *info, const char *name);
-
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
 #else
-static inline int ptdump_register(struct ptdump_info *info, const char *name)
+static inline int ptdump_debugfs_register(struct ptdump_info *info,
+					   const char *name)
 {
 	return 0;
 }
-#endif /* CONFIG_ARM64_PTDUMP */
+#endif
+void ptdump_check_wx(void);
+#endif /* CONFIG_ARM64_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx()	ptdump_check_wx()
+#else
+#define debug_checkwx()	do { } while (0)
+#endif
 
 #endif /* __ASM_PTDUMP_H */
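
debug_checkwx() above is meant to run exactly once, late in boot, after
kernel mappings have their final permissions; in this series the call site
is mark_rodata_ro(). A rough sketch of the wiring (illustrative, not the
literal arch/arm64/mm code):

	void mark_rodata_ro(void)
	{
		/* ... remap kernel text/rodata read-only ... */
		debug_checkwx();	/* no-op unless CONFIG_DEBUG_WX=y */
	}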
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5b036d..513daf050e84 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -217,6 +217,14 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
 
 #include <asm-generic/ptrace.h>
 
+#define procedure_link_pointer(regs)	((regs)->regs[30])
+
+static inline void procedure_link_pointer_set(struct pt_regs *regs,
+					      unsigned long val)
+{
+	procedure_link_pointer(regs) = val;
+}
+
 #undef profile_pc
 extern unsigned long profile_pc(struct pt_regs *regs);
 
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 022644704a93..d050d720a1b4 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -29,11 +29,22 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/percpu.h>
+
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
 
 struct seq_file;
 
@@ -73,6 +84,7 @@ asmlinkage void secondary_start_kernel(void);
  */
 struct secondary_data {
 	void *stack;
+	struct task_struct *task;
 	long status;
 };
 
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 000000000000..ffcdf742cddf
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index b8a313fd7a09..de5600f40adf 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
 #define NR_CALLEE_SAVED_REGS 12
 
 /*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6c80b3699cb8..98ae03f8eedd 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -22,8 +22,6 @@ | |||
22 | 22 | ||
23 | #include <linux/stringify.h> | 23 | #include <linux/stringify.h> |
24 | 24 | ||
25 | #include <asm/opcodes.h> | ||
26 | |||
27 | /* | 25 | /* |
28 | * ARMv8 ARM reserves the following encoding for system registers: | 26 | * ARMv8 ARM reserves the following encoding for system registers: |
29 | * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", | 27 | * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", |
@@ -37,6 +35,33 @@ | |||
37 | #define sys_reg(op0, op1, crn, crm, op2) \ | 35 | #define sys_reg(op0, op1, crn, crm, op2) \ |
38 | ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5)) | 36 | ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5)) |
39 | 37 | ||
38 | #ifndef CONFIG_BROKEN_GAS_INST | ||
39 | |||
40 | #ifdef __ASSEMBLY__ | ||
41 | #define __emit_inst(x) .inst (x) | ||
42 | #else | ||
43 | #define __emit_inst(x) ".inst " __stringify((x)) "\n\t" | ||
44 | #endif | ||
45 | |||
46 | #else /* CONFIG_BROKEN_GAS_INST */ | ||
47 | |||
48 | #ifndef CONFIG_CPU_BIG_ENDIAN | ||
49 | #define __INSTR_BSWAP(x) (x) | ||
50 | #else /* CONFIG_CPU_BIG_ENDIAN */ | ||
51 | #define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \ | ||
52 | (((x) << 8) & 0x00ff0000) | \ | ||
53 | (((x) >> 8) & 0x0000ff00) | \ | ||
54 | (((x) >> 24) & 0x000000ff)) | ||
55 | #endif /* CONFIG_CPU_BIG_ENDIAN */ | ||
56 | |||
57 | #ifdef __ASSEMBLY__ | ||
58 | #define __emit_inst(x) .long __INSTR_BSWAP(x) | ||
59 | #else /* __ASSEMBLY__ */ | ||
60 | #define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t" | ||
61 | #endif /* __ASSEMBLY__ */ | ||
62 | |||
63 | #endif /* CONFIG_BROKEN_GAS_INST */ | ||
64 | |||
40 | #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) | 65 | #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) |
41 | #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) | 66 | #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) |
42 | #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) | 67 | #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) |
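The fallback path above works because A64 instructions are always little-endian in memory: when a defective gas cannot assemble .inst and the word is emitted with .long instead, a big-endian assembler would store the bytes reversed, so __INSTR_BSWAP pre-swaps them. A standalone check of the swap (the NOP encoding is a known A64 opcode; everything else is plain C):

#include <stdint.h>
#include <stdio.h>

#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
			  (((x) <<  8) & 0x00ff0000) | \
			  (((x) >>  8) & 0x0000ff00) | \
			  (((x) >> 24) & 0x000000ff))

int main(void)
{
	uint32_t nop = 0xd503201f;	/* A64 NOP encoding */
	/* prints: 0xd503201f -> 0x1f2003d5 (same bytes, reversed order) */
	printf("0x%08x -> 0x%08x\n", nop, (uint32_t)__INSTR_BSWAP(nop));
	return 0;
}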
@@ -81,10 +106,10 @@ | |||
81 | #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4) | 106 | #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4) |
82 | #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3) | 107 | #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3) |
83 | 108 | ||
84 | #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\ | 109 | #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \ |
85 | (!!x)<<8 | 0x1f) | 110 | (!!x)<<8 | 0x1f) |
86 | #define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\ | 111 | #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \ |
87 | (!!x)<<8 | 0x1f) | 112 | (!!x)<<8 | 0x1f) |
88 | 113 | ||
89 | /* Common SCTLR_ELx flags. */ | 114 | /* Common SCTLR_ELx flags. */ |
90 | #define SCTLR_ELx_EE (1 << 25) | 115 | #define SCTLR_ELx_EE (1 << 25) |
@@ -228,11 +253,11 @@ | |||
228 | .equ .L__reg_num_xzr, 31 | 253 | .equ .L__reg_num_xzr, 31 |
229 | 254 | ||
230 | .macro mrs_s, rt, sreg | 255 | .macro mrs_s, rt, sreg |
231 | .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt) | 256 | __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt)) |
232 | .endm | 257 | .endm |
233 | 258 | ||
234 | .macro msr_s, sreg, rt | 259 | .macro msr_s, sreg, rt |
235 | .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt) | 260 | __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt)) |
236 | .endm | 261 | .endm |
237 | 262 | ||
238 | #else | 263 | #else |
@@ -246,11 +271,11 @@ asm( | |||
246 | " .equ .L__reg_num_xzr, 31\n" | 271 | " .equ .L__reg_num_xzr, 31\n" |
247 | "\n" | 272 | "\n" |
248 | " .macro mrs_s, rt, sreg\n" | 273 | " .macro mrs_s, rt, sreg\n" |
249 | " .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n" | 274 | __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) |
250 | " .endm\n" | 275 | " .endm\n" |
251 | "\n" | 276 | "\n" |
252 | " .macro msr_s, sreg, rt\n" | 277 | " .macro msr_s, sreg, rt\n" |
253 | " .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n" | 278 | __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) |
254 | " .endm\n" | 279 | " .endm\n" |
255 | ); | 280 | ); |
256 | 281 | ||
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e9ea5a6bd449..46c3b93cf865 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h | |||
@@ -36,58 +36,31 @@ | |||
36 | 36 | ||
37 | struct task_struct; | 37 | struct task_struct; |
38 | 38 | ||
39 | #include <asm/stack_pointer.h> | ||
39 | #include <asm/types.h> | 40 | #include <asm/types.h> |
40 | 41 | ||
41 | typedef unsigned long mm_segment_t; | 42 | typedef unsigned long mm_segment_t; |
42 | 43 | ||
43 | /* | 44 | /* |
44 | * low level task data that entry.S needs immediate access to. | 45 | * low level task data that entry.S needs immediate access to. |
45 | * __switch_to() assumes cpu_context follows immediately after cpu_domain. | ||
46 | */ | 46 | */ |
47 | struct thread_info { | 47 | struct thread_info { |
48 | unsigned long flags; /* low level flags */ | 48 | unsigned long flags; /* low level flags */ |
49 | mm_segment_t addr_limit; /* address limit */ | 49 | mm_segment_t addr_limit; /* address limit */ |
50 | struct task_struct *task; /* main task structure */ | 50 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
51 | u64 ttbr0; /* saved TTBR0_EL1 */ | ||
52 | #endif | ||
51 | int preempt_count; /* 0 => preemptable, <0 => bug */ | 53 | int preempt_count; /* 0 => preemptable, <0 => bug */ |
52 | int cpu; /* cpu */ | ||
53 | }; | 54 | }; |
54 | 55 | ||
55 | #define INIT_THREAD_INFO(tsk) \ | 56 | #define INIT_THREAD_INFO(tsk) \ |
56 | { \ | 57 | { \ |
57 | .task = &tsk, \ | ||
58 | .flags = 0, \ | ||
59 | .preempt_count = INIT_PREEMPT_COUNT, \ | 58 | .preempt_count = INIT_PREEMPT_COUNT, \ |
60 | .addr_limit = KERNEL_DS, \ | 59 | .addr_limit = KERNEL_DS, \ |
61 | } | 60 | } |
62 | 61 | ||
63 | #define init_thread_info (init_thread_union.thread_info) | ||
64 | #define init_stack (init_thread_union.stack) | 62 | #define init_stack (init_thread_union.stack) |
65 | 63 | ||
66 | /* | ||
67 | * how to get the current stack pointer from C | ||
68 | */ | ||
69 | register unsigned long current_stack_pointer asm ("sp"); | ||
70 | |||
71 | /* | ||
72 | * how to get the thread information struct from C | ||
73 | */ | ||
74 | static inline struct thread_info *current_thread_info(void) __attribute_const__; | ||
75 | |||
76 | /* | ||
77 | * struct thread_info can be accessed directly via sp_el0. | ||
78 | * | ||
79 | * We don't use read_sysreg() as we want the compiler to cache the value where | ||
80 | * possible. | ||
81 | */ | ||
82 | static inline struct thread_info *current_thread_info(void) | ||
83 | { | ||
84 | unsigned long sp_el0; | ||
85 | |||
86 | asm ("mrs %0, sp_el0" : "=r" (sp_el0)); | ||
87 | |||
88 | return (struct thread_info *)sp_el0; | ||
89 | } | ||
90 | |||
91 | #define thread_saved_pc(tsk) \ | 64 | #define thread_saved_pc(tsk) \ |
92 | ((unsigned long)(tsk->thread.cpu_context.pc)) | 65 | ((unsigned long)(tsk->thread.cpu_context.pc)) |
93 | #define thread_saved_sp(tsk) \ | 66 | #define thread_saved_sp(tsk) \ |
@@ -112,6 +85,7 @@ static inline struct thread_info *current_thread_info(void) | |||
112 | #define TIF_NEED_RESCHED 1 | 85 | #define TIF_NEED_RESCHED 1 |
113 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ | 86 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ |
114 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ | 87 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ |
88 | #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ | ||
115 | #define TIF_NOHZ 7 | 89 | #define TIF_NOHZ 7 |
116 | #define TIF_SYSCALL_TRACE 8 | 90 | #define TIF_SYSCALL_TRACE 8 |
117 | #define TIF_SYSCALL_AUDIT 9 | 91 | #define TIF_SYSCALL_AUDIT 9 |
@@ -132,10 +106,12 @@ static inline struct thread_info *current_thread_info(void) | |||
132 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 106 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
133 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 107 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
134 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 108 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
109 | #define _TIF_UPROBE (1 << TIF_UPROBE) | ||
135 | #define _TIF_32BIT (1 << TIF_32BIT) | 110 | #define _TIF_32BIT (1 << TIF_32BIT) |
136 | 111 | ||
137 | #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ | 112 | #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ |
138 | _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) | 113 | _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ |
114 | _TIF_UPROBE) | ||
139 | 115 | ||
140 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 116 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
141 | _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ | 117 | _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ |
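The effect of adding _TIF_UPROBE to _TIF_WORK_MASK is that a pending uprobe breakpoint forces the return-to-user slow path. A compact sketch of the flag test (TIF_SIGPENDING is assumed to be bit 0, as defined elsewhere in this header; the other bit numbers mirror the hunk above):

#include <stdio.h>

#define TIF_SIGPENDING		0	/* assumed; defined above this hunk */
#define TIF_NEED_RESCHED	1
#define TIF_NOTIFY_RESUME	2
#define TIF_FOREIGN_FPSTATE	3
#define TIF_UPROBE		4

#define _TIF_WORK_MASK	((1UL << TIF_NEED_RESCHED) | (1UL << TIF_SIGPENDING) | \
			 (1UL << TIF_NOTIFY_RESUME) | (1UL << TIF_FOREIGN_FPSTATE) | \
			 (1UL << TIF_UPROBE))

int main(void)
{
	unsigned long flags = 1UL << TIF_UPROBE;	/* uprobe hit pending */
	if (flags & _TIF_WORK_MASK)
		printf("work pending: take the slow return path\n");
	return 0;
}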
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 55d0adbf6509..d26750ca6e06 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -18,6 +18,12 @@ | |||
18 | #ifndef __ASM_UACCESS_H | 18 | #ifndef __ASM_UACCESS_H |
19 | #define __ASM_UACCESS_H | 19 | #define __ASM_UACCESS_H |
20 | 20 | ||
21 | #include <asm/alternative.h> | ||
22 | #include <asm/kernel-pgtable.h> | ||
23 | #include <asm/sysreg.h> | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | |||
21 | /* | 27 | /* |
22 | * User space memory access functions | 28 | * User space memory access functions |
23 | */ | 29 | */ |
@@ -26,10 +32,8 @@ | |||
26 | #include <linux/string.h> | 32 | #include <linux/string.h> |
27 | #include <linux/thread_info.h> | 33 | #include <linux/thread_info.h> |
28 | 34 | ||
29 | #include <asm/alternative.h> | ||
30 | #include <asm/cpufeature.h> | 35 | #include <asm/cpufeature.h> |
31 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
32 | #include <asm/sysreg.h> | ||
33 | #include <asm/errno.h> | 37 | #include <asm/errno.h> |
34 | #include <asm/memory.h> | 38 | #include <asm/memory.h> |
35 | #include <asm/compiler.h> | 39 | #include <asm/compiler.h> |
@@ -120,6 +124,99 @@ static inline void set_fs(mm_segment_t fs) | |||
120 | " .popsection\n" | 124 | " .popsection\n" |
121 | 125 | ||
122 | /* | 126 | /* |
127 | * User access enabling/disabling. | ||
128 | */ | ||
129 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
130 | static inline void __uaccess_ttbr0_disable(void) | ||
131 | { | ||
132 | unsigned long ttbr; | ||
133 | |||
134 | /* reserved_ttbr0 placed at the end of swapper_pg_dir */ | ||
135 | ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; | ||
136 | write_sysreg(ttbr, ttbr0_el1); | ||
137 | isb(); | ||
138 | } | ||
139 | |||
140 | static inline void __uaccess_ttbr0_enable(void) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | |||
144 | /* | ||
145 | * Disable interrupts to avoid preemption between reading the 'ttbr0' | ||
146 | * variable and the MSR. A context switch could trigger an ASID | ||
147 | * roll-over and an update of 'ttbr0'. | ||
148 | */ | ||
149 | local_irq_save(flags); | ||
150 | write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); | ||
151 | isb(); | ||
152 | local_irq_restore(flags); | ||
153 | } | ||
154 | |||
155 | static inline bool uaccess_ttbr0_disable(void) | ||
156 | { | ||
157 | if (!system_uses_ttbr0_pan()) | ||
158 | return false; | ||
159 | __uaccess_ttbr0_disable(); | ||
160 | return true; | ||
161 | } | ||
162 | |||
163 | static inline bool uaccess_ttbr0_enable(void) | ||
164 | { | ||
165 | if (!system_uses_ttbr0_pan()) | ||
166 | return false; | ||
167 | __uaccess_ttbr0_enable(); | ||
168 | return true; | ||
169 | } | ||
170 | #else | ||
171 | static inline bool uaccess_ttbr0_disable(void) | ||
172 | { | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | static inline bool uaccess_ttbr0_enable(void) | ||
177 | { | ||
178 | return false; | ||
179 | } | ||
180 | #endif | ||
181 | |||
182 | #define __uaccess_disable(alt) \ | ||
183 | do { \ | ||
184 | if (!uaccess_ttbr0_disable()) \ | ||
185 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ | ||
186 | CONFIG_ARM64_PAN)); \ | ||
187 | } while (0) | ||
188 | |||
189 | #define __uaccess_enable(alt) \ | ||
190 | do { \ | ||
191 | if (!uaccess_ttbr0_enable()) \ | ||
192 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ | ||
193 | CONFIG_ARM64_PAN)); \ | ||
194 | } while (0) | ||
195 | |||
196 | static inline void uaccess_disable(void) | ||
197 | { | ||
198 | __uaccess_disable(ARM64_HAS_PAN); | ||
199 | } | ||
200 | |||
201 | static inline void uaccess_enable(void) | ||
202 | { | ||
203 | __uaccess_enable(ARM64_HAS_PAN); | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * These functions are no-ops when UAO is present. | ||
208 | */ | ||
209 | static inline void uaccess_disable_not_uao(void) | ||
210 | { | ||
211 | __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); | ||
212 | } | ||
213 | |||
214 | static inline void uaccess_enable_not_uao(void) | ||
215 | { | ||
216 | __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); | ||
217 | } | ||
218 | |||
219 | /* | ||
123 | * The "__xxx" versions of the user access functions do not verify the address | 220 | * The "__xxx" versions of the user access functions do not verify the address |
124 | * space - it must have been done previously with a separate "access_ok()" | 221 | * space - it must have been done previously with a separate "access_ok()" |
125 | * call. | 222 | * call. |
@@ -146,8 +243,7 @@ static inline void set_fs(mm_segment_t fs) | |||
146 | do { \ | 243 | do { \ |
147 | unsigned long __gu_val; \ | 244 | unsigned long __gu_val; \ |
148 | __chk_user_ptr(ptr); \ | 245 | __chk_user_ptr(ptr); \ |
149 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\ | 246 | uaccess_enable_not_uao(); \ |
150 | CONFIG_ARM64_PAN)); \ | ||
151 | switch (sizeof(*(ptr))) { \ | 247 | switch (sizeof(*(ptr))) { \ |
152 | case 1: \ | 248 | case 1: \ |
153 | __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ | 249 | __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ |
@@ -168,9 +264,8 @@ do { \ | |||
168 | default: \ | 264 | default: \ |
169 | BUILD_BUG(); \ | 265 | BUILD_BUG(); \ |
170 | } \ | 266 | } \ |
267 | uaccess_disable_not_uao(); \ | ||
171 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ | 268 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
172 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\ | ||
173 | CONFIG_ARM64_PAN)); \ | ||
174 | } while (0) | 269 | } while (0) |
175 | 270 | ||
176 | #define __get_user(x, ptr) \ | 271 | #define __get_user(x, ptr) \ |
@@ -215,8 +310,7 @@ do { \ | |||
215 | do { \ | 310 | do { \ |
216 | __typeof__(*(ptr)) __pu_val = (x); \ | 311 | __typeof__(*(ptr)) __pu_val = (x); \ |
217 | __chk_user_ptr(ptr); \ | 312 | __chk_user_ptr(ptr); \ |
218 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\ | 313 | uaccess_enable_not_uao(); \ |
219 | CONFIG_ARM64_PAN)); \ | ||
220 | switch (sizeof(*(ptr))) { \ | 314 | switch (sizeof(*(ptr))) { \ |
221 | case 1: \ | 315 | case 1: \ |
222 | __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ | 316 | __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ |
@@ -237,8 +331,7 @@ do { \ | |||
237 | default: \ | 331 | default: \ |
238 | BUILD_BUG(); \ | 332 | BUILD_BUG(); \ |
239 | } \ | 333 | } \ |
240 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\ | 334 | uaccess_disable_not_uao(); \ |
241 | CONFIG_ARM64_PAN)); \ | ||
242 | } while (0) | 335 | } while (0) |
243 | 336 | ||
244 | #define __put_user(x, ptr) \ | 337 | #define __put_user(x, ptr) \ |
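Both __get_user and __put_user now follow the same bracketing discipline: open the user-access window, perform the access, close the window again. A user-space sketch of the pattern, with stub enable/disable helpers standing in for the real TTBR0/PAN switching:

#include <stdbool.h>
#include <stdio.h>

static bool window_open;

static void uaccess_enable_not_uao(void)  { window_open = true;  }
static void uaccess_disable_not_uao(void) { window_open = false; }

static int get_user_sketch(unsigned long *dst, const unsigned long *src)
{
	uaccess_enable_not_uao();	/* lift PAN / restore user TTBR0 */
	*dst = *src;			/* the actual (here: ordinary) load */
	uaccess_disable_not_uao();	/* re-arm the protection */
	return 0;
}

int main(void)
{
	unsigned long val = 0, user = 42;
	get_user_sketch(&val, &user);
	printf("read %lu, window %s\n", val, window_open ? "open" : "closed");
	return 0;
}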
@@ -331,4 +424,66 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count); | |||
331 | extern __must_check long strlen_user(const char __user *str); | 424 | extern __must_check long strlen_user(const char __user *str); |
332 | extern __must_check long strnlen_user(const char __user *str, long n); | 425 | extern __must_check long strnlen_user(const char __user *str, long n); |
333 | 426 | ||
427 | #else /* __ASSEMBLY__ */ | ||
428 | |||
429 | #include <asm/assembler.h> | ||
430 | |||
431 | /* | ||
432 | * User access enabling/disabling macros. | ||
433 | */ | ||
434 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
435 | .macro __uaccess_ttbr0_disable, tmp1 | ||
436 | mrs \tmp1, ttbr1_el1 // swapper_pg_dir | ||
437 | add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir | ||
438 | msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 | ||
439 | isb | ||
440 | .endm | ||
441 | |||
442 | .macro __uaccess_ttbr0_enable, tmp1 | ||
443 | get_thread_info \tmp1 | ||
444 | ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 | ||
445 | msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 | ||
446 | isb | ||
447 | .endm | ||
448 | |||
449 | .macro uaccess_ttbr0_disable, tmp1 | ||
450 | alternative_if_not ARM64_HAS_PAN | ||
451 | __uaccess_ttbr0_disable \tmp1 | ||
452 | alternative_else_nop_endif | ||
453 | .endm | ||
454 | |||
455 | .macro uaccess_ttbr0_enable, tmp1, tmp2 | ||
456 | alternative_if_not ARM64_HAS_PAN | ||
457 | save_and_disable_irq \tmp2 // avoid preemption | ||
458 | __uaccess_ttbr0_enable \tmp1 | ||
459 | restore_irq \tmp2 | ||
460 | alternative_else_nop_endif | ||
461 | .endm | ||
462 | #else | ||
463 | .macro uaccess_ttbr0_disable, tmp1 | ||
464 | .endm | ||
465 | |||
466 | .macro uaccess_ttbr0_enable, tmp1, tmp2 | ||
467 | .endm | ||
468 | #endif | ||
469 | |||
470 | /* | ||
471 | * These macros are no-ops when UAO is present. | ||
472 | */ | ||
473 | .macro uaccess_disable_not_uao, tmp1 | ||
474 | uaccess_ttbr0_disable \tmp1 | ||
475 | alternative_if ARM64_ALT_PAN_NOT_UAO | ||
476 | SET_PSTATE_PAN(1) | ||
477 | alternative_else_nop_endif | ||
478 | .endm | ||
479 | |||
480 | .macro uaccess_enable_not_uao, tmp1, tmp2 | ||
481 | uaccess_ttbr0_enable \tmp1, \tmp2 | ||
482 | alternative_if ARM64_ALT_PAN_NOT_UAO | ||
483 | SET_PSTATE_PAN(0) | ||
484 | alternative_else_nop_endif | ||
485 | .endm | ||
486 | |||
487 | #endif /* __ASSEMBLY__ */ | ||
488 | |||
334 | #endif /* __ASM_UACCESS_H */ | 489 | #endif /* __ASM_UACCESS_H */ |
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h new file mode 100644 index 000000000000..8d004073d0e8 --- /dev/null +++ b/arch/arm64/include/asm/uprobes.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_UPROBES_H | ||
10 | #define _ASM_UPROBES_H | ||
11 | |||
12 | #include <asm/debug-monitors.h> | ||
13 | #include <asm/insn.h> | ||
14 | #include <asm/probes.h> | ||
15 | |||
16 | #define MAX_UINSN_BYTES AARCH64_INSN_SIZE | ||
17 | |||
18 | #define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES | ||
19 | #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE | ||
20 | #define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES | ||
21 | |||
22 | typedef u32 uprobe_opcode_t; | ||
23 | |||
24 | struct arch_uprobe_task { | ||
25 | }; | ||
26 | |||
27 | struct arch_uprobe { | ||
28 | union { | ||
29 | u8 insn[MAX_UINSN_BYTES]; | ||
30 | u8 ixol[MAX_UINSN_BYTES]; | ||
31 | }; | ||
32 | struct arch_probe_insn api; | ||
33 | bool simulate; | ||
34 | }; | ||
35 | |||
36 | #endif | ||
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index b0988bb1bf64..04de188a36c9 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
@@ -14,10 +14,8 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/sysctl.h> | 15 | #include <linux/sysctl.h> |
16 | 16 | ||
17 | #include <asm/alternative.h> | ||
18 | #include <asm/cpufeature.h> | 17 | #include <asm/cpufeature.h> |
19 | #include <asm/insn.h> | 18 | #include <asm/insn.h> |
20 | #include <asm/opcodes.h> | ||
21 | #include <asm/sysreg.h> | 19 | #include <asm/sysreg.h> |
22 | #include <asm/system_misc.h> | 20 | #include <asm/system_misc.h> |
23 | #include <asm/traps.h> | 21 | #include <asm/traps.h> |
@@ -285,10 +283,10 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) | |||
285 | #define __SWP_LL_SC_LOOPS 4 | 283 | #define __SWP_LL_SC_LOOPS 4 |
286 | 284 | ||
287 | #define __user_swpX_asm(data, addr, res, temp, temp2, B) \ | 285 | #define __user_swpX_asm(data, addr, res, temp, temp2, B) \ |
286 | do { \ | ||
287 | uaccess_enable(); \ | ||
288 | __asm__ __volatile__( \ | 288 | __asm__ __volatile__( \ |
289 | " mov %w3, %w7\n" \ | 289 | " mov %w3, %w7\n" \ |
290 | ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ | ||
291 | CONFIG_ARM64_PAN) \ | ||
292 | "0: ldxr"B" %w2, [%4]\n" \ | 290 | "0: ldxr"B" %w2, [%4]\n" \ |
293 | "1: stxr"B" %w0, %w1, [%4]\n" \ | 291 | "1: stxr"B" %w0, %w1, [%4]\n" \ |
294 | " cbz %w0, 2f\n" \ | 292 | " cbz %w0, 2f\n" \ |
@@ -306,12 +304,12 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) | |||
306 | " .popsection" \ | 304 | " .popsection" \ |
307 | _ASM_EXTABLE(0b, 4b) \ | 305 | _ASM_EXTABLE(0b, 4b) \ |
308 | _ASM_EXTABLE(1b, 4b) \ | 306 | _ASM_EXTABLE(1b, 4b) \ |
309 | ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ | ||
310 | CONFIG_ARM64_PAN) \ | ||
311 | : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ | 307 | : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ |
312 | : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \ | 308 | : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \ |
313 | "i" (__SWP_LL_SC_LOOPS) \ | 309 | "i" (__SWP_LL_SC_LOOPS) \ |
314 | : "memory") | 310 | : "memory"); \ |
311 | uaccess_disable(); \ | ||
312 | } while (0) | ||
315 | 313 | ||
316 | #define __user_swp_asm(data, addr, res, temp, temp2) \ | 314 | #define __user_swp_asm(data, addr, res, temp, temp2) \ |
317 | __user_swpX_asm(data, addr, res, temp, temp2, "") | 315 | __user_swpX_asm(data, addr, res, temp, temp2, "") |
@@ -352,6 +350,10 @@ static int emulate_swpX(unsigned int address, unsigned int *data, | |||
352 | return res; | 350 | return res; |
353 | } | 351 | } |
354 | 352 | ||
353 | #define ARM_OPCODE_CONDTEST_FAIL 0 | ||
354 | #define ARM_OPCODE_CONDTEST_PASS 1 | ||
355 | #define ARM_OPCODE_CONDTEST_UNCOND 2 | ||
356 | |||
355 | #define ARM_OPCODE_CONDITION_UNCOND 0xf | 357 | #define ARM_OPCODE_CONDITION_UNCOND 0xf |
356 | 358 | ||
357 | static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) | 359 | static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) |
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 4a2f0f0fef32..bc049afc73a7 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -36,11 +36,13 @@ int main(void) | |||
36 | { | 36 | { |
37 | DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | 37 | DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); |
38 | BLANK(); | 38 | BLANK(); |
39 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 39 | DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); |
40 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | 40 | DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); |
41 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); | 41 | DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); |
42 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | 42 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
43 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | 43 | DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); |
44 | #endif | ||
45 | DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); | ||
44 | BLANK(); | 46 | BLANK(); |
45 | DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); | 47 | DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); |
46 | BLANK(); | 48 | BLANK(); |
@@ -123,6 +125,7 @@ int main(void) | |||
123 | DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); | 125 | DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); |
124 | BLANK(); | 126 | BLANK(); |
125 | DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); | 127 | DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); |
128 | DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); | ||
126 | BLANK(); | 129 | BLANK(); |
127 | #ifdef CONFIG_KVM_ARM_HOST | 130 | #ifdef CONFIG_KVM_ARM_HOST |
128 | DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); | 131 | DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c02504ea304b..fdf8f045929f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | 49 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); |
50 | EXPORT_SYMBOL(cpu_hwcaps); | ||
50 | 51 | ||
51 | DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); | 52 | DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); |
52 | EXPORT_SYMBOL(cpu_hwcap_keys); | 53 | EXPORT_SYMBOL(cpu_hwcap_keys); |
@@ -746,6 +747,14 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry, | |||
746 | return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode(); | 747 | return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode(); |
747 | } | 748 | } |
748 | 749 | ||
750 | static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused) | ||
751 | { | ||
752 | u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1); | ||
753 | |||
754 | return cpuid_feature_extract_signed_field(pfr0, | ||
755 | ID_AA64PFR0_FP_SHIFT) < 0; | ||
756 | } | ||
757 | |||
749 | static const struct arm64_cpu_capabilities arm64_features[] = { | 758 | static const struct arm64_cpu_capabilities arm64_features[] = { |
750 | { | 759 | { |
751 | .desc = "GIC system register CPU interface", | 760 | .desc = "GIC system register CPU interface", |
@@ -829,6 +838,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
829 | .def_scope = SCOPE_SYSTEM, | 838 | .def_scope = SCOPE_SYSTEM, |
830 | .matches = hyp_offset_low, | 839 | .matches = hyp_offset_low, |
831 | }, | 840 | }, |
841 | { | ||
842 | /* FP/SIMD is not implemented */ | ||
843 | .capability = ARM64_HAS_NO_FPSIMD, | ||
844 | .def_scope = SCOPE_SYSTEM, | ||
845 | .min_field_value = 0, | ||
846 | .matches = has_no_fpsimd, | ||
847 | }, | ||
832 | {}, | 848 | {}, |
833 | }; | 849 | }; |
834 | 850 | ||
@@ -1102,5 +1118,5 @@ void __init setup_cpu_features(void) | |||
1102 | static bool __maybe_unused | 1118 | static bool __maybe_unused |
1103 | cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) | 1119 | cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) |
1104 | { | 1120 | { |
1105 | return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO)); | 1121 | return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO)); |
1106 | } | 1122 | } |
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 73ae90ef434c..605df76f0a06 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
@@ -226,6 +226,8 @@ static void send_user_sigtrap(int si_code) | |||
226 | static int single_step_handler(unsigned long addr, unsigned int esr, | 226 | static int single_step_handler(unsigned long addr, unsigned int esr, |
227 | struct pt_regs *regs) | 227 | struct pt_regs *regs) |
228 | { | 228 | { |
229 | bool handler_found = false; | ||
230 | |||
229 | /* | 231 | /* |
230 | * If we are stepping a pending breakpoint, call the hw_breakpoint | 232 | * If we are stepping a pending breakpoint, call the hw_breakpoint |
231 | * handler first. | 233 | * handler first. |
@@ -233,7 +235,14 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
233 | if (!reinstall_suspended_bps(regs)) | 235 | if (!reinstall_suspended_bps(regs)) |
234 | return 0; | 236 | return 0; |
235 | 237 | ||
236 | if (user_mode(regs)) { | 238 | #ifdef CONFIG_KPROBES |
239 | if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED) | ||
240 | handler_found = true; | ||
241 | #endif | ||
242 | if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED) | ||
243 | handler_found = true; | ||
244 | |||
245 | if (!handler_found && user_mode(regs)) { | ||
237 | send_user_sigtrap(TRAP_TRACE); | 246 | send_user_sigtrap(TRAP_TRACE); |
238 | 247 | ||
239 | /* | 248 | /* |
@@ -243,15 +252,8 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
243 | * to the active-not-pending state). | 252 | * to the active-not-pending state). |
244 | */ | 253 | */ |
245 | user_rewind_single_step(current); | 254 | user_rewind_single_step(current); |
246 | } else { | 255 | } else if (!handler_found) { |
247 | #ifdef CONFIG_KPROBES | 256 | pr_warn("Unexpected kernel single-step exception at EL1\n"); |
248 | if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED) | ||
249 | return 0; | ||
250 | #endif | ||
251 | if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) | ||
252 | return 0; | ||
253 | |||
254 | pr_warning("Unexpected kernel single-step exception at EL1\n"); | ||
255 | /* | 257 | /* |
256 | * Re-enable stepping since we know that we will be | 258 | * Re-enable stepping since we know that we will be |
257 | * returning to regs. | 259 | * returning to regs. |
@@ -304,16 +306,20 @@ NOKPROBE_SYMBOL(call_break_hook); | |||
304 | static int brk_handler(unsigned long addr, unsigned int esr, | 306 | static int brk_handler(unsigned long addr, unsigned int esr, |
305 | struct pt_regs *regs) | 307 | struct pt_regs *regs) |
306 | { | 308 | { |
307 | if (user_mode(regs)) { | 309 | bool handler_found = false; |
308 | send_user_sigtrap(TRAP_BRKPT); | 310 | |
309 | } | ||
310 | #ifdef CONFIG_KPROBES | 311 | #ifdef CONFIG_KPROBES |
311 | else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) { | 312 | if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) { |
312 | if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED) | 313 | if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED) |
313 | return -EFAULT; | 314 | handler_found = true; |
314 | } | 315 | } |
315 | #endif | 316 | #endif |
316 | else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { | 317 | if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED) |
318 | handler_found = true; | ||
319 | |||
320 | if (!handler_found && user_mode(regs)) { | ||
321 | send_user_sigtrap(TRAP_BRKPT); | ||
322 | } else if (!handler_found) { | ||
317 | pr_warn("Unexpected kernel BRK exception at EL1\n"); | 323 | pr_warn("Unexpected kernel BRK exception at EL1\n"); |
318 | return -EFAULT; | 324 | return -EFAULT; |
319 | } | 325 | } |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index ba9bee389fd5..5d17f377d905 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -62,8 +62,8 @@ struct screen_info screen_info __section(.data); | |||
62 | int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) | 62 | int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) |
63 | { | 63 | { |
64 | pteval_t prot_val = create_mapping_protection(md); | 64 | pteval_t prot_val = create_mapping_protection(md); |
65 | bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE && | 65 | bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || |
66 | md->type != EFI_RUNTIME_SERVICES_DATA); | 66 | md->type == EFI_RUNTIME_SERVICES_DATA); |
67 | 67 | ||
68 | if (!PAGE_ALIGNED(md->phys_addr) || | 68 | if (!PAGE_ALIGNED(md->phys_addr) || |
69 | !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { | 69 | !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { |
@@ -76,12 +76,12 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) | |||
76 | * from the MMU routines. So avoid block mappings altogether in | 76 | * from the MMU routines. So avoid block mappings altogether in |
77 | * that case. | 77 | * that case. |
78 | */ | 78 | */ |
79 | allow_block_mappings = false; | 79 | page_mappings_only = true; |
80 | } | 80 | } |
81 | 81 | ||
82 | create_pgd_mapping(mm, md->phys_addr, md->virt_addr, | 82 | create_pgd_mapping(mm, md->phys_addr, md->virt_addr, |
83 | md->num_pages << EFI_PAGE_SHIFT, | 83 | md->num_pages << EFI_PAGE_SHIFT, |
84 | __pgprot(prot_val | PTE_NG), allow_block_mappings); | 84 | __pgprot(prot_val | PTE_NG), page_mappings_only); |
85 | return 0; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 223d54a4d66b..4f0d76339414 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -29,7 +29,9 @@ | |||
29 | #include <asm/esr.h> | 29 | #include <asm/esr.h> |
30 | #include <asm/irq.h> | 30 | #include <asm/irq.h> |
31 | #include <asm/memory.h> | 31 | #include <asm/memory.h> |
32 | #include <asm/ptrace.h> | ||
32 | #include <asm/thread_info.h> | 33 | #include <asm/thread_info.h> |
34 | #include <asm/uaccess.h> | ||
33 | #include <asm/unistd.h> | 35 | #include <asm/unistd.h> |
34 | 36 | ||
35 | /* | 37 | /* |
@@ -90,9 +92,8 @@ | |||
90 | 92 | ||
91 | .if \el == 0 | 93 | .if \el == 0 |
92 | mrs x21, sp_el0 | 94 | mrs x21, sp_el0 |
93 | mov tsk, sp | 95 | ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear, |
94 | and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear, | 96 | ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug |
95 | ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug | ||
96 | disable_step_tsk x19, x20 // exceptions when scheduling. | 97 | disable_step_tsk x19, x20 // exceptions when scheduling. |
97 | 98 | ||
98 | mov x29, xzr // fp pointed to user-space | 99 | mov x29, xzr // fp pointed to user-space |
@@ -100,15 +101,41 @@ | |||
100 | add x21, sp, #S_FRAME_SIZE | 101 | add x21, sp, #S_FRAME_SIZE |
101 | get_thread_info tsk | 102 | get_thread_info tsk |
102 | /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ | 103 | /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ |
103 | ldr x20, [tsk, #TI_ADDR_LIMIT] | 104 | ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] |
104 | str x20, [sp, #S_ORIG_ADDR_LIMIT] | 105 | str x20, [sp, #S_ORIG_ADDR_LIMIT] |
105 | mov x20, #TASK_SIZE_64 | 106 | mov x20, #TASK_SIZE_64 |
106 | str x20, [tsk, #TI_ADDR_LIMIT] | 107 | str x20, [tsk, #TSK_TI_ADDR_LIMIT] |
107 | /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ | 108 | /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ |
108 | .endif /* \el == 0 */ | 109 | .endif /* \el == 0 */ |
109 | mrs x22, elr_el1 | 110 | mrs x22, elr_el1 |
110 | mrs x23, spsr_el1 | 111 | mrs x23, spsr_el1 |
111 | stp lr, x21, [sp, #S_LR] | 112 | stp lr, x21, [sp, #S_LR] |
113 | |||
114 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
115 | /* | ||
116 | * Set the TTBR0 PAN bit in SPSR. When the exception is taken from | ||
117 | * EL0, there is no need to check the state of TTBR0_EL1 since | ||
118 | * accesses are always enabled. | ||
119 | * Note that the meaning of this bit differs from the ARMv8.1 PAN | ||
120 | * feature as all TTBR0_EL1 accesses are disabled, not just those to | ||
121 | * user mappings. | ||
122 | */ | ||
123 | alternative_if ARM64_HAS_PAN | ||
124 | b 1f // skip TTBR0 PAN | ||
125 | alternative_else_nop_endif | ||
126 | |||
127 | .if \el != 0 | ||
128 | mrs x21, ttbr0_el1 | ||
129 | tst x21, #0xffff << 48 // Check for the reserved ASID | ||
130 | orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR | ||
131 | b.eq 1f // TTBR0 access already disabled | ||
132 | and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR | ||
133 | .endif | ||
134 | |||
135 | __uaccess_ttbr0_disable x21 | ||
136 | 1: | ||
137 | #endif | ||
138 | |||
112 | stp x22, x23, [sp, #S_PC] | 139 | stp x22, x23, [sp, #S_PC] |
113 | 140 | ||
114 | /* | 141 | /* |
@@ -139,7 +166,7 @@ | |||
139 | .if \el != 0 | 166 | .if \el != 0 |
140 | /* Restore the task's original addr_limit. */ | 167 | /* Restore the task's original addr_limit. */ |
141 | ldr x20, [sp, #S_ORIG_ADDR_LIMIT] | 168 | ldr x20, [sp, #S_ORIG_ADDR_LIMIT] |
142 | str x20, [tsk, #TI_ADDR_LIMIT] | 169 | str x20, [tsk, #TSK_TI_ADDR_LIMIT] |
143 | 170 | ||
144 | /* No need to restore UAO, it will be restored from SPSR_EL1 */ | 171 | /* No need to restore UAO, it will be restored from SPSR_EL1 */ |
145 | .endif | 172 | .endif |
@@ -147,6 +174,40 @@ | |||
147 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR | 174 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR |
148 | .if \el == 0 | 175 | .if \el == 0 |
149 | ct_user_enter | 176 | ct_user_enter |
177 | .endif | ||
178 | |||
179 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
180 | /* | ||
181 | * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR | ||
182 | * PAN bit checking. | ||
183 | */ | ||
184 | alternative_if ARM64_HAS_PAN | ||
185 | b 2f // skip TTBR0 PAN | ||
186 | alternative_else_nop_endif | ||
187 | |||
188 | .if \el != 0 | ||
189 | tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set | ||
190 | .endif | ||
191 | |||
192 | __uaccess_ttbr0_enable x0 | ||
193 | |||
194 | .if \el == 0 | ||
195 | /* | ||
196 | * Enable errata workarounds only if returning to user. The only | ||
197 | * workaround currently required for TTBR0_EL1 changes are for the | ||
198 | * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache | ||
199 | * corruption). | ||
200 | */ | ||
201 | post_ttbr0_update_workaround | ||
202 | .endif | ||
203 | 1: | ||
204 | .if \el != 0 | ||
205 | and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit | ||
206 | .endif | ||
207 | 2: | ||
208 | #endif | ||
209 | |||
210 | .if \el == 0 | ||
150 | ldr x23, [sp, #S_SP] // load return stack pointer | 211 | ldr x23, [sp, #S_SP] // load return stack pointer |
151 | msr sp_el0, x23 | 212 | msr sp_el0, x23 |
152 | #ifdef CONFIG_ARM64_ERRATUM_845719 | 213 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
@@ -162,6 +223,7 @@ alternative_if ARM64_WORKAROUND_845719 | |||
162 | alternative_else_nop_endif | 223 | alternative_else_nop_endif |
163 | #endif | 224 | #endif |
164 | .endif | 225 | .endif |
226 | |||
165 | msr elr_el1, x21 // set up the return data | 227 | msr elr_el1, x21 // set up the return data |
166 | msr spsr_el1, x22 | 228 | msr spsr_el1, x22 |
167 | ldp x0, x1, [sp, #16 * 0] | 229 | ldp x0, x1, [sp, #16 * 0] |
@@ -184,23 +246,20 @@ alternative_else_nop_endif | |||
184 | eret // return to kernel | 246 | eret // return to kernel |
185 | .endm | 247 | .endm |
186 | 248 | ||
187 | .macro get_thread_info, rd | ||
188 | mrs \rd, sp_el0 | ||
189 | .endm | ||
190 | |||
191 | .macro irq_stack_entry | 249 | .macro irq_stack_entry |
192 | mov x19, sp // preserve the original sp | 250 | mov x19, sp // preserve the original sp |
193 | 251 | ||
194 | /* | 252 | /* |
195 | * Compare sp with the current thread_info, if the top | 253 | * Compare sp with the base of the task stack. |
196 | * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and | 254 | * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, |
197 | * should switch to the irq stack. | 255 | * and should switch to the irq stack. |
198 | */ | 256 | */ |
199 | and x25, x19, #~(THREAD_SIZE - 1) | 257 | ldr x25, [tsk, TSK_STACK] |
200 | cmp x25, tsk | 258 | eor x25, x25, x19 |
201 | b.ne 9998f | 259 | and x25, x25, #~(THREAD_SIZE - 1) |
260 | cbnz x25, 9998f | ||
202 | 261 | ||
203 | this_cpu_ptr irq_stack, x25, x26 | 262 | adr_this_cpu x25, irq_stack, x26 |
204 | mov x26, #IRQ_STACK_START_SP | 263 | mov x26, #IRQ_STACK_START_SP |
205 | add x26, x25, x26 | 264 | add x26, x25, x26 |
206 | 265 | ||
@@ -427,9 +486,9 @@ el1_irq: | |||
427 | irq_handler | 486 | irq_handler |
428 | 487 | ||
429 | #ifdef CONFIG_PREEMPT | 488 | #ifdef CONFIG_PREEMPT |
430 | ldr w24, [tsk, #TI_PREEMPT] // get preempt count | 489 | ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count |
431 | cbnz w24, 1f // preempt count != 0 | 490 | cbnz w24, 1f // preempt count != 0 |
432 | ldr x0, [tsk, #TI_FLAGS] // get flags | 491 | ldr x0, [tsk, #TSK_TI_FLAGS] // get flags |
433 | tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? | 492 | tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? |
434 | bl el1_preempt | 493 | bl el1_preempt |
435 | 1: | 494 | 1: |
@@ -444,7 +503,7 @@ ENDPROC(el1_irq) | |||
444 | el1_preempt: | 503 | el1_preempt: |
445 | mov x24, lr | 504 | mov x24, lr |
446 | 1: bl preempt_schedule_irq // irq en/disable is done inside | 505 | 1: bl preempt_schedule_irq // irq en/disable is done inside |
447 | ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS | 506 | ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS |
448 | tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? | 507 | tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? |
449 | ret x24 | 508 | ret x24 |
450 | #endif | 509 | #endif |
@@ -674,8 +733,7 @@ ENTRY(cpu_switch_to) | |||
674 | ldp x29, x9, [x8], #16 | 733 | ldp x29, x9, [x8], #16 |
675 | ldr lr, [x8] | 734 | ldr lr, [x8] |
676 | mov sp, x9 | 735 | mov sp, x9 |
677 | and x9, x9, #~(THREAD_SIZE - 1) | 736 | msr sp_el0, x1 |
678 | msr sp_el0, x9 | ||
679 | ret | 737 | ret |
680 | ENDPROC(cpu_switch_to) | 738 | ENDPROC(cpu_switch_to) |
681 | 739 | ||
@@ -686,7 +744,7 @@ ENDPROC(cpu_switch_to) | |||
686 | ret_fast_syscall: | 744 | ret_fast_syscall: |
687 | disable_irq // disable interrupts | 745 | disable_irq // disable interrupts |
688 | str x0, [sp, #S_X0] // returned x0 | 746 | str x0, [sp, #S_X0] // returned x0 |
689 | ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing | 747 | ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing |
690 | and x2, x1, #_TIF_SYSCALL_WORK | 748 | and x2, x1, #_TIF_SYSCALL_WORK |
691 | cbnz x2, ret_fast_syscall_trace | 749 | cbnz x2, ret_fast_syscall_trace |
692 | and x2, x1, #_TIF_WORK_MASK | 750 | and x2, x1, #_TIF_WORK_MASK |
@@ -706,14 +764,14 @@ work_pending: | |||
706 | #ifdef CONFIG_TRACE_IRQFLAGS | 764 | #ifdef CONFIG_TRACE_IRQFLAGS |
707 | bl trace_hardirqs_on // enabled while in userspace | 765 | bl trace_hardirqs_on // enabled while in userspace |
708 | #endif | 766 | #endif |
709 | ldr x1, [tsk, #TI_FLAGS] // re-check for single-step | 767 | ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step |
710 | b finish_ret_to_user | 768 | b finish_ret_to_user |
711 | /* | 769 | /* |
712 | * "slow" syscall return path. | 770 | * "slow" syscall return path. |
713 | */ | 771 | */ |
714 | ret_to_user: | 772 | ret_to_user: |
715 | disable_irq // disable interrupts | 773 | disable_irq // disable interrupts |
716 | ldr x1, [tsk, #TI_FLAGS] | 774 | ldr x1, [tsk, #TSK_TI_FLAGS] |
717 | and x2, x1, #_TIF_WORK_MASK | 775 | and x2, x1, #_TIF_WORK_MASK |
718 | cbnz x2, work_pending | 776 | cbnz x2, work_pending |
719 | finish_ret_to_user: | 777 | finish_ret_to_user: |
@@ -746,7 +804,7 @@ el0_svc_naked: // compat entry point | |||
746 | enable_dbg_and_irq | 804 | enable_dbg_and_irq |
747 | ct_user_exit 1 | 805 | ct_user_exit 1 |
748 | 806 | ||
749 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks | 807 | ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks |
750 | tst x16, #_TIF_SYSCALL_WORK | 808 | tst x16, #_TIF_SYSCALL_WORK |
751 | b.ne __sys_trace | 809 | b.ne __sys_trace |
752 | cmp scno, sc_nr // check upper syscall limit | 810 | cmp scno, sc_nr // check upper syscall limit |
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 394c61db5566..b883f1f75216 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
@@ -127,6 +127,8 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) | |||
127 | 127 | ||
128 | void fpsimd_thread_switch(struct task_struct *next) | 128 | void fpsimd_thread_switch(struct task_struct *next) |
129 | { | 129 | { |
130 | if (!system_supports_fpsimd()) | ||
131 | return; | ||
130 | /* | 132 | /* |
131 | * Save the current FPSIMD state to memory, but only if whatever is in | 133 | * Save the current FPSIMD state to memory, but only if whatever is in |
132 | * the registers is in fact the most recent userland FPSIMD state of | 134 | * the registers is in fact the most recent userland FPSIMD state of |
@@ -157,6 +159,8 @@ void fpsimd_thread_switch(struct task_struct *next) | |||
157 | 159 | ||
158 | void fpsimd_flush_thread(void) | 160 | void fpsimd_flush_thread(void) |
159 | { | 161 | { |
162 | if (!system_supports_fpsimd()) | ||
163 | return; | ||
160 | memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); | 164 | memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); |
161 | fpsimd_flush_task_state(current); | 165 | fpsimd_flush_task_state(current); |
162 | set_thread_flag(TIF_FOREIGN_FPSTATE); | 166 | set_thread_flag(TIF_FOREIGN_FPSTATE); |
@@ -168,6 +172,8 @@ void fpsimd_flush_thread(void) | |||
168 | */ | 172 | */ |
169 | void fpsimd_preserve_current_state(void) | 173 | void fpsimd_preserve_current_state(void) |
170 | { | 174 | { |
175 | if (!system_supports_fpsimd()) | ||
176 | return; | ||
171 | preempt_disable(); | 177 | preempt_disable(); |
172 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) | 178 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) |
172 | fpsimd_save_state(&current->thread.fpsimd_state); | 178 | fpsimd_save_state(&current->thread.fpsimd_state); |
@@ -181,6 +187,8 @@ void fpsimd_preserve_current_state(void) | |||
181 | */ | 187 | */ |
182 | void fpsimd_restore_current_state(void) | 188 | void fpsimd_restore_current_state(void) |
183 | { | 189 | { |
190 | if (!system_supports_fpsimd()) | ||
191 | return; | ||
184 | preempt_disable(); | 192 | preempt_disable(); |
185 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { | 193 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { |
186 | struct fpsimd_state *st = &current->thread.fpsimd_state; | 194 | struct fpsimd_state *st = &current->thread.fpsimd_state; |
@@ -199,6 +207,8 @@ void fpsimd_restore_current_state(void) | |||
199 | */ | 207 | */ |
200 | void fpsimd_update_current_state(struct fpsimd_state *state) | 208 | void fpsimd_update_current_state(struct fpsimd_state *state) |
201 | { | 209 | { |
210 | if (!system_supports_fpsimd()) | ||
211 | return; | ||
202 | preempt_disable(); | 212 | preempt_disable(); |
203 | fpsimd_load_state(state); | 213 | fpsimd_load_state(state); |
204 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { | 214 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { |
@@ -228,6 +238,8 @@ static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate); | |||
228 | */ | 238 | */ |
229 | void kernel_neon_begin_partial(u32 num_regs) | 239 | void kernel_neon_begin_partial(u32 num_regs) |
230 | { | 240 | { |
241 | if (WARN_ON(!system_supports_fpsimd())) | ||
242 | return; | ||
231 | if (in_interrupt()) { | 243 | if (in_interrupt()) { |
232 | struct fpsimd_partial_state *s = this_cpu_ptr( | 244 | struct fpsimd_partial_state *s = this_cpu_ptr( |
233 | in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate); | 245 | in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate); |
@@ -252,6 +264,8 @@ EXPORT_SYMBOL(kernel_neon_begin_partial); | |||
252 | 264 | ||
253 | void kernel_neon_end(void) | 265 | void kernel_neon_end(void) |
254 | { | 266 | { |
267 | if (!system_supports_fpsimd()) | ||
268 | return; | ||
255 | if (in_interrupt()) { | 269 | if (in_interrupt()) { |
256 | struct fpsimd_partial_state *s = this_cpu_ptr( | 270 | struct fpsimd_partial_state *s = this_cpu_ptr( |
257 | in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate); | 271 | in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate); |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 332e33193ccf..4b1abac3485a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -326,14 +326,14 @@ __create_page_tables: | |||
326 | * dirty cache lines being evicted. | 326 | * dirty cache lines being evicted. |
327 | */ | 327 | */ |
328 | adrp x0, idmap_pg_dir | 328 | adrp x0, idmap_pg_dir |
329 | adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE | 329 | adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE |
330 | bl __inval_cache_range | 330 | bl __inval_cache_range |
331 | 331 | ||
332 | /* | 332 | /* |
333 | * Clear the idmap and swapper page tables. | 333 | * Clear the idmap and swapper page tables. |
334 | */ | 334 | */ |
335 | adrp x0, idmap_pg_dir | 335 | adrp x0, idmap_pg_dir |
336 | adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE | 336 | adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE |
337 | 1: stp xzr, xzr, [x0], #16 | 337 | 1: stp xzr, xzr, [x0], #16 |
338 | stp xzr, xzr, [x0], #16 | 338 | stp xzr, xzr, [x0], #16 |
339 | stp xzr, xzr, [x0], #16 | 339 | stp xzr, xzr, [x0], #16 |
@@ -412,7 +412,7 @@ __create_page_tables: | |||
412 | * tables again to remove any speculatively loaded cache lines. | 412 | * tables again to remove any speculatively loaded cache lines. |
413 | */ | 413 | */ |
414 | adrp x0, idmap_pg_dir | 414 | adrp x0, idmap_pg_dir |
415 | adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE | 415 | adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE |
416 | dmb sy | 416 | dmb sy |
417 | bl __inval_cache_range | 417 | bl __inval_cache_range |
418 | 418 | ||
@@ -428,7 +428,8 @@ ENDPROC(__create_page_tables) | |||
428 | __primary_switched: | 428 | __primary_switched: |
429 | adrp x4, init_thread_union | 429 | adrp x4, init_thread_union |
430 | add sp, x4, #THREAD_SIZE | 430 | add sp, x4, #THREAD_SIZE |
431 | msr sp_el0, x4 // Save thread_info | 431 | adr_l x5, init_task |
432 | msr sp_el0, x5 // Save thread_info | ||
432 | 433 | ||
433 | adr_l x8, vectors // load VBAR_EL1 with virtual | 434 | adr_l x8, vectors // load VBAR_EL1 with virtual |
434 | msr vbar_el1, x8 // vector table address | 435 | msr vbar_el1, x8 // vector table address |
@@ -524,10 +525,21 @@ set_hcr: | |||
524 | msr hcr_el2, x0 | 525 | msr hcr_el2, x0 |
525 | isb | 526 | isb |
526 | 527 | ||
527 | /* Generic timers. */ | 528 | /* |
529 | * Allow Non-secure EL1 and EL0 to access physical timer and counter. | ||
530 | * This is not necessary for VHE, since the host kernel runs in EL2, | ||
531 | * and EL0 accesses are configured in the later stage of boot process. | ||
532 | * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout | ||
533 | * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined | ||
534 | * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1 | ||
535 | * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in | ||
536 | * EL2. | ||
537 | */ | ||
538 | cbnz x2, 1f | ||
528 | mrs x0, cnthctl_el2 | 539 | mrs x0, cnthctl_el2 |
529 | orr x0, x0, #3 // Enable EL1 physical timers | 540 | orr x0, x0, #3 // Enable EL1 physical timers |
530 | msr cnthctl_el2, x0 | 541 | msr cnthctl_el2, x0 |
542 | 1: | ||
531 | msr cntvoff_el2, xzr // Clear virtual offset | 543 | msr cntvoff_el2, xzr // Clear virtual offset |
532 | 544 | ||
533 | #ifdef CONFIG_ARM_GIC_V3 | 545 | #ifdef CONFIG_ARM_GIC_V3 |
@@ -699,10 +711,10 @@ __secondary_switched: | |||
699 | isb | 711 | isb |
700 | 712 | ||
701 | adr_l x0, secondary_data | 713 | adr_l x0, secondary_data |
702 | ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack | 714 | ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack |
703 | mov sp, x0 | 715 | mov sp, x1 |
704 | and x0, x0, #~(THREAD_SIZE - 1) | 716 | ldr x2, [x0, #CPU_BOOT_TASK] |
705 | msr sp_el0, x0 // save thread_info | 717 | msr sp_el0, x2 |
706 | mov x29, #0 | 718 | mov x29, #0 |
707 | b secondary_start_kernel | 719 | b secondary_start_kernel |
708 | ENDPROC(__secondary_switched) | 720 | ENDPROC(__secondary_switched) |
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 948b73148d56..1b3c747fedda 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
@@ -317,9 +317,21 @@ static int get_hbp_len(u8 hbp_len) | |||
317 | case ARM_BREAKPOINT_LEN_2: | 317 | case ARM_BREAKPOINT_LEN_2: |
318 | len_in_bytes = 2; | 318 | len_in_bytes = 2; |
319 | break; | 319 | break; |
320 | case ARM_BREAKPOINT_LEN_3: | ||
321 | len_in_bytes = 3; | ||
322 | break; | ||
320 | case ARM_BREAKPOINT_LEN_4: | 323 | case ARM_BREAKPOINT_LEN_4: |
321 | len_in_bytes = 4; | 324 | len_in_bytes = 4; |
322 | break; | 325 | break; |
326 | case ARM_BREAKPOINT_LEN_5: | ||
327 | len_in_bytes = 5; | ||
328 | break; | ||
329 | case ARM_BREAKPOINT_LEN_6: | ||
330 | len_in_bytes = 6; | ||
331 | break; | ||
332 | case ARM_BREAKPOINT_LEN_7: | ||
333 | len_in_bytes = 7; | ||
334 | break; | ||
323 | case ARM_BREAKPOINT_LEN_8: | 335 | case ARM_BREAKPOINT_LEN_8: |
324 | len_in_bytes = 8; | 336 | len_in_bytes = 8; |
325 | break; | 337 | break; |
@@ -349,7 +361,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) | |||
349 | * to generic breakpoint descriptions. | 361 | * to generic breakpoint descriptions. |
350 | */ | 362 | */ |
351 | int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, | 363 | int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, |
352 | int *gen_len, int *gen_type) | 364 | int *gen_len, int *gen_type, int *offset) |
353 | { | 365 | { |
354 | /* Type */ | 366 | /* Type */ |
355 | switch (ctrl.type) { | 367 | switch (ctrl.type) { |
@@ -369,17 +381,33 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, | |||
369 | return -EINVAL; | 381 | return -EINVAL; |
370 | } | 382 | } |
371 | 383 | ||
384 | if (!ctrl.len) | ||
385 | return -EINVAL; | ||
386 | *offset = __ffs(ctrl.len); | ||
387 | |||
372 | /* Len */ | 388 | /* Len */ |
373 | switch (ctrl.len) { | 389 | switch (ctrl.len >> *offset) { |
374 | case ARM_BREAKPOINT_LEN_1: | 390 | case ARM_BREAKPOINT_LEN_1: |
375 | *gen_len = HW_BREAKPOINT_LEN_1; | 391 | *gen_len = HW_BREAKPOINT_LEN_1; |
376 | break; | 392 | break; |
377 | case ARM_BREAKPOINT_LEN_2: | 393 | case ARM_BREAKPOINT_LEN_2: |
378 | *gen_len = HW_BREAKPOINT_LEN_2; | 394 | *gen_len = HW_BREAKPOINT_LEN_2; |
379 | break; | 395 | break; |
396 | case ARM_BREAKPOINT_LEN_3: | ||
397 | *gen_len = HW_BREAKPOINT_LEN_3; | ||
398 | break; | ||
380 | case ARM_BREAKPOINT_LEN_4: | 399 | case ARM_BREAKPOINT_LEN_4: |
381 | *gen_len = HW_BREAKPOINT_LEN_4; | 400 | *gen_len = HW_BREAKPOINT_LEN_4; |
382 | break; | 401 | break; |
402 | case ARM_BREAKPOINT_LEN_5: | ||
403 | *gen_len = HW_BREAKPOINT_LEN_5; | ||
404 | break; | ||
405 | case ARM_BREAKPOINT_LEN_6: | ||
406 | *gen_len = HW_BREAKPOINT_LEN_6; | ||
407 | break; | ||
408 | case ARM_BREAKPOINT_LEN_7: | ||
409 | *gen_len = HW_BREAKPOINT_LEN_7; | ||
410 | break; | ||
383 | case ARM_BREAKPOINT_LEN_8: | 411 | case ARM_BREAKPOINT_LEN_8: |
384 | *gen_len = HW_BREAKPOINT_LEN_8; | 412 | *gen_len = HW_BREAKPOINT_LEN_8; |
385 | break; | 413 | break; |
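With the offset parameter added above, ctrl.len is treated as a byte-address-select mask: __ffs() gives the offset of the first watched byte from the aligned base, and the shifted mask selects the generic length. A worked decode, using compiler builtins in place of the kernel helpers:

#include <stdio.h>

int main(void)
{
	unsigned int len = 0x60;		/* BAS bits 5 and 6 set */
	int offset = __builtin_ctz(len);	/* __ffs() equivalent: 5 */
	unsigned int shifted = len >> offset;	/* 0x3, i.e. a 2-byte watch */

	/* prints: mask 0x60 -> 2 byte(s) at base+5 (shifted 0x3) */
	printf("mask 0x%x -> %d byte(s) at base+%d (shifted 0x%x)\n",
	       len, __builtin_popcount(len), offset, shifted);
	return 0;
}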
@@ -423,9 +451,21 @@ static int arch_build_bp_info(struct perf_event *bp) | |||
423 | case HW_BREAKPOINT_LEN_2: | 451 | case HW_BREAKPOINT_LEN_2: |
424 | info->ctrl.len = ARM_BREAKPOINT_LEN_2; | 452 | info->ctrl.len = ARM_BREAKPOINT_LEN_2; |
425 | break; | 453 | break; |
454 | case HW_BREAKPOINT_LEN_3: | ||
455 | info->ctrl.len = ARM_BREAKPOINT_LEN_3; | ||
456 | break; | ||
426 | case HW_BREAKPOINT_LEN_4: | 457 | case HW_BREAKPOINT_LEN_4: |
427 | info->ctrl.len = ARM_BREAKPOINT_LEN_4; | 458 | info->ctrl.len = ARM_BREAKPOINT_LEN_4; |
428 | break; | 459 | break; |
460 | case HW_BREAKPOINT_LEN_5: | ||
461 | info->ctrl.len = ARM_BREAKPOINT_LEN_5; | ||
462 | break; | ||
463 | case HW_BREAKPOINT_LEN_6: | ||
464 | info->ctrl.len = ARM_BREAKPOINT_LEN_6; | ||
465 | break; | ||
466 | case HW_BREAKPOINT_LEN_7: | ||
467 | info->ctrl.len = ARM_BREAKPOINT_LEN_7; | ||
468 | break; | ||
429 | case HW_BREAKPOINT_LEN_8: | 469 | case HW_BREAKPOINT_LEN_8: |
430 | info->ctrl.len = ARM_BREAKPOINT_LEN_8; | 470 | info->ctrl.len = ARM_BREAKPOINT_LEN_8; |
431 | break; | 471 | break; |
@@ -517,18 +557,17 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
517 | default: | 557 | default: |
518 | return -EINVAL; | 558 | return -EINVAL; |
519 | } | 559 | } |
520 | |||
521 | info->address &= ~alignment_mask; | ||
522 | info->ctrl.len <<= offset; | ||
523 | } else { | 560 | } else { |
524 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) | 561 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) |
525 | alignment_mask = 0x3; | 562 | alignment_mask = 0x3; |
526 | else | 563 | else |
527 | alignment_mask = 0x7; | 564 | alignment_mask = 0x7; |
528 | if (info->address & alignment_mask) | 565 | offset = info->address & alignment_mask; |
529 | return -EINVAL; | ||
530 | } | 566 | } |
531 | 567 | ||
568 | info->address &= ~alignment_mask; | ||
569 | info->ctrl.len <<= offset; | ||
570 | |||
532 | /* | 571 | /* |
533 | * Disallow per-task kernel breakpoints since these would | 572 | * Disallow per-task kernel breakpoints since these would |
534 | * complicate the stepping code. | 573 | * complicate the stepping code. |
@@ -661,12 +700,47 @@ unlock: | |||
661 | } | 700 | } |
662 | NOKPROBE_SYMBOL(breakpoint_handler); | 701 | NOKPROBE_SYMBOL(breakpoint_handler); |
663 | 702 | ||
703 | /* | ||
704 | * Arm64 hardware does not always report a watchpoint hit address that matches | ||
705 | * one of the watchpoints set. It can also report an address "near" the | ||
706 | * watchpoint if a single instruction accesses both watched and unwatched | ||
707 | * addresses. There is no straightforward way, short of disassembling the | ||
708 | * offending instruction, to map that address back to the watchpoint. This | ||
709 | * function computes the distance of the memory access from the watchpoint as a | ||
710 | * heuristic for the likelihood that a given access triggered the watchpoint. | ||
711 | * | ||
712 | * See Section D2.10.5 "Determining the memory location that caused a Watchpoint | ||
713 | * exception" of the ARMv8 Architecture Reference Manual for details. | ||
714 | * | ||
715 | * The function returns the distance of the address from the bytes watched by | ||
716 | * the watchpoint. In case of an exact match, it returns 0. | ||
717 | */ | ||
718 | static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, | ||
719 | struct arch_hw_breakpoint_ctrl *ctrl) | ||
720 | { | ||
721 | u64 wp_low, wp_high; | ||
722 | u32 lens, lene; | ||
723 | |||
724 | lens = __ffs(ctrl->len); | ||
725 | lene = __fls(ctrl->len); | ||
726 | |||
727 | wp_low = val + lens; | ||
728 | wp_high = val + lene; | ||
729 | if (addr < wp_low) | ||
730 | return wp_low - addr; | ||
731 | else if (addr > wp_high) | ||
732 | return addr - wp_high; | ||
733 | else | ||
734 | return 0; | ||
735 | } | ||
736 | |||
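Concretely, for a 4-byte watchpoint at 0x1000 (byte-select mask 0x0f) a reported hit at 0x1008 is 5 bytes past the last watched byte. A runnable user-space sketch of the same computation, with __builtin_ctz()/__builtin_clz() standing in for __ffs()/__fls() (a sketch, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t distance(uint64_t addr, uint64_t val, unsigned int len_mask)
    {
            unsigned int lens = __builtin_ctz(len_mask);      /* first watched byte */
            unsigned int lene = 31 - __builtin_clz(len_mask); /* last watched byte */
            uint64_t wp_low = val + lens, wp_high = val + lene;

            if (addr < wp_low)
                    return wp_low - addr;
            if (addr > wp_high)
                    return addr - wp_high;
            return 0;
    }

    int main(void)
    {
            /* 4-byte watchpoint at 0x1000; hardware reports a hit at 0x1008 */
            printf("distance=%llu\n",
                   (unsigned long long)distance(0x1008, 0x1000, 0x0f));
            return 0; /* prints distance=5 */
    }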
664 | static int watchpoint_handler(unsigned long addr, unsigned int esr, | 737 | static int watchpoint_handler(unsigned long addr, unsigned int esr, |
665 | struct pt_regs *regs) | 738 | struct pt_regs *regs) |
666 | { | 739 | { |
667 | int i, step = 0, *kernel_step, access; | 740 | int i, step = 0, *kernel_step, access, closest_match = 0; |
741 | u64 min_dist = -1, dist; | ||
668 | u32 ctrl_reg; | 742 | u32 ctrl_reg; |
669 | u64 val, alignment_mask; | 743 | u64 val; |
670 | struct perf_event *wp, **slots; | 744 | struct perf_event *wp, **slots; |
671 | struct debug_info *debug_info; | 745 | struct debug_info *debug_info; |
672 | struct arch_hw_breakpoint *info; | 746 | struct arch_hw_breakpoint *info; |
@@ -675,35 +749,15 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, | |||
675 | slots = this_cpu_ptr(wp_on_reg); | 749 | slots = this_cpu_ptr(wp_on_reg); |
676 | debug_info = &current->thread.debug; | 750 | debug_info = &current->thread.debug; |
677 | 751 | ||
752 | /* | ||
753 | * Find all watchpoints that match the reported address. If no exact | ||
754 | * match is found. Attribute the hit to the closest watchpoint. | ||
755 | */ | ||
756 | rcu_read_lock(); | ||
678 | for (i = 0; i < core_num_wrps; ++i) { | 757 | for (i = 0; i < core_num_wrps; ++i) { |
679 | rcu_read_lock(); | ||
680 | |||
681 | wp = slots[i]; | 758 | wp = slots[i]; |
682 | |||
683 | if (wp == NULL) | 759 | if (wp == NULL) |
684 | goto unlock; | 760 | continue; |
685 | |||
686 | info = counter_arch_bp(wp); | ||
687 | /* AArch32 watchpoints are either 4 or 8 bytes aligned. */ | ||
688 | if (is_compat_task()) { | ||
689 | if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) | ||
690 | alignment_mask = 0x7; | ||
691 | else | ||
692 | alignment_mask = 0x3; | ||
693 | } else { | ||
694 | alignment_mask = 0x7; | ||
695 | } | ||
696 | |||
697 | /* Check if the watchpoint value matches. */ | ||
698 | val = read_wb_reg(AARCH64_DBG_REG_WVR, i); | ||
699 | if (val != (addr & ~alignment_mask)) | ||
700 | goto unlock; | ||
701 | |||
702 | /* Possible match, check the byte address select to confirm. */ | ||
703 | ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i); | ||
704 | decode_ctrl_reg(ctrl_reg, &ctrl); | ||
705 | if (!((1 << (addr & alignment_mask)) & ctrl.len)) | ||
706 | goto unlock; | ||
707 | 761 | ||
708 | /* | 762 | /* |
709 | * Check that the access type matches. | 763 | * Check that the access type matches. |
@@ -712,18 +766,41 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, | |||
712 | access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : | 766 | access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : |
713 | HW_BREAKPOINT_R; | 767 | HW_BREAKPOINT_R; |
714 | if (!(access & hw_breakpoint_type(wp))) | 768 | if (!(access & hw_breakpoint_type(wp))) |
715 | goto unlock; | 769 | continue; |
716 | 770 | ||
771 | /* Check if the watchpoint value and byte select match. */ | ||
772 | val = read_wb_reg(AARCH64_DBG_REG_WVR, i); | ||
773 | ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i); | ||
774 | decode_ctrl_reg(ctrl_reg, &ctrl); | ||
775 | dist = get_distance_from_watchpoint(addr, val, &ctrl); | ||
776 | if (dist < min_dist) { | ||
777 | min_dist = dist; | ||
778 | closest_match = i; | ||
779 | } | ||
780 | /* Is this an exact match? */ | ||
781 | if (dist != 0) | ||
782 | continue; | ||
783 | |||
784 | info = counter_arch_bp(wp); | ||
717 | info->trigger = addr; | 785 | info->trigger = addr; |
718 | perf_bp_event(wp, regs); | 786 | perf_bp_event(wp, regs); |
719 | 787 | ||
720 | /* Do we need to handle the stepping? */ | 788 | /* Do we need to handle the stepping? */ |
721 | if (is_default_overflow_handler(wp)) | 789 | if (is_default_overflow_handler(wp)) |
722 | step = 1; | 790 | step = 1; |
791 | } | ||
792 | if (min_dist > 0 && min_dist != -1) { | ||
793 | /* No exact match found. */ | ||
794 | wp = slots[closest_match]; | ||
795 | info = counter_arch_bp(wp); | ||
796 | info->trigger = addr; | ||
797 | perf_bp_event(wp, regs); | ||
723 | 798 | ||
724 | unlock: | 799 | /* Do we need to handle the stepping? */ |
725 | rcu_read_unlock(); | 800 | if (is_default_overflow_handler(wp)) |
801 | step = 1; | ||
726 | } | 802 | } |
803 | rcu_read_unlock(); | ||
727 | 804 | ||
728 | if (!step) | 805 | if (!step) |
729 | return 0; | 806 | return 0; |
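The selection logic depends on min_dist starting at (u64)-1, so the first computed distance always wins; exact hits (dist == 0) are reported inline, and the closest slot is used only when the loop finishes without an exact match. A reduced sketch with made-up distances:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dists[] = { 12, 3, 7 };   /* per-slot distances, no exact hit */
            uint64_t min_dist = (uint64_t)-1;
            int i, closest = 0;

            for (i = 0; i < 3; i++) {
                    if (dists[i] < min_dist) {
                            min_dist = dists[i];
                            closest = i;
                    }
            }
            printf("attributing hit to slot %d (distance %llu)\n",
                   closest, (unsigned long long)min_dist); /* slot 1, distance 3 */
            return 0;
    }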
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 6f2ac4fc66ca..94b62c1fa4df 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
31 | #include <asm/debug-monitors.h> | 31 | #include <asm/debug-monitors.h> |
32 | #include <asm/fixmap.h> | 32 | #include <asm/fixmap.h> |
33 | #include <asm/opcodes.h> | ||
34 | #include <asm/insn.h> | 33 | #include <asm/insn.h> |
35 | 34 | ||
36 | #define AARCH64_INSN_SF_BIT BIT(31) | 35 | #define AARCH64_INSN_SF_BIT BIT(31) |
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index e017a9493b92..d217c9e95b06 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c | |||
@@ -247,6 +247,9 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); | |||
247 | 247 | ||
248 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) | 248 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) |
249 | { | 249 | { |
250 | if (!kgdb_single_step) | ||
251 | return DBG_HOOK_ERROR; | ||
252 | |||
250 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 253 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
251 | return 0; | 254 | return 0; |
252 | } | 255 | } |
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile index ce06312e3d34..89b6df613dde 100644 --- a/arch/arm64/kernel/probes/Makefile +++ b/arch/arm64/kernel/probes/Makefile | |||
@@ -1,3 +1,5 @@ | |||
1 | obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ | 1 | obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ |
2 | kprobes_trampoline.o \ | 2 | kprobes_trampoline.o \ |
3 | simulate-insn.o | 3 | simulate-insn.o |
4 | obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \ | ||
5 | simulate-insn.o | ||
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c index d1731bf977ef..6bf6657a5a52 100644 --- a/arch/arm64/kernel/probes/decode-insn.c +++ b/arch/arm64/kernel/probes/decode-insn.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kprobes.h> | 17 | #include <linux/kprobes.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
20 | #include <asm/kprobes.h> | ||
21 | #include <asm/insn.h> | 20 | #include <asm/insn.h> |
22 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
23 | 22 | ||
@@ -78,8 +77,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn) | |||
78 | * INSN_GOOD If instruction is supported and uses instruction slot, | 77 | * INSN_GOOD If instruction is supported and uses instruction slot, |
79 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. | 78 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. |
80 | */ | 79 | */ |
81 | static enum kprobe_insn __kprobes | 80 | enum probe_insn __kprobes |
82 | arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | 81 | arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api) |
83 | { | 82 | { |
84 | /* | 83 | /* |
85 | * Instructions reading or modifying the PC won't work from the XOL | 84 | * Instructions reading or modifying the PC won't work from the XOL |
@@ -89,26 +88,26 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
89 | return INSN_GOOD; | 88 | return INSN_GOOD; |
90 | 89 | ||
91 | if (aarch64_insn_is_bcond(insn)) { | 90 | if (aarch64_insn_is_bcond(insn)) { |
92 | asi->handler = simulate_b_cond; | 91 | api->handler = simulate_b_cond; |
93 | } else if (aarch64_insn_is_cbz(insn) || | 92 | } else if (aarch64_insn_is_cbz(insn) || |
94 | aarch64_insn_is_cbnz(insn)) { | 93 | aarch64_insn_is_cbnz(insn)) { |
95 | asi->handler = simulate_cbz_cbnz; | 94 | api->handler = simulate_cbz_cbnz; |
96 | } else if (aarch64_insn_is_tbz(insn) || | 95 | } else if (aarch64_insn_is_tbz(insn) || |
97 | aarch64_insn_is_tbnz(insn)) { | 96 | aarch64_insn_is_tbnz(insn)) { |
98 | asi->handler = simulate_tbz_tbnz; | 97 | api->handler = simulate_tbz_tbnz; |
99 | } else if (aarch64_insn_is_adr_adrp(insn)) { | 98 | } else if (aarch64_insn_is_adr_adrp(insn)) { |
100 | asi->handler = simulate_adr_adrp; | 99 | api->handler = simulate_adr_adrp; |
101 | } else if (aarch64_insn_is_b(insn) || | 100 | } else if (aarch64_insn_is_b(insn) || |
102 | aarch64_insn_is_bl(insn)) { | 101 | aarch64_insn_is_bl(insn)) { |
103 | asi->handler = simulate_b_bl; | 102 | api->handler = simulate_b_bl; |
104 | } else if (aarch64_insn_is_br(insn) || | 103 | } else if (aarch64_insn_is_br(insn) || |
105 | aarch64_insn_is_blr(insn) || | 104 | aarch64_insn_is_blr(insn) || |
106 | aarch64_insn_is_ret(insn)) { | 105 | aarch64_insn_is_ret(insn)) { |
107 | asi->handler = simulate_br_blr_ret; | 106 | api->handler = simulate_br_blr_ret; |
108 | } else if (aarch64_insn_is_ldr_lit(insn)) { | 107 | } else if (aarch64_insn_is_ldr_lit(insn)) { |
109 | asi->handler = simulate_ldr_literal; | 108 | api->handler = simulate_ldr_literal; |
110 | } else if (aarch64_insn_is_ldrsw_lit(insn)) { | 109 | } else if (aarch64_insn_is_ldrsw_lit(insn)) { |
111 | asi->handler = simulate_ldrsw_literal; | 110 | api->handler = simulate_ldrsw_literal; |
112 | } else { | 111 | } else { |
113 | /* | 112 | /* |
114 | * Instruction cannot be stepped out-of-line and we don't | 113 | * Instruction cannot be stepped out-of-line and we don't |
@@ -120,6 +119,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
120 | return INSN_GOOD_NO_SLOT; | 119 | return INSN_GOOD_NO_SLOT; |
121 | } | 120 | } |
122 | 121 | ||
122 | #ifdef CONFIG_KPROBES | ||
123 | static bool __kprobes | 123 | static bool __kprobes |
124 | is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) | 124 | is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) |
125 | { | 125 | { |
@@ -138,12 +138,12 @@ is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) | |||
138 | return false; | 138 | return false; |
139 | } | 139 | } |
140 | 140 | ||
141 | enum kprobe_insn __kprobes | 141 | enum probe_insn __kprobes |
142 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) | 142 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) |
143 | { | 143 | { |
144 | enum kprobe_insn decoded; | 144 | enum probe_insn decoded; |
145 | kprobe_opcode_t insn = le32_to_cpu(*addr); | 145 | probe_opcode_t insn = le32_to_cpu(*addr); |
146 | kprobe_opcode_t *scan_end = NULL; | 146 | probe_opcode_t *scan_end = NULL; |
147 | unsigned long size = 0, offset = 0; | 147 | unsigned long size = 0, offset = 0; |
148 | 148 | ||
149 | /* | 149 | /* |
@@ -162,7 +162,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) | |||
162 | else | 162 | else |
163 | scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; | 163 | scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; |
164 | } | 164 | } |
165 | decoded = arm_probe_decode_insn(insn, asi); | 165 | decoded = arm_probe_decode_insn(insn, &asi->api); |
166 | 166 | ||
167 | if (decoded != INSN_REJECTED && scan_end) | 167 | if (decoded != INSN_REJECTED && scan_end) |
168 | if (is_probed_address_atomic(addr - 1, scan_end)) | 168 | if (is_probed_address_atomic(addr - 1, scan_end)) |
@@ -170,3 +170,4 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) | |||
170 | 170 | ||
171 | return decoded; | 171 | return decoded; |
172 | } | 172 | } |
173 | #endif | ||
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h index d438289646a6..76d3f315407f 100644 --- a/arch/arm64/kernel/probes/decode-insn.h +++ b/arch/arm64/kernel/probes/decode-insn.h | |||
@@ -23,13 +23,17 @@ | |||
23 | */ | 23 | */ |
24 | #define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t)) | 24 | #define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t)) |
25 | 25 | ||
26 | enum kprobe_insn { | 26 | enum probe_insn { |
27 | INSN_REJECTED, | 27 | INSN_REJECTED, |
28 | INSN_GOOD_NO_SLOT, | 28 | INSN_GOOD_NO_SLOT, |
29 | INSN_GOOD, | 29 | INSN_GOOD, |
30 | }; | 30 | }; |
31 | 31 | ||
32 | enum kprobe_insn __kprobes | 32 | #ifdef CONFIG_KPROBES |
33 | enum probe_insn __kprobes | ||
33 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi); | 34 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi); |
35 | #endif | ||
36 | enum probe_insn __kprobes | ||
37 | arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi); | ||
34 | 38 | ||
35 | #endif /* _ARM_KERNEL_KPROBES_ARM64_H */ | 39 | #endif /* _ARM_KERNEL_KPROBES_ARM64_H */ |
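Exporting arm_probe_decode_insn() this way lets uprobes reuse the decoder while the atomic-sequence scan stays kprobes-only. As a sketch of one classification branch, the test below mirrors what aarch64_insn_is_bcond() checks; the 0xff000010/0x54000000 mask pair is taken from the ARMv8 B.cond encoding and is an assumption here, not something stated in the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* B.cond: top byte 0x54, bit 4 clear (assumed ARMv8 encoding) */
    static int is_bcond(uint32_t insn)
    {
            return (insn & 0xff000010) == 0x54000000;
    }

    int main(void)
    {
            uint32_t insn = 0x54000040;  /* B.EQ .+8 */

            printf("decode: %s\n",
                   is_bcond(insn) ? "INSN_GOOD_NO_SLOT (simulate)" : "other");
            return 0;
    }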
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f5077ea7af6d..1decd2b2c730 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c | |||
@@ -44,31 +44,31 @@ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); | |||
44 | static void __kprobes arch_prepare_ss_slot(struct kprobe *p) | 44 | static void __kprobes arch_prepare_ss_slot(struct kprobe *p) |
45 | { | 45 | { |
46 | /* prepare insn slot */ | 46 | /* prepare insn slot */ |
47 | p->ainsn.insn[0] = cpu_to_le32(p->opcode); | 47 | p->ainsn.api.insn[0] = cpu_to_le32(p->opcode); |
48 | 48 | ||
49 | flush_icache_range((uintptr_t) (p->ainsn.insn), | 49 | flush_icache_range((uintptr_t) (p->ainsn.api.insn), |
50 | (uintptr_t) (p->ainsn.insn) + | 50 | (uintptr_t) (p->ainsn.api.insn) + |
51 | MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 51 | MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Needs restoring of return address after stepping xol. | 54 | * Needs restoring of return address after stepping xol. |
55 | */ | 55 | */ |
56 | p->ainsn.restore = (unsigned long) p->addr + | 56 | p->ainsn.api.restore = (unsigned long) p->addr + |
57 | sizeof(kprobe_opcode_t); | 57 | sizeof(kprobe_opcode_t); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void __kprobes arch_prepare_simulate(struct kprobe *p) | 60 | static void __kprobes arch_prepare_simulate(struct kprobe *p) |
61 | { | 61 | { |
62 | /* This instruction is not executed xol. No need to adjust the PC */ | 62 | /* This instruction is not executed xol. No need to adjust the PC */ |
63 | p->ainsn.restore = 0; | 63 | p->ainsn.api.restore = 0; |
64 | } | 64 | } |
65 | 65 | ||
66 | static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) | 66 | static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) |
67 | { | 67 | { |
68 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 68 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
69 | 69 | ||
70 | if (p->ainsn.handler) | 70 | if (p->ainsn.api.handler) |
71 | p->ainsn.handler((u32)p->opcode, (long)p->addr, regs); | 71 | p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); |
72 | 72 | ||
73 | /* single step simulated, now go for post processing */ | 73 | /* single step simulated, now go for post processing */ |
74 | post_kprobe_handler(kcb, regs); | 74 | post_kprobe_handler(kcb, regs); |
@@ -98,18 +98,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
98 | return -EINVAL; | 98 | return -EINVAL; |
99 | 99 | ||
100 | case INSN_GOOD_NO_SLOT: /* insn needs simulation */ | 100 | case INSN_GOOD_NO_SLOT: /* insn needs simulation */ |
101 | p->ainsn.insn = NULL; | 101 | p->ainsn.api.insn = NULL; |
102 | break; | 102 | break; |
103 | 103 | ||
104 | case INSN_GOOD: /* instruction uses slot */ | 104 | case INSN_GOOD: /* instruction uses slot */ |
105 | p->ainsn.insn = get_insn_slot(); | 105 | p->ainsn.api.insn = get_insn_slot(); |
106 | if (!p->ainsn.insn) | 106 | if (!p->ainsn.api.insn) |
107 | return -ENOMEM; | 107 | return -ENOMEM; |
108 | break; | 108 | break; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /* prepare the instruction */ | 111 | /* prepare the instruction */ |
112 | if (p->ainsn.insn) | 112 | if (p->ainsn.api.insn) |
113 | arch_prepare_ss_slot(p); | 113 | arch_prepare_ss_slot(p); |
114 | else | 114 | else |
115 | arch_prepare_simulate(p); | 115 | arch_prepare_simulate(p); |
@@ -142,9 +142,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
142 | 142 | ||
143 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 143 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
144 | { | 144 | { |
145 | if (p->ainsn.insn) { | 145 | if (p->ainsn.api.insn) { |
146 | free_insn_slot(p->ainsn.insn, 0); | 146 | free_insn_slot(p->ainsn.api.insn, 0); |
147 | p->ainsn.insn = NULL; | 147 | p->ainsn.api.insn = NULL; |
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
@@ -244,9 +244,9 @@ static void __kprobes setup_singlestep(struct kprobe *p, | |||
244 | } | 244 | } |
245 | 245 | ||
246 | 246 | ||
247 | if (p->ainsn.insn) { | 247 | if (p->ainsn.api.insn) { |
248 | /* prepare for single stepping */ | 248 | /* prepare for single stepping */ |
249 | slot = (unsigned long)p->ainsn.insn; | 249 | slot = (unsigned long)p->ainsn.api.insn; |
250 | 250 | ||
251 | set_ss_context(kcb, slot); /* mark pending ss */ | 251 | set_ss_context(kcb, slot); /* mark pending ss */ |
252 | 252 | ||
@@ -295,8 +295,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) | |||
295 | return; | 295 | return; |
296 | 296 | ||
297 | /* return addr restore if non-branching insn */ | 297 | /* return addr restore if non-branching insn */ |
298 | if (cur->ainsn.restore != 0) | 298 | if (cur->ainsn.api.restore != 0) |
299 | instruction_pointer_set(regs, cur->ainsn.restore); | 299 | instruction_pointer_set(regs, cur->ainsn.api.restore); |
300 | 300 | ||
301 | /* restore back original saved kprobe variables and continue */ | 301 | /* restore back original saved kprobe variables and continue */ |
302 | if (kcb->kprobe_status == KPROBE_REENTER) { | 302 | if (kcb->kprobe_status == KPROBE_REENTER) { |
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c index 8977ce9d009d..357d3efe1366 100644 --- a/arch/arm64/kernel/probes/simulate-insn.c +++ b/arch/arm64/kernel/probes/simulate-insn.c | |||
@@ -13,28 +13,26 @@ | |||
13 | * General Public License for more details. | 13 | * General Public License for more details. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/bitops.h> | ||
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/kprobes.h> | 18 | #include <linux/kprobes.h> |
18 | 19 | ||
19 | #include "simulate-insn.h" | 20 | #include "simulate-insn.h" |
20 | 21 | ||
21 | #define sign_extend(x, signbit) \ | ||
22 | ((x) | (0 - ((x) & (1 << (signbit))))) | ||
23 | |||
24 | #define bbl_displacement(insn) \ | 22 | #define bbl_displacement(insn) \ |
25 | sign_extend(((insn) & 0x3ffffff) << 2, 27) | 23 | sign_extend32(((insn) & 0x3ffffff) << 2, 27) |
26 | 24 | ||
27 | #define bcond_displacement(insn) \ | 25 | #define bcond_displacement(insn) \ |
28 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | 26 | sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) |
29 | 27 | ||
30 | #define cbz_displacement(insn) \ | 28 | #define cbz_displacement(insn) \ |
31 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | 29 | sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) |
32 | 30 | ||
33 | #define tbz_displacement(insn) \ | 31 | #define tbz_displacement(insn) \ |
34 | sign_extend(((insn >> 5) & 0x3fff) << 2, 15) | 32 | sign_extend32(((insn >> 5) & 0x3fff) << 2, 15) |
35 | 33 | ||
36 | #define ldr_displacement(insn) \ | 34 | #define ldr_displacement(insn) \ |
37 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | 35 | sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20) |
38 | 36 | ||
39 | static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val) | 37 | static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val) |
40 | { | 38 | { |
@@ -106,7 +104,7 @@ simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs) | |||
106 | 104 | ||
107 | xn = opcode & 0x1f; | 105 | xn = opcode & 0x1f; |
108 | imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3); | 106 | imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3); |
109 | imm = sign_extend(imm, 20); | 107 | imm = sign_extend64(imm, 20); |
110 | if (opcode & 0x80000000) | 108 | if (opcode & 0x80000000) |
111 | val = (imm<<12) + (addr & 0xfffffffffffff000); | 109 | val = (imm<<12) + (addr & 0xfffffffffffff000); |
112 | else | 110 | else |
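The switch to the generic sign_extend32()/sign_extend64() helpers keeps the displacement macros' semantics while dropping the local macro. A user-space sketch of what bbl_displacement() computes for a backwards branch (sign_extend32() re-implemented locally for illustration; the kernel's version lives in <linux/bitops.h>):

    #include <stdio.h>
    #include <stdint.h>

    static int32_t sign_extend32(uint32_t value, int index)
    {
            uint8_t shift = 31 - index;
            return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
            uint32_t insn = 0x17ffffff;  /* B .-4 : imm26 = 0x3ffffff */
            int32_t disp = sign_extend32((insn & 0x3ffffff) << 2, 27);

            printf("displacement = %d\n", disp);  /* prints -4 */
            return 0;
    }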
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c new file mode 100644 index 000000000000..26c998534dca --- /dev/null +++ b/arch/arm64/kernel/probes/uprobes.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/highmem.h> | ||
9 | #include <linux/ptrace.h> | ||
10 | #include <linux/uprobes.h> | ||
11 | #include <asm/cacheflush.h> | ||
12 | |||
13 | #include "decode-insn.h" | ||
14 | |||
15 | #define UPROBE_INV_FAULT_CODE UINT_MAX | ||
16 | |||
17 | void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, | ||
18 | void *src, unsigned long len) | ||
19 | { | ||
20 | void *xol_page_kaddr = kmap_atomic(page); | ||
21 | void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); | ||
22 | |||
23 | /* Initialize the slot */ | ||
24 | memcpy(dst, src, len); | ||
25 | |||
26 | /* flush caches (dcache/icache) */ | ||
27 | sync_icache_aliases(dst, len); | ||
28 | |||
29 | kunmap_atomic(xol_page_kaddr); | ||
30 | } | ||
31 | |||
32 | unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) | ||
33 | { | ||
34 | return instruction_pointer(regs); | ||
35 | } | ||
36 | |||
37 | int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, | ||
38 | unsigned long addr) | ||
39 | { | ||
40 | probe_opcode_t insn; | ||
41 | |||
42 | /* TODO: Currently we do not support AARCH32 instruction probing */ | ||
43 | if (test_bit(TIF_32BIT, &mm->context.flags)) | ||
44 | return -ENOTSUPP; | ||
45 | else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) | ||
46 | return -EINVAL; | ||
47 | |||
48 | insn = *(probe_opcode_t *)(&auprobe->insn[0]); | ||
49 | |||
50 | switch (arm_probe_decode_insn(insn, &auprobe->api)) { | ||
51 | case INSN_REJECTED: | ||
52 | return -EINVAL; | ||
53 | |||
54 | case INSN_GOOD_NO_SLOT: | ||
55 | auprobe->simulate = true; | ||
56 | break; | ||
57 | |||
58 | default: | ||
59 | break; | ||
60 | } | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
66 | { | ||
67 | struct uprobe_task *utask = current->utask; | ||
68 | |||
69 | /* Initialize with an invalid fault code to detect if ol insn trapped */ | ||
70 | current->thread.fault_code = UPROBE_INV_FAULT_CODE; | ||
71 | |||
72 | /* Point the instruction pointer at the out-of-line (XOL) slot */ | ||
73 | instruction_pointer_set(regs, utask->xol_vaddr); | ||
74 | |||
75 | user_enable_single_step(current); | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
81 | { | ||
82 | struct uprobe_task *utask = current->utask; | ||
83 | |||
84 | WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE); | ||
85 | |||
86 | /* Point the instruction pointer at the instruction after the probed one */ | ||
87 | instruction_pointer_set(regs, utask->vaddr + 4); | ||
88 | |||
89 | user_disable_single_step(current); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | bool arch_uprobe_xol_was_trapped(struct task_struct *t) | ||
94 | { | ||
95 | /* | ||
96 | * Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if the xol | ||
97 | * insn itself traps, detect that case via the invalid fault code | ||
98 | * set in arch_uprobe_pre_xol. | ||
99 | */ | ||
100 | if (t->thread.fault_code != UPROBE_INV_FAULT_CODE) | ||
101 | return true; | ||
102 | |||
103 | return false; | ||
104 | } | ||
105 | |||
106 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
107 | { | ||
108 | probe_opcode_t insn; | ||
109 | unsigned long addr; | ||
110 | |||
111 | if (!auprobe->simulate) | ||
112 | return false; | ||
113 | |||
114 | insn = *(probe_opcode_t *)(&auprobe->insn[0]); | ||
115 | addr = instruction_pointer(regs); | ||
116 | |||
117 | if (auprobe->api.handler) | ||
118 | auprobe->api.handler(insn, addr, regs); | ||
119 | |||
120 | return true; | ||
121 | } | ||
122 | |||
123 | void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
124 | { | ||
125 | struct uprobe_task *utask = current->utask; | ||
126 | |||
127 | /* | ||
128 | * Task has received a fatal signal, so reset back to the probed | ||
129 | * address. | ||
130 | */ | ||
131 | instruction_pointer_set(regs, utask->vaddr); | ||
132 | |||
133 | user_disable_single_step(current); | ||
134 | } | ||
135 | |||
136 | bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, | ||
137 | struct pt_regs *regs) | ||
138 | { | ||
139 | /* | ||
140 | * If a plain branch instruction (B) was used to reach the retprobed | ||
141 | * assembly label, return true even when regs->sp and ret->stack are | ||
142 | * the same. This ensures that cleanup and reporting of the return | ||
143 | * instances corresponding to the callee label happen when | ||
144 | * handle_trampoline runs for the calling function. | ||
145 | */ | ||
146 | if (ctx == RP_CHECK_CHAIN_CALL) | ||
147 | return regs->sp <= ret->stack; | ||
148 | else | ||
149 | return regs->sp < ret->stack; | ||
150 | } | ||
151 | |||
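The comparison choice is the subtle part: a chained call reached via a plain B reuses the caller's SP, so RP_CHECK_CHAIN_CALL must treat equal stack pointers as still-alive. A reduced, runnable sketch (addresses illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum rp_check { RP_CHECK_CALL, RP_CHECK_CHAIN_CALL, RP_CHECK_RET };

    static bool is_alive(unsigned long sp, unsigned long stack, enum rp_check ctx)
    {
            if (ctx == RP_CHECK_CHAIN_CALL)
                    return sp <= stack;
            return sp < stack;
    }

    int main(void)
    {
            /* B to a retprobed label: SP unchanged, instance must survive */
            printf("chain call, same sp: %d\n",
                   is_alive(0x7fff1000, 0x7fff1000, RP_CHECK_CHAIN_CALL)); /* 1 */
            printf("normal check, same sp: %d\n",
                   is_alive(0x7fff1000, 0x7fff1000, RP_CHECK_RET));        /* 0 */
            return 0;
    }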
152 | unsigned long | ||
153 | arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, | ||
154 | struct pt_regs *regs) | ||
155 | { | ||
156 | unsigned long orig_ret_vaddr; | ||
157 | |||
158 | orig_ret_vaddr = procedure_link_pointer(regs); | ||
159 | /* Replace the return addr with trampoline addr */ | ||
160 | procedure_link_pointer_set(regs, trampoline_vaddr); | ||
161 | |||
162 | return orig_ret_vaddr; | ||
163 | } | ||
164 | |||
165 | int arch_uprobe_exception_notify(struct notifier_block *self, | ||
166 | unsigned long val, void *data) | ||
167 | { | ||
168 | return NOTIFY_DONE; | ||
169 | } | ||
170 | |||
171 | static int uprobe_breakpoint_handler(struct pt_regs *regs, | ||
172 | unsigned int esr) | ||
173 | { | ||
174 | if (user_mode(regs) && uprobe_pre_sstep_notifier(regs)) | ||
175 | return DBG_HOOK_HANDLED; | ||
176 | |||
177 | return DBG_HOOK_ERROR; | ||
178 | } | ||
179 | |||
180 | static int uprobe_single_step_handler(struct pt_regs *regs, | ||
181 | unsigned int esr) | ||
182 | { | ||
183 | struct uprobe_task *utask = current->utask; | ||
184 | |||
185 | if (user_mode(regs)) { | ||
186 | WARN_ON(utask && | ||
187 | (instruction_pointer(regs) != utask->xol_vaddr + 4)); | ||
188 | |||
189 | if (uprobe_post_sstep_notifier(regs)) | ||
190 | return DBG_HOOK_HANDLED; | ||
191 | } | ||
192 | |||
193 | return DBG_HOOK_ERROR; | ||
194 | } | ||
195 | |||
196 | /* uprobe breakpoint handler hook */ | ||
197 | static struct break_hook uprobes_break_hook = { | ||
198 | .esr_mask = BRK64_ESR_MASK, | ||
199 | .esr_val = BRK64_ESR_UPROBES, | ||
200 | .fn = uprobe_breakpoint_handler, | ||
201 | }; | ||
202 | |||
203 | /* uprobe single step handler hook */ | ||
204 | static struct step_hook uprobes_step_hook = { | ||
205 | .fn = uprobe_single_step_handler, | ||
206 | }; | ||
207 | |||
208 | static int __init arch_init_uprobes(void) | ||
209 | { | ||
210 | register_break_hook(&uprobes_break_hook); | ||
211 | register_step_hook(&uprobes_step_hook); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | device_initcall(arch_init_uprobes); | ||
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 01753cd7d3f0..a3a2816ba73a 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/personality.h> | 45 | #include <linux/personality.h> |
46 | #include <linux/notifier.h> | 46 | #include <linux/notifier.h> |
47 | #include <trace/events/power.h> | 47 | #include <trace/events/power.h> |
48 | #include <linux/percpu.h> | ||
48 | 49 | ||
49 | #include <asm/alternative.h> | 50 | #include <asm/alternative.h> |
50 | #include <asm/compat.h> | 51 | #include <asm/compat.h> |
@@ -282,7 +283,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
282 | memset(childregs, 0, sizeof(struct pt_regs)); | 283 | memset(childregs, 0, sizeof(struct pt_regs)); |
283 | childregs->pstate = PSR_MODE_EL1h; | 284 | childregs->pstate = PSR_MODE_EL1h; |
284 | if (IS_ENABLED(CONFIG_ARM64_UAO) && | 285 | if (IS_ENABLED(CONFIG_ARM64_UAO) && |
285 | cpus_have_cap(ARM64_HAS_UAO)) | 286 | cpus_have_const_cap(ARM64_HAS_UAO)) |
286 | childregs->pstate |= PSR_UAO_BIT; | 287 | childregs->pstate |= PSR_UAO_BIT; |
287 | p->thread.cpu_context.x19 = stack_start; | 288 | p->thread.cpu_context.x19 = stack_start; |
288 | p->thread.cpu_context.x20 = stk_sz; | 289 | p->thread.cpu_context.x20 = stk_sz; |
@@ -322,6 +323,20 @@ void uao_thread_switch(struct task_struct *next) | |||
322 | } | 323 | } |
323 | 324 | ||
324 | /* | 325 | /* |
326 | * We store our current task in sp_el0, which is clobbered by userspace. Keep a | ||
327 | * shadow copy so that we can restore this upon entry from userspace. | ||
328 | * | ||
329 | * This is *only* for exception entry from EL0, and is not valid until we | ||
330 | * __switch_to() a user task. | ||
331 | */ | ||
332 | DEFINE_PER_CPU(struct task_struct *, __entry_task); | ||
333 | |||
334 | static void entry_task_switch(struct task_struct *next) | ||
335 | { | ||
336 | __this_cpu_write(__entry_task, next); | ||
337 | } | ||
338 | |||
339 | /* | ||
325 | * Thread switching. | 340 | * Thread switching. |
326 | */ | 341 | */ |
327 | struct task_struct *__switch_to(struct task_struct *prev, | 342 | struct task_struct *__switch_to(struct task_struct *prev, |
@@ -333,6 +348,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
333 | tls_thread_switch(next); | 348 | tls_thread_switch(next); |
334 | hw_breakpoint_thread_switch(next); | 349 | hw_breakpoint_thread_switch(next); |
335 | contextidr_thread_switch(next); | 350 | contextidr_thread_switch(next); |
351 | entry_task_switch(next); | ||
336 | uao_thread_switch(next); | 352 | uao_thread_switch(next); |
337 | 353 | ||
338 | /* | 354 | /* |
@@ -350,27 +366,35 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
350 | unsigned long get_wchan(struct task_struct *p) | 366 | unsigned long get_wchan(struct task_struct *p) |
351 | { | 367 | { |
352 | struct stackframe frame; | 368 | struct stackframe frame; |
353 | unsigned long stack_page; | 369 | unsigned long stack_page, ret = 0; |
354 | int count = 0; | 370 | int count = 0; |
355 | if (!p || p == current || p->state == TASK_RUNNING) | 371 | if (!p || p == current || p->state == TASK_RUNNING) |
356 | return 0; | 372 | return 0; |
357 | 373 | ||
374 | stack_page = (unsigned long)try_get_task_stack(p); | ||
375 | if (!stack_page) | ||
376 | return 0; | ||
377 | |||
358 | frame.fp = thread_saved_fp(p); | 378 | frame.fp = thread_saved_fp(p); |
359 | frame.sp = thread_saved_sp(p); | 379 | frame.sp = thread_saved_sp(p); |
360 | frame.pc = thread_saved_pc(p); | 380 | frame.pc = thread_saved_pc(p); |
361 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 381 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
362 | frame.graph = p->curr_ret_stack; | 382 | frame.graph = p->curr_ret_stack; |
363 | #endif | 383 | #endif |
364 | stack_page = (unsigned long)task_stack_page(p); | ||
365 | do { | 384 | do { |
366 | if (frame.sp < stack_page || | 385 | if (frame.sp < stack_page || |
367 | frame.sp >= stack_page + THREAD_SIZE || | 386 | frame.sp >= stack_page + THREAD_SIZE || |
368 | unwind_frame(p, &frame)) | 387 | unwind_frame(p, &frame)) |
369 | return 0; | 388 | goto out; |
370 | if (!in_sched_functions(frame.pc)) | 389 | if (!in_sched_functions(frame.pc)) { |
371 | return frame.pc; | 390 | ret = frame.pc; |
391 | goto out; | ||
392 | } | ||
372 | } while (count ++ < 16); | 393 | } while (count ++ < 16); |
373 | return 0; | 394 | |
395 | out: | ||
396 | put_task_stack(p); | ||
397 | return ret; | ||
374 | } | 398 | } |
375 | 399 | ||
376 | unsigned long arch_align_stack(unsigned long sp) | 400 | unsigned long arch_align_stack(unsigned long sp) |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index e0c81da60f76..fc35e06ccaac 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -327,13 +327,13 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, | |||
327 | struct arch_hw_breakpoint_ctrl ctrl, | 327 | struct arch_hw_breakpoint_ctrl ctrl, |
328 | struct perf_event_attr *attr) | 328 | struct perf_event_attr *attr) |
329 | { | 329 | { |
330 | int err, len, type, disabled = !ctrl.enabled; | 330 | int err, len, type, offset, disabled = !ctrl.enabled; |
331 | 331 | ||
332 | attr->disabled = disabled; | 332 | attr->disabled = disabled; |
333 | if (disabled) | 333 | if (disabled) |
334 | return 0; | 334 | return 0; |
335 | 335 | ||
336 | err = arch_bp_generic_fields(ctrl, &len, &type); | 336 | err = arch_bp_generic_fields(ctrl, &len, &type, &offset); |
337 | if (err) | 337 | if (err) |
338 | return err; | 338 | return err; |
339 | 339 | ||
@@ -352,6 +352,7 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, | |||
352 | 352 | ||
353 | attr->bp_len = len; | 353 | attr->bp_len = len; |
354 | attr->bp_type = type; | 354 | attr->bp_type = type; |
355 | attr->bp_addr += offset; | ||
355 | 356 | ||
356 | return 0; | 357 | return 0; |
357 | } | 358 | } |
@@ -404,7 +405,7 @@ static int ptrace_hbp_get_addr(unsigned int note_type, | |||
404 | if (IS_ERR(bp)) | 405 | if (IS_ERR(bp)) |
405 | return PTR_ERR(bp); | 406 | return PTR_ERR(bp); |
406 | 407 | ||
407 | *addr = bp ? bp->attr.bp_addr : 0; | 408 | *addr = bp ? counter_arch_bp(bp)->address : 0; |
408 | return 0; | 409 | return 0; |
409 | } | 410 | } |
410 | 411 | ||
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 1718706fde83..12a87f2600f2 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/export.h> | 12 | #include <linux/export.h> |
13 | #include <linux/ftrace.h> | 13 | #include <linux/ftrace.h> |
14 | 14 | ||
15 | #include <asm/stack_pointer.h> | ||
15 | #include <asm/stacktrace.h> | 16 | #include <asm/stacktrace.h> |
16 | 17 | ||
17 | struct return_address_data { | 18 | struct return_address_data { |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f534f492a268..a53f52ac81c6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -291,6 +291,15 @@ void __init setup_arch(char **cmdline_p) | |||
291 | smp_init_cpus(); | 291 | smp_init_cpus(); |
292 | smp_build_mpidr_hash(); | 292 | smp_build_mpidr_hash(); |
293 | 293 | ||
294 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
295 | /* | ||
296 | * Make sure init_thread_info.ttbr0 always generates translation | ||
297 | * faults in case uaccess_enable() is inadvertently called by the init | ||
298 | * thread. | ||
299 | */ | ||
300 | init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page); | ||
301 | #endif | ||
302 | |||
294 | #ifdef CONFIG_VT | 303 | #ifdef CONFIG_VT |
295 | #if defined(CONFIG_VGA_CONSOLE) | 304 | #if defined(CONFIG_VGA_CONSOLE) |
296 | conswitchp = &vga_con; | 305 | conswitchp = &vga_con; |
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 404dd67080b9..c7b6de62f9d3 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c | |||
@@ -414,6 +414,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, | |||
414 | } else { | 414 | } else { |
415 | local_irq_enable(); | 415 | local_irq_enable(); |
416 | 416 | ||
417 | if (thread_flags & _TIF_UPROBE) | ||
418 | uprobe_notify_resume(regs); | ||
419 | |||
417 | if (thread_flags & _TIF_SIGPENDING) | 420 | if (thread_flags & _TIF_SIGPENDING) |
418 | do_signal(regs); | 421 | do_signal(regs); |
419 | 422 | ||
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 1bec41b5fda3..df67652e46f0 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -125,9 +125,6 @@ ENTRY(_cpu_resume) | |||
125 | /* load sp from context */ | 125 | /* load sp from context */ |
126 | ldr x2, [x0, #CPU_CTX_SP] | 126 | ldr x2, [x0, #CPU_CTX_SP] |
127 | mov sp, x2 | 127 | mov sp, x2 |
128 | /* save thread_info */ | ||
129 | and x2, x2, #~(THREAD_SIZE - 1) | ||
130 | msr sp_el0, x2 | ||
131 | /* | 128 | /* |
132 | * cpu_do_resume expects x0 to contain context address pointer | 129 | * cpu_do_resume expects x0 to contain context address pointer |
133 | */ | 130 | */ |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 8507703dabe4..cb87234cfcf2 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -58,6 +58,9 @@ | |||
58 | #define CREATE_TRACE_POINTS | 58 | #define CREATE_TRACE_POINTS |
59 | #include <trace/events/ipi.h> | 59 | #include <trace/events/ipi.h> |
60 | 60 | ||
61 | DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); | ||
62 | EXPORT_PER_CPU_SYMBOL(cpu_number); | ||
63 | |||
61 | /* | 64 | /* |
62 | * as from 2.5, kernels no longer have an init_tasks structure | 65 | * as from 2.5, kernels no longer have an init_tasks structure |
63 | * so we need some other way of telling a new secondary core | 66 | * so we need some other way of telling a new secondary core |
@@ -146,6 +149,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
146 | * We need to tell the secondary core where to find its stack and the | 149 | * We need to tell the secondary core where to find its stack and the |
147 | * page tables. | 150 | * page tables. |
148 | */ | 151 | */ |
152 | secondary_data.task = idle; | ||
149 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 153 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
150 | update_cpu_boot_status(CPU_MMU_OFF); | 154 | update_cpu_boot_status(CPU_MMU_OFF); |
151 | __flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 155 | __flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
@@ -170,6 +174,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
170 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); | 174 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); |
171 | } | 175 | } |
172 | 176 | ||
177 | secondary_data.task = NULL; | ||
173 | secondary_data.stack = NULL; | 178 | secondary_data.stack = NULL; |
174 | status = READ_ONCE(secondary_data.status); | 179 | status = READ_ONCE(secondary_data.status); |
175 | if (ret && status) { | 180 | if (ret && status) { |
@@ -208,7 +213,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
208 | asmlinkage void secondary_start_kernel(void) | 213 | asmlinkage void secondary_start_kernel(void) |
209 | { | 214 | { |
210 | struct mm_struct *mm = &init_mm; | 215 | struct mm_struct *mm = &init_mm; |
211 | unsigned int cpu = smp_processor_id(); | 216 | unsigned int cpu; |
217 | |||
218 | cpu = task_cpu(current); | ||
219 | set_my_cpu_offset(per_cpu_offset(cpu)); | ||
212 | 220 | ||
213 | /* | 221 | /* |
214 | * All kernel threads share the same mm context; grab a | 222 | * All kernel threads share the same mm context; grab a |
@@ -217,8 +225,6 @@ asmlinkage void secondary_start_kernel(void) | |||
217 | atomic_inc(&mm->mm_count); | 225 | atomic_inc(&mm->mm_count); |
218 | current->active_mm = mm; | 226 | current->active_mm = mm; |
219 | 227 | ||
220 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); | ||
221 | |||
222 | /* | 228 | /* |
223 | * TTBR0 is only used for the identity mapping at this stage. Make it | 229 | * TTBR0 is only used for the identity mapping at this stage. Make it |
224 | * point to zero page to avoid speculatively fetching new entries. | 230 | * point to zero page to avoid speculatively fetching new entries. |
@@ -718,6 +724,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
718 | */ | 724 | */ |
719 | for_each_possible_cpu(cpu) { | 725 | for_each_possible_cpu(cpu) { |
720 | 726 | ||
727 | per_cpu(cpu_number, cpu) = cpu; | ||
728 | |||
721 | if (cpu == smp_processor_id()) | 729 | if (cpu == smp_processor_id()) |
722 | continue; | 730 | continue; |
723 | 731 | ||
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c2efddfca18c..8a552a33c6ef 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/stacktrace.h> | 22 | #include <linux/stacktrace.h> |
23 | 23 | ||
24 | #include <asm/irq.h> | 24 | #include <asm/irq.h> |
25 | #include <asm/stack_pointer.h> | ||
25 | #include <asm/stacktrace.h> | 26 | #include <asm/stacktrace.h> |
26 | 27 | ||
27 | /* | 28 | /* |
@@ -128,7 +129,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, | |||
128 | break; | 129 | break; |
129 | } | 130 | } |
130 | } | 131 | } |
131 | EXPORT_SYMBOL(walk_stackframe); | ||
132 | 132 | ||
133 | #ifdef CONFIG_STACKTRACE | 133 | #ifdef CONFIG_STACKTRACE |
134 | struct stack_trace_data { | 134 | struct stack_trace_data { |
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
181 | struct stack_trace_data data; | 181 | struct stack_trace_data data; |
182 | struct stackframe frame; | 182 | struct stackframe frame; |
183 | 183 | ||
184 | if (!try_get_task_stack(tsk)) | ||
185 | return; | ||
186 | |||
184 | data.trace = trace; | 187 | data.trace = trace; |
185 | data.skip = trace->skip; | 188 | data.skip = trace->skip; |
186 | 189 | ||
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
202 | walk_stackframe(tsk, &frame, save_trace, &data); | 205 | walk_stackframe(tsk, &frame, save_trace, &data); |
203 | if (trace->nr_entries < trace->max_entries) | 206 | if (trace->nr_entries < trace->max_entries) |
204 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 207 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
208 | |||
209 | put_task_stack(tsk); | ||
205 | } | 210 | } |
206 | 211 | ||
207 | void save_stack_trace(struct stack_trace *trace) | 212 | void save_stack_trace(struct stack_trace *trace) |
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index bb0cd787a9d3..1e3be9064cfa 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -47,12 +47,6 @@ void notrace __cpu_suspend_exit(void) | |||
47 | cpu_uninstall_idmap(); | 47 | cpu_uninstall_idmap(); |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Restore per-cpu offset before any kernel | ||
51 | * subsystem relying on it has a chance to run. | ||
52 | */ | ||
53 | set_my_cpu_offset(per_cpu_offset(cpu)); | ||
54 | |||
55 | /* | ||
56 | * PSTATE was not saved over suspend/resume, re-enable any detected | 50 | * PSTATE was not saved over suspend/resume, re-enable any detected |
57 | * features that might not have been set correctly. | 51 | * features that might not have been set correctly. |
58 | */ | 52 | */ |
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 694f6deedbab..23e9e13bd2aa 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -19,10 +19,226 @@ | |||
19 | #include <linux/nodemask.h> | 19 | #include <linux/nodemask.h> |
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/cpufreq.h> | ||
22 | 25 | ||
26 | #include <asm/cpu.h> | ||
23 | #include <asm/cputype.h> | 27 | #include <asm/cputype.h> |
24 | #include <asm/topology.h> | 28 | #include <asm/topology.h> |
25 | 29 | ||
30 | static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; | ||
31 | static DEFINE_MUTEX(cpu_scale_mutex); | ||
32 | |||
33 | unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) | ||
34 | { | ||
35 | return per_cpu(cpu_scale, cpu); | ||
36 | } | ||
37 | |||
38 | static void set_capacity_scale(unsigned int cpu, unsigned long capacity) | ||
39 | { | ||
40 | per_cpu(cpu_scale, cpu) = capacity; | ||
41 | } | ||
42 | |||
43 | #ifdef CONFIG_PROC_SYSCTL | ||
44 | static ssize_t cpu_capacity_show(struct device *dev, | ||
45 | struct device_attribute *attr, | ||
46 | char *buf) | ||
47 | { | ||
48 | struct cpu *cpu = container_of(dev, struct cpu, dev); | ||
49 | |||
50 | return sprintf(buf, "%lu\n", | ||
51 | arch_scale_cpu_capacity(NULL, cpu->dev.id)); | ||
52 | } | ||
53 | |||
54 | static ssize_t cpu_capacity_store(struct device *dev, | ||
55 | struct device_attribute *attr, | ||
56 | const char *buf, | ||
57 | size_t count) | ||
58 | { | ||
59 | struct cpu *cpu = container_of(dev, struct cpu, dev); | ||
60 | int this_cpu = cpu->dev.id, i; | ||
61 | unsigned long new_capacity; | ||
62 | ssize_t ret; | ||
63 | |||
64 | if (count) { | ||
65 | ret = kstrtoul(buf, 0, &new_capacity); | ||
66 | if (ret) | ||
67 | return ret; | ||
68 | if (new_capacity > SCHED_CAPACITY_SCALE) | ||
69 | return -EINVAL; | ||
70 | |||
71 | mutex_lock(&cpu_scale_mutex); | ||
72 | for_each_cpu(i, &cpu_topology[this_cpu].core_sibling) | ||
73 | set_capacity_scale(i, new_capacity); | ||
74 | mutex_unlock(&cpu_scale_mutex); | ||
75 | } | ||
76 | |||
77 | return count; | ||
78 | } | ||
79 | |||
80 | static DEVICE_ATTR_RW(cpu_capacity); | ||
81 | |||
82 | static int register_cpu_capacity_sysctl(void) | ||
83 | { | ||
84 | int i; | ||
85 | struct device *cpu; | ||
86 | |||
87 | for_each_possible_cpu(i) { | ||
88 | cpu = get_cpu_device(i); | ||
89 | if (!cpu) { | ||
90 | pr_err("%s: too early to get CPU%d device!\n", | ||
91 | __func__, i); | ||
92 | continue; | ||
93 | } | ||
94 | device_create_file(cpu, &dev_attr_cpu_capacity); | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | subsys_initcall(register_cpu_capacity_sysctl); | ||
100 | #endif | ||
101 | |||
102 | static u32 capacity_scale; | ||
103 | static u32 *raw_capacity; | ||
104 | static bool cap_parsing_failed; | ||
105 | |||
106 | static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu) | ||
107 | { | ||
108 | int ret; | ||
109 | u32 cpu_capacity; | ||
110 | |||
111 | if (cap_parsing_failed) | ||
112 | return; | ||
113 | |||
114 | ret = of_property_read_u32(cpu_node, | ||
115 | "capacity-dmips-mhz", | ||
116 | &cpu_capacity); | ||
117 | if (!ret) { | ||
118 | if (!raw_capacity) { | ||
119 | raw_capacity = kcalloc(num_possible_cpus(), | ||
120 | sizeof(*raw_capacity), | ||
121 | GFP_KERNEL); | ||
122 | if (!raw_capacity) { | ||
123 | pr_err("cpu_capacity: failed to allocate memory for raw capacities\n"); | ||
124 | cap_parsing_failed = true; | ||
125 | return; | ||
126 | } | ||
127 | } | ||
128 | capacity_scale = max(cpu_capacity, capacity_scale); | ||
129 | raw_capacity[cpu] = cpu_capacity; | ||
130 | pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n", | ||
131 | cpu_node->full_name, raw_capacity[cpu]); | ||
132 | } else { | ||
133 | if (raw_capacity) { | ||
134 | pr_err("cpu_capacity: missing %s raw capacity\n", | ||
135 | cpu_node->full_name); | ||
136 | pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); | ||
137 | } | ||
138 | cap_parsing_failed = true; | ||
139 | kfree(raw_capacity); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | static void normalize_cpu_capacity(void) | ||
144 | { | ||
145 | u64 capacity; | ||
146 | int cpu; | ||
147 | |||
148 | if (!raw_capacity || cap_parsing_failed) | ||
149 | return; | ||
150 | |||
151 | pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); | ||
152 | mutex_lock(&cpu_scale_mutex); | ||
153 | for_each_possible_cpu(cpu) { | ||
154 | pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", | ||
155 | cpu, raw_capacity[cpu]); | ||
156 | capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) | ||
157 | / capacity_scale; | ||
158 | set_capacity_scale(cpu, capacity); | ||
159 | pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", | ||
160 | cpu, arch_scale_cpu_capacity(NULL, cpu)); | ||
161 | } | ||
162 | mutex_unlock(&cpu_scale_mutex); | ||
163 | } | ||
164 | |||
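The normalization maps the largest raw capacity-dmips-mhz value to SCHED_CAPACITY_SCALE (1024) and scales the rest proportionally; the cpufreq notifier below feeds the same loop after first weighting each raw value by its CPU's maximum frequency. A worked example with an assumed big.LITTLE pair (values illustrative, not from the patch):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10  /* SCHED_CAPACITY_SCALE == 1024 */

    int main(void)
    {
            unsigned int raw[2] = { 1024, 512 }; /* capacity-dmips-mhz from DT */
            unsigned int capacity_scale = 1024;  /* max over all raw values */
            int cpu;

            for (cpu = 0; cpu < 2; cpu++) {
                    unsigned long cap =
                            ((unsigned long)raw[cpu] << SCHED_CAPACITY_SHIFT)
                            / capacity_scale;
                    printf("cpu%d capacity=%lu\n", cpu, cap); /* 1024, 512 */
            }
            return 0;
    }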
165 | #ifdef CONFIG_CPU_FREQ | ||
166 | static cpumask_var_t cpus_to_visit; | ||
167 | static bool cap_parsing_done; | ||
168 | static void parsing_done_workfn(struct work_struct *work); | ||
169 | static DECLARE_WORK(parsing_done_work, parsing_done_workfn); | ||
170 | |||
171 | static int | ||
172 | init_cpu_capacity_callback(struct notifier_block *nb, | ||
173 | unsigned long val, | ||
174 | void *data) | ||
175 | { | ||
176 | struct cpufreq_policy *policy = data; | ||
177 | int cpu; | ||
178 | |||
179 | if (cap_parsing_failed || cap_parsing_done) | ||
180 | return 0; | ||
181 | |||
182 | switch (val) { | ||
183 | case CPUFREQ_NOTIFY: | ||
184 | pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", | ||
185 | cpumask_pr_args(policy->related_cpus), | ||
186 | cpumask_pr_args(cpus_to_visit)); | ||
187 | cpumask_andnot(cpus_to_visit, | ||
188 | cpus_to_visit, | ||
189 | policy->related_cpus); | ||
190 | for_each_cpu(cpu, policy->related_cpus) { | ||
191 | raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) * | ||
192 | policy->cpuinfo.max_freq / 1000UL; | ||
193 | capacity_scale = max(raw_capacity[cpu], capacity_scale); | ||
194 | } | ||
195 | if (cpumask_empty(cpus_to_visit)) { | ||
196 | normalize_cpu_capacity(); | ||
197 | kfree(raw_capacity); | ||
198 | pr_debug("cpu_capacity: parsing done\n"); | ||
199 | cap_parsing_done = true; | ||
200 | schedule_work(&parsing_done_work); | ||
201 | } | ||
202 | } | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static struct notifier_block init_cpu_capacity_notifier = { | ||
207 | .notifier_call = init_cpu_capacity_callback, | ||
208 | }; | ||
209 | |||
210 | static int __init register_cpufreq_notifier(void) | ||
211 | { | ||
212 | if (cap_parsing_failed) | ||
213 | return -EINVAL; | ||
214 | |||
215 | if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { | ||
216 | pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n"); | ||
217 | return -ENOMEM; | ||
218 | } | ||
219 | cpumask_copy(cpus_to_visit, cpu_possible_mask); | ||
220 | |||
221 | return cpufreq_register_notifier(&init_cpu_capacity_notifier, | ||
222 | CPUFREQ_POLICY_NOTIFIER); | ||
223 | } | ||
224 | core_initcall(register_cpufreq_notifier); | ||
225 | |||
226 | static void parsing_done_workfn(struct work_struct *work) | ||
227 | { | ||
228 | cpufreq_unregister_notifier(&init_cpu_capacity_notifier, | ||
229 | CPUFREQ_POLICY_NOTIFIER); | ||
230 | } | ||
231 | |||
232 | #else | ||
233 | static int __init free_raw_capacity(void) | ||
234 | { | ||
235 | kfree(raw_capacity); | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | core_initcall(free_raw_capacity); | ||
240 | #endif | ||
241 | |||
26 | static int __init get_cpu_for_node(struct device_node *node) | 242 | static int __init get_cpu_for_node(struct device_node *node) |
27 | { | 243 | { |
28 | struct device_node *cpu_node; | 244 | struct device_node *cpu_node; |
@@ -34,6 +250,7 @@ static int __init get_cpu_for_node(struct device_node *node) | |||
34 | 250 | ||
35 | for_each_possible_cpu(cpu) { | 251 | for_each_possible_cpu(cpu) { |
36 | if (of_get_cpu_node(cpu, NULL) == cpu_node) { | 252 | if (of_get_cpu_node(cpu, NULL) == cpu_node) { |
253 | parse_cpu_capacity(cpu_node, cpu); | ||
37 | of_node_put(cpu_node); | 254 | of_node_put(cpu_node); |
38 | return cpu; | 255 | return cpu; |
39 | } | 256 | } |
@@ -178,13 +395,17 @@ static int __init parse_dt_topology(void) | |||
178 | * cluster with restricted subnodes. | 395 | * cluster with restricted subnodes. |
179 | */ | 396 | */ |
180 | map = of_get_child_by_name(cn, "cpu-map"); | 397 | map = of_get_child_by_name(cn, "cpu-map"); |
181 | if (!map) | 398 | if (!map) { |
399 | cap_parsing_failed = true; | ||
182 | goto out; | 400 | goto out; |
401 | } | ||
183 | 402 | ||
184 | ret = parse_cluster(map, 0); | 403 | ret = parse_cluster(map, 0); |
185 | if (ret != 0) | 404 | if (ret != 0) |
186 | goto out_map; | 405 | goto out_map; |
187 | 406 | ||
407 | normalize_cpu_capacity(); | ||
408 | |||
188 | /* | 409 | /* |
189 | * Check that all cores are in the topology; the SMP code will | 410 | * Check that all cores are in the topology; the SMP code will |
190 | * only mark cores described in the DT as possible. | 411 | * only mark cores described in the DT as possible. |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c9986b3e0a96..5b830be79c01 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/esr.h> | 38 | #include <asm/esr.h> |
39 | #include <asm/insn.h> | 39 | #include <asm/insn.h> |
40 | #include <asm/traps.h> | 40 | #include <asm/traps.h> |
41 | #include <asm/stack_pointer.h> | ||
41 | #include <asm/stacktrace.h> | 42 | #include <asm/stacktrace.h> |
42 | #include <asm/exception.h> | 43 | #include <asm/exception.h> |
43 | #include <asm/system_misc.h> | 44 | #include <asm/system_misc.h> |
@@ -147,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
147 | if (!tsk) | 148 | if (!tsk) |
148 | tsk = current; | 149 | tsk = current; |
149 | 150 | ||
151 | if (!try_get_task_stack(tsk)) | ||
152 | return; | ||
153 | |||
150 | /* | 154 | /* |
151 | * Switching between stacks is valid when tracing current and in | 155 | * Switching between stacks is valid when tracing current and in |
152 | * non-preemptible context. | 156 | * non-preemptible context. |
@@ -212,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
212 | stack + sizeof(struct pt_regs)); | 216 | stack + sizeof(struct pt_regs)); |
213 | } | 217 | } |
214 | } | 218 | } |
219 | |||
220 | put_task_stack(tsk); | ||
215 | } | 221 | } |
216 | 222 | ||
217 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 223 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
@@ -227,10 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
227 | #endif | 233 | #endif |
228 | #define S_SMP " SMP" | 234 | #define S_SMP " SMP" |
229 | 235 | ||
230 | static int __die(const char *str, int err, struct thread_info *thread, | 236 | static int __die(const char *str, int err, struct pt_regs *regs) |
231 | struct pt_regs *regs) | ||
232 | { | 237 | { |
233 | struct task_struct *tsk = thread->task; | 238 | struct task_struct *tsk = current; |
234 | static int die_counter; | 239 | static int die_counter; |
235 | int ret; | 240 | int ret; |
236 | 241 | ||
@@ -245,7 +250,8 @@ static int __die(const char *str, int err, struct thread_info *thread, | |||
245 | print_modules(); | 250 | print_modules(); |
246 | __show_regs(regs); | 251 | __show_regs(regs); |
247 | pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", | 252 | pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", |
248 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); | 253 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), |
254 | end_of_stack(tsk)); | ||
249 | 255 | ||
250 | if (!user_mode(regs)) { | 256 | if (!user_mode(regs)) { |
251 | dump_mem(KERN_EMERG, "Stack: ", regs->sp, | 257 | dump_mem(KERN_EMERG, "Stack: ", regs->sp, |
@@ -264,7 +270,6 @@ static DEFINE_RAW_SPINLOCK(die_lock); | |||
264 | */ | 270 | */ |
265 | void die(const char *str, struct pt_regs *regs, int err) | 271 | void die(const char *str, struct pt_regs *regs, int err) |
266 | { | 272 | { |
267 | struct thread_info *thread = current_thread_info(); | ||
268 | int ret; | 273 | int ret; |
269 | 274 | ||
270 | oops_enter(); | 275 | oops_enter(); |
@@ -272,9 +277,9 @@ void die(const char *str, struct pt_regs *regs, int err) | |||
272 | raw_spin_lock_irq(&die_lock); | 277 | raw_spin_lock_irq(&die_lock); |
273 | console_verbose(); | 278 | console_verbose(); |
274 | bust_spinlocks(1); | 279 | bust_spinlocks(1); |
275 | ret = __die(str, err, thread, regs); | 280 | ret = __die(str, err, regs); |
276 | 281 | ||
277 | if (regs && kexec_should_crash(thread->task)) | 282 | if (regs && kexec_should_crash(current)) |
278 | crash_kexec(regs); | 283 | crash_kexec(regs); |
279 | 284 | ||
280 | bust_spinlocks(0); | 285 | bust_spinlocks(0); |
@@ -435,9 +440,10 @@ int cpu_enable_cache_maint_trap(void *__unused) | |||
435 | } | 440 | } |
436 | 441 | ||
437 | #define __user_cache_maint(insn, address, res) \ | 442 | #define __user_cache_maint(insn, address, res) \ |
438 | if (untagged_addr(address) >= user_addr_max()) \ | 443 | if (untagged_addr(address) >= user_addr_max()) { \ |
439 | res = -EFAULT; \ | 444 | res = -EFAULT; \ |
440 | else \ | 445 | } else { \ |
446 | uaccess_ttbr0_enable(); \ | ||
441 | asm volatile ( \ | 447 | asm volatile ( \ |
442 | "1: " insn ", %1\n" \ | 448 | "1: " insn ", %1\n" \ |
443 | " mov %w0, #0\n" \ | 449 | " mov %w0, #0\n" \ |
@@ -449,7 +455,9 @@ int cpu_enable_cache_maint_trap(void *__unused) | |||
449 | " .popsection\n" \ | 455 | " .popsection\n" \ |
450 | _ASM_EXTABLE(1b, 3b) \ | 456 | _ASM_EXTABLE(1b, 3b) \ |
451 | : "=r" (res) \ | 457 | : "=r" (res) \ |
452 | : "r" (address), "i" (-EFAULT) ) | 458 | : "r" (address), "i" (-EFAULT)); \ |
459 | uaccess_ttbr0_disable(); \ | ||
460 | } | ||
453 | 461 | ||
454 | static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) | 462 | static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) |
455 | { | 463 | { |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 1105aab1e6d6..b8deffa9e1bf 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -216,6 +216,11 @@ SECTIONS | |||
216 | swapper_pg_dir = .; | 216 | swapper_pg_dir = .; |
217 | . += SWAPPER_DIR_SIZE; | 217 | . += SWAPPER_DIR_SIZE; |
218 | 218 | ||
219 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | ||
220 | reserved_ttbr0 = .; | ||
221 | . += RESERVED_TTBR0_SIZE; | ||
222 | #endif | ||
223 | |||
219 | _end = .; | 224 | _end = .; |
220 | 225 | ||
221 | STABS_DEBUG | 226 | STABS_DEBUG |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index a204adf29f0a..1bfe30dfbfe7 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -57,6 +57,16 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
57 | return 1; | 57 | return 1; |
58 | } | 58 | } |
59 | 59 | ||
60 | /* | ||
61 | * Guest access to FP/ASIMD registers are routed to this handler only | ||
62 | * when the system doesn't support FP/ASIMD. | ||
63 | */ | ||
64 | static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
65 | { | ||
66 | kvm_inject_undefined(vcpu); | ||
67 | return 1; | ||
68 | } | ||
69 | |||
60 | /** | 70 | /** |
61 | * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event | 71 | * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event |
62 | * instruction executed by a guest | 72 | * instruction executed by a guest |
@@ -144,6 +154,7 @@ static exit_handle_fn arm_exit_handlers[] = { | |||
144 | [ESR_ELx_EC_BREAKPT_LOW] = kvm_handle_guest_debug, | 154 | [ESR_ELx_EC_BREAKPT_LOW] = kvm_handle_guest_debug, |
145 | [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug, | 155 | [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug, |
146 | [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug, | 156 | [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug, |
157 | [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd, | ||
147 | }; | 158 | }; |
148 | 159 | ||
149 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | 160 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) |
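handle_no_fpsimd() simply joins the exception-class-indexed arm_exit_handlers[] table, and kvm_get_exit_handler() picks the entry for ESR_ELx_EC(esr). A toy C sketch of that dispatch shape, with invented class numbers and handler names:

    #include <stdio.h>

    typedef int (*exit_handler_fn)(void);

    /* Invented exception-class numbers, standing in for ESR_ELx_EC_* values. */
    enum { EC_WFX = 1, EC_FP_ASIMD = 7, EC_MAX = 8 };

    static int handle_wfx(void)       { puts("wfi/wfe trap"); return 1; }
    static int handle_no_fpsimd(void) { puts("inject UNDEF: no FP/SIMD"); return 1; }
    static int handle_unknown(void)   { puts("unknown exception class"); return 0; }

    static exit_handler_fn handlers[EC_MAX] = {
        [EC_WFX]      = handle_wfx,
        [EC_FP_ASIMD] = handle_no_fpsimd,
    };

    /* Model of kvm_get_exit_handler(): fall back when no entry is registered. */
    static exit_handler_fn get_handler(unsigned int ec)
    {
        if (ec < EC_MAX && handlers[ec])
            return handlers[ec];
        return handle_unknown;
    }

    int main(void)
    {
        get_handler(EC_FP_ASIMD)();     /* guest touched FP/SIMD, none present */
        get_handler(3)();               /* unregistered class */
        return 0;
    }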
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 4e92399f7105..5e9052f087f2 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S | |||
@@ -106,9 +106,16 @@ el1_trap: | |||
106 | * x0: ESR_EC | 106 | * x0: ESR_EC |
107 | */ | 107 | */ |
108 | 108 | ||
109 | /* Guest accessed VFP/SIMD registers, save host, restore Guest */ | 109 | /* |
110 | * We trap the first access to the FP/SIMD registers to save the host context | ||
111 | * and restore the guest context lazily. | ||
112 | * If FP/SIMD is not implemented, handle the trap and inject an | ||
113 | * undefined instruction exception to the guest. | ||
114 | */ | ||
115 | alternative_if_not ARM64_HAS_NO_FPSIMD | ||
110 | cmp x0, #ESR_ELx_EC_FP_ASIMD | 116 | cmp x0, #ESR_ELx_EC_FP_ASIMD |
111 | b.eq __fpsimd_guest_restore | 117 | b.eq __fpsimd_guest_restore |
118 | alternative_else_nop_endif | ||
112 | 119 | ||
113 | mrs x1, tpidr_el2 | 120 | mrs x1, tpidr_el2 |
114 | mov x0, #ARM_EXCEPTION_TRAP | 121 | mov x0, #ARM_EXCEPTION_TRAP |
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0c848c18ca44..75e83dd40d43 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/kvm_asm.h> | 21 | #include <asm/kvm_asm.h> |
22 | #include <asm/kvm_emulate.h> | 22 | #include <asm/kvm_emulate.h> |
23 | #include <asm/kvm_hyp.h> | 23 | #include <asm/kvm_hyp.h> |
24 | #include <asm/fpsimd.h> | ||
24 | 25 | ||
25 | static bool __hyp_text __fpsimd_enabled_nvhe(void) | 26 | static bool __hyp_text __fpsimd_enabled_nvhe(void) |
26 | { | 27 | { |
@@ -76,9 +77,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) | |||
76 | * traps are only taken to EL2 if the operation would not otherwise | 77 | * traps are only taken to EL2 if the operation would not otherwise |
77 | * trap to EL1. Therefore, always make sure that for 32-bit guests, | 78 | * trap to EL1. Therefore, always make sure that for 32-bit guests, |
78 | * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. | 79 | * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. |
80 | * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to | ||
81 | * it will cause an exception. | ||
79 | */ | 82 | */ |
80 | val = vcpu->arch.hcr_el2; | 83 | val = vcpu->arch.hcr_el2; |
81 | if (!(val & HCR_RW)) { | 84 | if (!(val & HCR_RW) && system_supports_fpsimd()) { |
82 | write_sysreg(1 << 30, fpexc32_el2); | 85 | write_sysreg(1 << 30, fpexc32_el2); |
83 | isb(); | 86 | isb(); |
84 | } | 87 | } |
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 5d1cad3ce6d6..d7150e30438a 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S | |||
@@ -17,10 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | 19 | ||
20 | #include <asm/alternative.h> | 20 | #include <asm/uaccess.h> |
21 | #include <asm/assembler.h> | ||
22 | #include <asm/cpufeature.h> | ||
23 | #include <asm/sysreg.h> | ||
24 | 21 | ||
25 | .text | 22 | .text |
26 | 23 | ||
@@ -33,8 +30,7 @@ | |||
33 | * Alignment fixed up by hardware. | 30 | * Alignment fixed up by hardware. |
34 | */ | 31 | */ |
35 | ENTRY(__clear_user) | 32 | ENTRY(__clear_user) |
36 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ | 33 | uaccess_enable_not_uao x2, x3 |
37 | CONFIG_ARM64_PAN) | ||
38 | mov x2, x1 // save the size for fixup return | 34 | mov x2, x1 // save the size for fixup return |
39 | subs x1, x1, #8 | 35 | subs x1, x1, #8 |
40 | b.mi 2f | 36 | b.mi 2f |
@@ -54,8 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 | |||
54 | b.mi 5f | 50 | b.mi 5f |
55 | uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 | 51 | uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 |
56 | 5: mov x0, #0 | 52 | 5: mov x0, #0 |
57 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ | 53 | uaccess_disable_not_uao x2 |
58 | CONFIG_ARM64_PAN) | ||
59 | ret | 54 | ret |
60 | ENDPROC(__clear_user) | 55 | ENDPROC(__clear_user) |
61 | 56 | ||
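This hunk and the three copy_*.S hunks that follow replace the duplicated ALTERNATIVE(... SET_PSTATE_PAN ...) pairs with a single uaccess_enable_not_uao/uaccess_disable_not_uao macro pair, so every user-access routine opens and closes the access window identically. A toy C model of that bracket discipline (the flag and helper names are invented):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static int uaccess_open;        /* models PAN cleared / TTBR0 valid */

    static void uaccess_enable(void)  { uaccess_open = 1; }
    static void uaccess_disable(void) { uaccess_open = 0; }

    /* Every store to "user" memory must sit inside the bracket. */
    static void user_store(char *dst, char c)
    {
        assert(uaccess_open);
        *dst = c;
    }

    /* Shape of __clear_user: enable, do the accesses, disable, return. */
    static int clear_user(char *dst, size_t n)
    {
        uaccess_enable();
        for (size_t i = 0; i < n; i++)
            user_store(dst + i, 0);
        uaccess_disable();
        return 0;
    }

    int main(void)
    {
        char buf[8];

        memset(buf, 'x', sizeof(buf));
        clear_user(buf, sizeof(buf));
        printf("first byte after clear: %d\n", buf[0]);
        return 0;
    }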
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 4fd67ea03bb0..cfe13396085b 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S | |||
@@ -16,11 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/linkage.h> | 17 | #include <linux/linkage.h> |
18 | 18 | ||
19 | #include <asm/alternative.h> | ||
20 | #include <asm/assembler.h> | ||
21 | #include <asm/cache.h> | 19 | #include <asm/cache.h> |
22 | #include <asm/cpufeature.h> | 20 | #include <asm/uaccess.h> |
23 | #include <asm/sysreg.h> | ||
24 | 21 | ||
25 | /* | 22 | /* |
26 | * Copy from user space to a kernel buffer (alignment handled by the hardware) | 23 | * Copy from user space to a kernel buffer (alignment handled by the hardware) |
@@ -67,12 +64,10 @@ | |||
67 | 64 | ||
68 | end .req x5 | 65 | end .req x5 |
69 | ENTRY(__arch_copy_from_user) | 66 | ENTRY(__arch_copy_from_user) |
70 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ | 67 | uaccess_enable_not_uao x3, x4 |
71 | CONFIG_ARM64_PAN) | ||
72 | add end, x0, x2 | 68 | add end, x0, x2 |
73 | #include "copy_template.S" | 69 | #include "copy_template.S" |
74 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ | 70 | uaccess_disable_not_uao x3 |
75 | CONFIG_ARM64_PAN) | ||
76 | mov x0, #0 // Nothing to copy | 71 | mov x0, #0 // Nothing to copy |
77 | ret | 72 | ret |
78 | ENDPROC(__arch_copy_from_user) | 73 | ENDPROC(__arch_copy_from_user) |
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index f7292dd08c84..718b1c4e2f85 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S | |||
@@ -18,11 +18,8 @@ | |||
18 | 18 | ||
19 | #include <linux/linkage.h> | 19 | #include <linux/linkage.h> |
20 | 20 | ||
21 | #include <asm/alternative.h> | ||
22 | #include <asm/assembler.h> | ||
23 | #include <asm/cache.h> | 21 | #include <asm/cache.h> |
24 | #include <asm/cpufeature.h> | 22 | #include <asm/uaccess.h> |
25 | #include <asm/sysreg.h> | ||
26 | 23 | ||
27 | /* | 24 | /* |
28 | * Copy from user space to user space (alignment handled by the hardware) | 25 | * Copy from user space to user space (alignment handled by the hardware) |
@@ -68,12 +65,10 @@ | |||
68 | 65 | ||
69 | end .req x5 | 66 | end .req x5 |
70 | ENTRY(__copy_in_user) | 67 | ENTRY(__copy_in_user) |
71 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ | 68 | uaccess_enable_not_uao x3, x4 |
72 | CONFIG_ARM64_PAN) | ||
73 | add end, x0, x2 | 69 | add end, x0, x2 |
74 | #include "copy_template.S" | 70 | #include "copy_template.S" |
75 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ | 71 | uaccess_disable_not_uao x3 |
76 | CONFIG_ARM64_PAN) | ||
77 | mov x0, #0 | 72 | mov x0, #0 |
78 | ret | 73 | ret |
79 | ENDPROC(__copy_in_user) | 74 | ENDPROC(__copy_in_user) |
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 7a7efe255034..e99e31c9acac 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S | |||
@@ -16,11 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/linkage.h> | 17 | #include <linux/linkage.h> |
18 | 18 | ||
19 | #include <asm/alternative.h> | ||
20 | #include <asm/assembler.h> | ||
21 | #include <asm/cache.h> | 19 | #include <asm/cache.h> |
22 | #include <asm/cpufeature.h> | 20 | #include <asm/uaccess.h> |
23 | #include <asm/sysreg.h> | ||
24 | 21 | ||
25 | /* | 22 | /* |
26 | * Copy to user space from a kernel buffer (alignment handled by the hardware) | 23 | * Copy to user space from a kernel buffer (alignment handled by the hardware) |
@@ -66,12 +63,10 @@ | |||
66 | 63 | ||
67 | end .req x5 | 64 | end .req x5 |
68 | ENTRY(__arch_copy_to_user) | 65 | ENTRY(__arch_copy_to_user) |
69 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \ | 66 | uaccess_enable_not_uao x3, x4 |
70 | CONFIG_ARM64_PAN) | ||
71 | add end, x0, x2 | 67 | add end, x0, x2 |
72 | #include "copy_template.S" | 68 | #include "copy_template.S" |
73 | ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ | 69 | uaccess_disable_not_uao x3 |
74 | CONFIG_ARM64_PAN) | ||
75 | mov x0, #0 | 70 | mov x0, #0 |
76 | ret | 71 | ret |
77 | ENDPROC(__arch_copy_to_user) | 72 | ENDPROC(__arch_copy_to_user) |
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 54bb209cae8e..e703fb9defad 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile | |||
@@ -3,7 +3,8 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
3 | ioremap.o mmap.o pgd.o mmu.o \ | 3 | ioremap.o mmap.o pgd.o mmu.o \ |
4 | context.o proc.o pageattr.o | 4 | context.o proc.o pageattr.o |
5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
6 | obj-$(CONFIG_ARM64_PTDUMP) += dump.o | 6 | obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o |
7 | obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o | ||
7 | obj-$(CONFIG_NUMA) += numa.o | 8 | obj-$(CONFIG_NUMA) += numa.o |
8 | 9 | ||
9 | obj-$(CONFIG_KASAN) += kasan_init.o | 10 | obj-$(CONFIG_KASAN) += kasan_init.o |
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 58b5a906ff78..da9576932322 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/assembler.h> | 23 | #include <asm/assembler.h> |
24 | #include <asm/cpufeature.h> | 24 | #include <asm/cpufeature.h> |
25 | #include <asm/alternative.h> | 25 | #include <asm/alternative.h> |
26 | #include <asm/uaccess.h> | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * flush_icache_range(start,end) | 29 | * flush_icache_range(start,end) |
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range) | |||
48 | * - end - virtual end address of region | 49 | * - end - virtual end address of region |
49 | */ | 50 | */ |
50 | ENTRY(__flush_cache_user_range) | 51 | ENTRY(__flush_cache_user_range) |
52 | uaccess_ttbr0_enable x2, x3 | ||
51 | dcache_line_size x2, x3 | 53 | dcache_line_size x2, x3 |
52 | sub x3, x2, #1 | 54 | sub x3, x2, #1 |
53 | bic x4, x0, x3 | 55 | bic x4, x0, x3 |
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU | |||
69 | dsb ish | 71 | dsb ish |
70 | isb | 72 | isb |
71 | mov x0, #0 | 73 | mov x0, #0 |
74 | 1: | ||
75 | uaccess_ttbr0_disable x1 | ||
72 | ret | 76 | ret |
73 | 9: | 77 | 9: |
74 | mov x0, #-EFAULT | 78 | mov x0, #-EFAULT |
75 | ret | 79 | b 1b |
76 | ENDPROC(flush_icache_range) | 80 | ENDPROC(flush_icache_range) |
77 | ENDPROC(__flush_cache_user_range) | 81 | ENDPROC(__flush_cache_user_range) |
78 | 82 | ||
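The fault label in __flush_cache_user_range used to return directly, which would now skip uaccess_ttbr0_disable; branching back to label 1 funnels both outcomes through one epilogue. The same single-exit idiom in C, with illustrative names:

    #include <stdio.h>

    static int resource_held;

    static int do_work(int fail)
    {
        int ret = 0;

        resource_held = 1;          /* models uaccess_ttbr0_enable */
        if (fail) {
            ret = -14;              /* -EFAULT */
            goto out;               /* models "b 1b" to the epilogue */
        }
        /* ... the actual cache maintenance would happen here ... */
    out:
        resource_held = 0;          /* models uaccess_ttbr0_disable */
        return ret;
    }

    int main(void)
    {
        printf("ok path:    ret=%d held=%d\n", do_work(0), resource_held);
        printf("fault path: ret=%d held=%d\n", do_work(1), resource_held);
        return 0;
    }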
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index efcf1f7ef1e4..4c63cb154859 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c | |||
@@ -221,7 +221,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) | |||
221 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); | 221 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); |
222 | 222 | ||
223 | switch_mm_fastpath: | 223 | switch_mm_fastpath: |
224 | cpu_switch_mm(mm->pgd, mm); | 224 | /* |
225 | * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when | ||
226 | * emulating PAN. | ||
227 | */ | ||
228 | if (!system_uses_ttbr0_pan()) | ||
229 | cpu_switch_mm(mm->pgd, mm); | ||
225 | } | 230 | } |
226 | 231 | ||
227 | static int asids_init(void) | 232 | static int asids_init(void) |
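Deferring cpu_switch_mm() is the heart of the software PAN scheme: while the kernel runs, TTBR0_EL1 points at a reserved empty table, and the real user page table is installed only inside an explicit uaccess window. A coarse user-space model of that state machine (every name and constant here is invented; the real switching is done in assembly on TTBR0_EL1):

    #include <stdio.h>
    #include <stdbool.h>

    #define RESERVED_TABLE 0xdead0000ul   /* stands in for the zeroed reserved_ttbr0 page */

    static bool pan_emulated = true;
    static unsigned long ttbr0 = RESERVED_TABLE;  /* models TTBR0_EL1 */
    static unsigned long deferred_user_table;     /* stashed pgd, applied later */

    static void switch_mm(unsigned long user_table)
    {
        if (pan_emulated)
            deferred_user_table = user_table;     /* defer: TTBR0 stays reserved */
        else
            ttbr0 = user_table;
    }

    static void uaccess_enable(void)  { if (pan_emulated) ttbr0 = deferred_user_table; }
    static void uaccess_disable(void) { if (pan_emulated) ttbr0 = RESERVED_TABLE; }

    int main(void)
    {
        switch_mm(0x1234000ul);
        printf("after switch_mm:       ttbr0=%#lx\n", ttbr0);  /* still reserved */
        uaccess_enable();
        printf("inside uaccess window: ttbr0=%#lx\n", ttbr0);
        uaccess_disable();
        printf("after window:          ttbr0=%#lx\n", ttbr0);
        return 0;
    }

Any kernel access to user addresses outside the window walks the empty reserved table and faults, which is what gives the PAN-like protection.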
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 3f74d0d98de6..aa6c8f834d9e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -938,11 +938,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
938 | 938 | ||
939 | void arch_teardown_dma_ops(struct device *dev) | 939 | void arch_teardown_dma_ops(struct device *dev) |
940 | { | 940 | { |
941 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | ||
942 | |||
943 | if (WARN_ON(domain)) | ||
944 | iommu_detach_device(domain, dev); | ||
945 | |||
946 | dev->archdata.dma_ops = NULL; | 941 | dev->archdata.dma_ops = NULL; |
947 | } | 942 | } |
948 | 943 | ||
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 9c3e75df2180..ca74a2aace42 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -50,6 +50,18 @@ static const struct addr_marker address_markers[] = { | |||
50 | { -1, NULL }, | 50 | { -1, NULL }, |
51 | }; | 51 | }; |
52 | 52 | ||
53 | #define pt_dump_seq_printf(m, fmt, args...) \ | ||
54 | ({ \ | ||
55 | if (m) \ | ||
56 | seq_printf(m, fmt, ##args); \ | ||
57 | }) | ||
58 | |||
59 | #define pt_dump_seq_puts(m, fmt) \ | ||
60 | ({ \ | ||
61 | if (m) \ | ||
62 | seq_printf(m, fmt); \ | ||
63 | }) | ||
64 | |||
53 | /* | 65 | /* |
54 | * The page dumper groups page table entries of the same type into a single | 66 | * The page dumper groups page table entries of the same type into a single |
55 | * description. It uses pg_state to track the range information while | 67 | * description. It uses pg_state to track the range information while |
@@ -62,6 +74,9 @@ struct pg_state { | |||
62 | unsigned long start_address; | 74 | unsigned long start_address; |
63 | unsigned level; | 75 | unsigned level; |
64 | u64 current_prot; | 76 | u64 current_prot; |
77 | bool check_wx; | ||
78 | unsigned long wx_pages; | ||
79 | unsigned long uxn_pages; | ||
65 | }; | 80 | }; |
66 | 81 | ||
67 | struct prot_bits { | 82 | struct prot_bits { |
@@ -186,10 +201,39 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, | |||
186 | s = bits->clear; | 201 | s = bits->clear; |
187 | 202 | ||
188 | if (s) | 203 | if (s) |
189 | seq_printf(st->seq, " %s", s); | 204 | pt_dump_seq_printf(st->seq, " %s", s); |
190 | } | 205 | } |
191 | } | 206 | } |
192 | 207 | ||
208 | static void note_prot_uxn(struct pg_state *st, unsigned long addr) | ||
209 | { | ||
210 | if (!st->check_wx) | ||
211 | return; | ||
212 | |||
213 | if ((st->current_prot & PTE_UXN) == PTE_UXN) | ||
214 | return; | ||
215 | |||
216 | WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n", | ||
217 | (void *)st->start_address, (void *)st->start_address); | ||
218 | |||
219 | st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; | ||
220 | } | ||
221 | |||
222 | static void note_prot_wx(struct pg_state *st, unsigned long addr) | ||
223 | { | ||
224 | if (!st->check_wx) | ||
225 | return; | ||
226 | if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY) | ||
227 | return; | ||
228 | if ((st->current_prot & PTE_PXN) == PTE_PXN) | ||
229 | return; | ||
230 | |||
231 | WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n", | ||
232 | (void *)st->start_address, (void *)st->start_address); | ||
233 | |||
234 | st->wx_pages += (addr - st->start_address) / PAGE_SIZE; | ||
235 | } | ||
236 | |||
193 | static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | 237 | static void note_page(struct pg_state *st, unsigned long addr, unsigned level, |
194 | u64 val) | 238 | u64 val) |
195 | { | 239 | { |
@@ -200,14 +244,16 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
200 | st->level = level; | 244 | st->level = level; |
201 | st->current_prot = prot; | 245 | st->current_prot = prot; |
202 | st->start_address = addr; | 246 | st->start_address = addr; |
203 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | 247 | pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); |
204 | } else if (prot != st->current_prot || level != st->level || | 248 | } else if (prot != st->current_prot || level != st->level || |
205 | addr >= st->marker[1].start_address) { | 249 | addr >= st->marker[1].start_address) { |
206 | const char *unit = units; | 250 | const char *unit = units; |
207 | unsigned long delta; | 251 | unsigned long delta; |
208 | 252 | ||
209 | if (st->current_prot) { | 253 | if (st->current_prot) { |
210 | seq_printf(st->seq, "0x%016lx-0x%016lx ", | 254 | note_prot_uxn(st, addr); |
255 | note_prot_wx(st, addr); | ||
256 | pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ", | ||
211 | st->start_address, addr); | 257 | st->start_address, addr); |
212 | 258 | ||
213 | delta = (addr - st->start_address) >> 10; | 259 | delta = (addr - st->start_address) >> 10; |
@@ -215,17 +261,17 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
215 | delta >>= 10; | 261 | delta >>= 10; |
216 | unit++; | 262 | unit++; |
217 | } | 263 | } |
218 | seq_printf(st->seq, "%9lu%c %s", delta, *unit, | 264 | pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit, |
219 | pg_level[st->level].name); | 265 | pg_level[st->level].name); |
220 | if (pg_level[st->level].bits) | 266 | if (pg_level[st->level].bits) |
221 | dump_prot(st, pg_level[st->level].bits, | 267 | dump_prot(st, pg_level[st->level].bits, |
222 | pg_level[st->level].num); | 268 | pg_level[st->level].num); |
223 | seq_puts(st->seq, "\n"); | 269 | pt_dump_seq_puts(st->seq, "\n"); |
224 | } | 270 | } |
225 | 271 | ||
226 | if (addr >= st->marker[1].start_address) { | 272 | if (addr >= st->marker[1].start_address) { |
227 | st->marker++; | 273 | st->marker++; |
228 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | 274 | pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); |
229 | } | 275 | } |
230 | 276 | ||
231 | st->start_address = addr; | 277 | st->start_address = addr; |
@@ -235,7 +281,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
235 | 281 | ||
236 | if (addr >= st->marker[1].start_address) { | 282 | if (addr >= st->marker[1].start_address) { |
237 | st->marker++; | 283 | st->marker++; |
238 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | 284 | pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); |
239 | } | 285 | } |
240 | 286 | ||
241 | } | 287 | } |
@@ -304,9 +350,8 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, | |||
304 | } | 350 | } |
305 | } | 351 | } |
306 | 352 | ||
307 | static int ptdump_show(struct seq_file *m, void *v) | 353 | void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) |
308 | { | 354 | { |
309 | struct ptdump_info *info = m->private; | ||
310 | struct pg_state st = { | 355 | struct pg_state st = { |
311 | .seq = m, | 356 | .seq = m, |
312 | .marker = info->markers, | 357 | .marker = info->markers, |
@@ -315,33 +360,16 @@ static int ptdump_show(struct seq_file *m, void *v) | |||
315 | walk_pgd(&st, info->mm, info->base_addr); | 360 | walk_pgd(&st, info->mm, info->base_addr); |
316 | 361 | ||
317 | note_page(&st, 0, 0, 0); | 362 | note_page(&st, 0, 0, 0); |
318 | return 0; | ||
319 | } | 363 | } |
320 | 364 | ||
321 | static int ptdump_open(struct inode *inode, struct file *file) | 365 | static void ptdump_initialize(void) |
322 | { | 366 | { |
323 | return single_open(file, ptdump_show, inode->i_private); | ||
324 | } | ||
325 | |||
326 | static const struct file_operations ptdump_fops = { | ||
327 | .open = ptdump_open, | ||
328 | .read = seq_read, | ||
329 | .llseek = seq_lseek, | ||
330 | .release = single_release, | ||
331 | }; | ||
332 | |||
333 | int ptdump_register(struct ptdump_info *info, const char *name) | ||
334 | { | ||
335 | struct dentry *pe; | ||
336 | unsigned i, j; | 367 | unsigned i, j; |
337 | 368 | ||
338 | for (i = 0; i < ARRAY_SIZE(pg_level); i++) | 369 | for (i = 0; i < ARRAY_SIZE(pg_level); i++) |
339 | if (pg_level[i].bits) | 370 | if (pg_level[i].bits) |
340 | for (j = 0; j < pg_level[i].num; j++) | 371 | for (j = 0; j < pg_level[i].num; j++) |
341 | pg_level[i].mask |= pg_level[i].bits[j].mask; | 372 | pg_level[i].mask |= pg_level[i].bits[j].mask; |
342 | |||
343 | pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); | ||
344 | return pe ? 0 : -ENOMEM; | ||
345 | } | 373 | } |
346 | 374 | ||
347 | static struct ptdump_info kernel_ptdump_info = { | 375 | static struct ptdump_info kernel_ptdump_info = { |
@@ -350,8 +378,30 @@ static struct ptdump_info kernel_ptdump_info = { | |||
350 | .base_addr = VA_START, | 378 | .base_addr = VA_START, |
351 | }; | 379 | }; |
352 | 380 | ||
381 | void ptdump_check_wx(void) | ||
382 | { | ||
383 | struct pg_state st = { | ||
384 | .seq = NULL, | ||
385 | .marker = (struct addr_marker[]) { | ||
386 | { 0, NULL}, | ||
387 | { -1, NULL}, | ||
388 | }, | ||
389 | .check_wx = true, | ||
390 | }; | ||
391 | |||
392 | walk_pgd(&st, &init_mm, 0); | ||
393 | note_page(&st, 0, 0, 0); | ||
394 | if (st.wx_pages || st.uxn_pages) | ||
395 | pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", | ||
396 | st.wx_pages, st.uxn_pages); | ||
397 | else | ||
398 | pr_info("Checked W+X mappings: passed, no W+X pages found\n"); | ||
399 | } | ||
400 | |||
353 | static int ptdump_init(void) | 401 | static int ptdump_init(void) |
354 | { | 402 | { |
355 | return ptdump_register(&kernel_ptdump_info, "kernel_page_tables"); | 403 | ptdump_initialize(); |
404 | return ptdump_debugfs_register(&kernel_ptdump_info, | ||
405 | "kernel_page_tables"); | ||
356 | } | 406 | } |
357 | device_initcall(ptdump_init); | 407 | device_initcall(ptdump_init); |
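ptdump_check_wx() reuses the page-table walker with seq == NULL (hence the pt_dump_seq_* wrappers that swallow output) and counts ranges that are writable and privileged-executable, or executable from user space. The predicate is a handful of bit tests; a standalone model with illustrative bit positions (the real arm64 PTE layout differs):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative permission bits; the real arm64 PTE bits sit elsewhere. */
    #define PTE_RDONLY (1u << 0)
    #define PTE_PXN    (1u << 1)    /* privileged execute-never */
    #define PTE_UXN    (1u << 2)    /* user execute-never */

    /* A kernel mapping is W+X if it is writable and privileged-executable. */
    static bool is_wx(uint32_t prot)
    {
        return !(prot & PTE_RDONLY) && !(prot & PTE_PXN);
    }

    /* Kernel mappings should never be executable from user space. */
    static bool is_non_uxn(uint32_t prot)
    {
        return !(prot & PTE_UXN);
    }

    int main(void)
    {
        uint32_t rodata = PTE_RDONLY | PTE_PXN | PTE_UXN;
        uint32_t bad    = PTE_UXN;      /* writable and executable */

        printf("rodata: wx=%d non-uxn=%d\n", is_wx(rodata), is_non_uxn(rodata));
        printf("bad:    wx=%d non-uxn=%d\n", is_wx(bad), is_non_uxn(bad));
        return 0;
    }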
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 0f8788374815..a78a5c401806 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -269,13 +269,19 @@ out: | |||
269 | return fault; | 269 | return fault; |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline bool is_permission_fault(unsigned int esr) | 272 | static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs) |
273 | { | 273 | { |
274 | unsigned int ec = ESR_ELx_EC(esr); | 274 | unsigned int ec = ESR_ELx_EC(esr); |
275 | unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; | 275 | unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; |
276 | 276 | ||
277 | return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) || | 277 | if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) |
278 | (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM); | 278 | return false; |
279 | |||
280 | if (system_uses_ttbr0_pan()) | ||
281 | return fsc_type == ESR_ELx_FSC_FAULT && | ||
282 | (regs->pstate & PSR_PAN_BIT); | ||
283 | else | ||
284 | return fsc_type == ESR_ELx_FSC_PERM; | ||
279 | } | 285 | } |
280 | 286 | ||
281 | static bool is_el0_instruction_abort(unsigned int esr) | 287 | static bool is_el0_instruction_abort(unsigned int esr) |
@@ -315,7 +321,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
315 | mm_flags |= FAULT_FLAG_WRITE; | 321 | mm_flags |= FAULT_FLAG_WRITE; |
316 | } | 322 | } |
317 | 323 | ||
318 | if (is_permission_fault(esr) && (addr < USER_DS)) { | 324 | if (addr < USER_DS && is_permission_fault(esr, regs)) { |
319 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ | 325 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ |
320 | if (regs->orig_addr_limit == KERNEL_DS) | 326 | if (regs->orig_addr_limit == KERNEL_DS) |
321 | die("Accessing user space memory with fs=KERNEL_DS", regs, esr); | 327 | die("Accessing user space memory with fs=KERNEL_DS", regs, esr); |
@@ -507,10 +513,10 @@ static const struct fault_info { | |||
507 | { do_bad, SIGBUS, 0, "unknown 17" }, | 513 | { do_bad, SIGBUS, 0, "unknown 17" }, |
508 | { do_bad, SIGBUS, 0, "unknown 18" }, | 514 | { do_bad, SIGBUS, 0, "unknown 18" }, |
509 | { do_bad, SIGBUS, 0, "unknown 19" }, | 515 | { do_bad, SIGBUS, 0, "unknown 19" }, |
510 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 516 | { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, |
511 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 517 | { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, |
512 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 518 | { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, |
513 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | 519 | { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, |
514 | { do_bad, SIGBUS, 0, "synchronous parity error" }, | 520 | { do_bad, SIGBUS, 0, "synchronous parity error" }, |
515 | { do_bad, SIGBUS, 0, "unknown 25" }, | 521 | { do_bad, SIGBUS, 0, "unknown 25" }, |
516 | { do_bad, SIGBUS, 0, "unknown 26" }, | 522 | { do_bad, SIGBUS, 0, "unknown 26" }, |
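The is_permission_fault() rework reflects how software PAN manifests: a stray kernel access to user memory walks the reserved empty table, so it arrives as a translation fault with PSR_PAN_BIT set rather than as a hardware permission fault. A standalone model of the new predicate; the field encodings are illustrative, not the architectural ESR values:

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the ESR/PSTATE fields used by the check. */
    enum fsc_type { FSC_FAULT, FSC_PERM, FSC_OTHER };

    struct fault {
        bool current_el_abort;      /* EC is DABT_CUR or IABT_CUR */
        enum fsc_type fsc;
        bool pstate_pan;            /* PSR_PAN_BIT at the time of the fault */
    };

    static bool is_permission_fault(const struct fault *f, bool ttbr0_pan)
    {
        if (!f->current_el_abort)
            return false;
        if (ttbr0_pan)              /* empty table: translation fault + PAN set */
            return f->fsc == FSC_FAULT && f->pstate_pan;
        return f->fsc == FSC_PERM;  /* hardware PAN: a real permission fault */
    }

    int main(void)
    {
        struct fault sw = { true, FSC_FAULT, true };
        struct fault hw = { true, FSC_PERM, false };

        printf("sw-pan kernel fault flagged: %d\n", is_permission_fault(&sw, true));
        printf("hw-pan kernel fault flagged: %d\n", is_permission_fault(&hw, false));
        return 0;
    }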
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 8377329d8c97..554a2558c12e 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c | |||
@@ -25,14 +25,7 @@ | |||
25 | #include <asm/cachetype.h> | 25 | #include <asm/cachetype.h> |
26 | #include <asm/tlbflush.h> | 26 | #include <asm/tlbflush.h> |
27 | 27 | ||
28 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 28 | void sync_icache_aliases(void *kaddr, unsigned long len) |
29 | unsigned long end) | ||
30 | { | ||
31 | if (vma->vm_flags & VM_EXEC) | ||
32 | __flush_icache_all(); | ||
33 | } | ||
34 | |||
35 | static void sync_icache_aliases(void *kaddr, unsigned long len) | ||
36 | { | 29 | { |
37 | unsigned long addr = (unsigned long)kaddr; | 30 | unsigned long addr = (unsigned long)kaddr; |
38 | 31 | ||
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 2e49bd252fe7..964b7549af5c 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c | |||
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr, | |||
51 | *pgsize = PAGE_SIZE; | 51 | *pgsize = PAGE_SIZE; |
52 | if (!pte_cont(pte)) | 52 | if (!pte_cont(pte)) |
53 | return 1; | 53 | return 1; |
54 | if (!pgd_present(*pgd)) { | ||
55 | VM_BUG_ON(!pgd_present(*pgd)); | ||
56 | return 1; | ||
57 | } | ||
58 | pud = pud_offset(pgd, addr); | 54 | pud = pud_offset(pgd, addr); |
59 | if (!pud_present(*pud)) { | ||
60 | VM_BUG_ON(!pud_present(*pud)); | ||
61 | return 1; | ||
62 | } | ||
63 | pmd = pmd_offset(pud, addr); | 55 | pmd = pmd_offset(pud, addr); |
64 | if (!pmd_present(*pmd)) { | ||
65 | VM_BUG_ON(!pmd_present(*pmd)); | ||
66 | return 1; | ||
67 | } | ||
68 | if ((pte_t *)pmd == ptep) { | 56 | if ((pte_t *)pmd == ptep) { |
69 | *pgsize = PMD_SIZE; | 57 | *pgsize = PMD_SIZE; |
70 | return CONT_PMDS; | 58 | return CONT_PMDS; |
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
212 | ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); | 200 | ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); |
213 | /* save the 1st pte to return */ | 201 | /* save the 1st pte to return */ |
214 | pte = ptep_get_and_clear(mm, addr, cpte); | 202 | pte = ptep_get_and_clear(mm, addr, cpte); |
215 | for (i = 1; i < ncontig; ++i) { | 203 | for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) { |
216 | /* | 204 | /* |
217 | * If HW_AFDBM is enabled, then the HW could | 205 | * If HW_AFDBM is enabled, then the HW could |
218 | * turn on the dirty bit for any of the page | 206 | * turn on the dirty bit for any of the page |
@@ -250,7 +238,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, | |||
250 | pfn = pte_pfn(*cpte); | 238 | pfn = pte_pfn(*cpte); |
251 | ncontig = find_num_contig(vma->vm_mm, addr, cpte, | 239 | ncontig = find_num_contig(vma->vm_mm, addr, cpte, |
252 | *cpte, &pgsize); | 240 | *cpte, &pgsize); |
253 | for (i = 0; i < ncontig; ++i, ++cpte) { | 241 | for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) { |
254 | changed = ptep_set_access_flags(vma, addr, cpte, | 242 | changed = ptep_set_access_flags(vma, addr, cpte, |
255 | pfn_pte(pfn, | 243 | pfn_pte(pfn, |
256 | hugeprot), | 244 | hugeprot), |
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
273 | 261 | ||
274 | cpte = huge_pte_offset(mm, addr); | 262 | cpte = huge_pte_offset(mm, addr); |
275 | ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); | 263 | ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); |
276 | for (i = 0; i < ncontig; ++i, ++cpte) | 264 | for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) |
277 | ptep_set_wrprotect(mm, addr, cpte); | 265 | ptep_set_wrprotect(mm, addr, cpte); |
278 | } else { | 266 | } else { |
279 | ptep_set_wrprotect(mm, addr, ptep); | 267 | ptep_set_wrprotect(mm, addr, ptep); |
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, | |||
291 | cpte = huge_pte_offset(vma->vm_mm, addr); | 279 | cpte = huge_pte_offset(vma->vm_mm, addr); |
292 | ncontig = find_num_contig(vma->vm_mm, addr, cpte, | 280 | ncontig = find_num_contig(vma->vm_mm, addr, cpte, |
293 | *cpte, &pgsize); | 281 | *cpte, &pgsize); |
294 | for (i = 0; i < ncontig; ++i, ++cpte) | 282 | for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) |
295 | ptep_clear_flush(vma, addr, cpte); | 283 | ptep_clear_flush(vma, addr, cpte); |
296 | } else { | 284 | } else { |
297 | ptep_clear_flush(vma, addr, ptep); | 285 | ptep_clear_flush(vma, addr, ptep); |
@@ -323,7 +311,7 @@ __setup("hugepagesz=", setup_hugepagesz); | |||
323 | static __init int add_default_hugepagesz(void) | 311 | static __init int add_default_hugepagesz(void) |
324 | { | 312 | { |
325 | if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) | 313 | if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) |
326 | hugetlb_add_hstate(CONT_PMD_SHIFT); | 314 | hugetlb_add_hstate(CONT_PTE_SHIFT); |
327 | return 0; | 315 | return 0; |
328 | } | 316 | } |
329 | arch_initcall(add_default_hugepagesz); | 317 | arch_initcall(add_default_hugepagesz); |
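The three hugetlbpage.c loop changes fix one bug: the loops advanced the PTE pointer but never addr, so every per-PTE operation hit the first address of the contiguous range. A worked example of the corrected stride, assuming a 4K granule where 16 PTEs form one 64K contiguous range:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define CONT_PTES 16            /* 16 x 4K = one 64K contiguous range */

    int main(void)
    {
        unsigned long addr = 0x400000UL;
        unsigned long pgsize = PAGE_SIZE;

        /* Corrected loop shape: both the PTE index and addr advance. */
        for (int i = 0; i < CONT_PTES; ++i, addr += pgsize)
            printf("op on pte[%2d] at addr %#lx\n", i, addr);
        /* Before the fix, every iteration operated on 0x400000. */
        return 0;
    }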
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 05615a3fdc6f..17243e43184e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <linux/memblock.h> | 28 | #include <linux/memblock.h> |
29 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | #include <linux/slab.h> | ||
32 | #include <linux/stop_machine.h> | ||
33 | 31 | ||
34 | #include <asm/barrier.h> | 32 | #include <asm/barrier.h> |
35 | #include <asm/cputype.h> | 33 | #include <asm/cputype.h> |
@@ -42,6 +40,7 @@ | |||
42 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
43 | #include <asm/memblock.h> | 41 | #include <asm/memblock.h> |
44 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
43 | #include <asm/ptdump.h> | ||
45 | 44 | ||
46 | u64 idmap_t0sz = TCR_T0SZ(VA_BITS); | 45 | u64 idmap_t0sz = TCR_T0SZ(VA_BITS); |
47 | 46 | ||
@@ -95,11 +94,24 @@ static phys_addr_t __init early_pgtable_alloc(void) | |||
95 | return phys; | 94 | return phys; |
96 | } | 95 | } |
97 | 96 | ||
97 | static bool pgattr_change_is_safe(u64 old, u64 new) | ||
98 | { | ||
99 | /* | ||
100 | * The following mapping attributes may be updated in live | ||
101 | * kernel mappings without the need for break-before-make. | ||
102 | */ | ||
103 | static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE; | ||
104 | |||
105 | return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0; | ||
106 | } | ||
107 | |||
98 | static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | 108 | static void alloc_init_pte(pmd_t *pmd, unsigned long addr, |
99 | unsigned long end, unsigned long pfn, | 109 | unsigned long end, unsigned long pfn, |
100 | pgprot_t prot, | 110 | pgprot_t prot, |
101 | phys_addr_t (*pgtable_alloc)(void)) | 111 | phys_addr_t (*pgtable_alloc)(void), |
112 | bool page_mappings_only) | ||
102 | { | 113 | { |
114 | pgprot_t __prot = prot; | ||
103 | pte_t *pte; | 115 | pte_t *pte; |
104 | 116 | ||
105 | BUG_ON(pmd_sect(*pmd)); | 117 | BUG_ON(pmd_sect(*pmd)); |
@@ -115,8 +127,28 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
115 | 127 | ||
116 | pte = pte_set_fixmap_offset(pmd, addr); | 128 | pte = pte_set_fixmap_offset(pmd, addr); |
117 | do { | 129 | do { |
118 | set_pte(pte, pfn_pte(pfn, prot)); | 130 | pte_t old_pte = *pte; |
131 | |||
132 | /* | ||
133 | * Set the contiguous bit for the subsequent group of PTEs if | ||
134 | * its size and alignment are appropriate. | ||
135 | */ | ||
136 | if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) { | ||
137 | if (end - addr >= CONT_PTE_SIZE && !page_mappings_only) | ||
138 | __prot = __pgprot(pgprot_val(prot) | PTE_CONT); | ||
139 | else | ||
140 | __prot = prot; | ||
141 | } | ||
142 | |||
143 | set_pte(pte, pfn_pte(pfn, __prot)); | ||
119 | pfn++; | 144 | pfn++; |
145 | |||
146 | /* | ||
147 | * After the PTE entry has been populated once, we | ||
148 | * only allow updates to the permission attributes. | ||
149 | */ | ||
150 | BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte))); | ||
151 | |||
120 | } while (pte++, addr += PAGE_SIZE, addr != end); | 152 | } while (pte++, addr += PAGE_SIZE, addr != end); |
121 | 153 | ||
122 | pte_clear_fixmap(); | 154 | pte_clear_fixmap(); |
@@ -125,8 +157,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
125 | static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, | 157 | static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, |
126 | phys_addr_t phys, pgprot_t prot, | 158 | phys_addr_t phys, pgprot_t prot, |
127 | phys_addr_t (*pgtable_alloc)(void), | 159 | phys_addr_t (*pgtable_alloc)(void), |
128 | bool allow_block_mappings) | 160 | bool page_mappings_only) |
129 | { | 161 | { |
162 | pgprot_t __prot = prot; | ||
130 | pmd_t *pmd; | 163 | pmd_t *pmd; |
131 | unsigned long next; | 164 | unsigned long next; |
132 | 165 | ||
@@ -146,27 +179,39 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, | |||
146 | 179 | ||
147 | pmd = pmd_set_fixmap_offset(pud, addr); | 180 | pmd = pmd_set_fixmap_offset(pud, addr); |
148 | do { | 181 | do { |
182 | pmd_t old_pmd = *pmd; | ||
183 | |||
149 | next = pmd_addr_end(addr, end); | 184 | next = pmd_addr_end(addr, end); |
185 | |||
150 | /* try section mapping first */ | 186 | /* try section mapping first */ |
151 | if (((addr | next | phys) & ~SECTION_MASK) == 0 && | 187 | if (((addr | next | phys) & ~SECTION_MASK) == 0 && |
152 | allow_block_mappings) { | 188 | !page_mappings_only) { |
153 | pmd_t old_pmd = *pmd; | ||
154 | pmd_set_huge(pmd, phys, prot); | ||
155 | /* | 189 | /* |
156 | * Check for previous table entries created during | 190 | * Set the contiguous bit for the subsequent group of |
157 | * boot (__create_page_tables) and flush them. | 191 | * PMDs if its size and alignment are appropriate. |
158 | */ | 192 | */ |
159 | if (!pmd_none(old_pmd)) { | 193 | if (((addr | phys) & ~CONT_PMD_MASK) == 0) { |
160 | flush_tlb_all(); | 194 | if (end - addr >= CONT_PMD_SIZE) |
161 | if (pmd_table(old_pmd)) { | 195 | __prot = __pgprot(pgprot_val(prot) | |
162 | phys_addr_t table = pmd_page_paddr(old_pmd); | 196 | PTE_CONT); |
163 | if (!WARN_ON_ONCE(slab_is_available())) | 197 | else |
164 | memblock_free(table, PAGE_SIZE); | 198 | __prot = prot; |
165 | } | ||
166 | } | 199 | } |
200 | pmd_set_huge(pmd, phys, __prot); | ||
201 | |||
202 | /* | ||
203 | * After the PMD entry has been populated once, we | ||
204 | * only allow updates to the permission attributes. | ||
205 | */ | ||
206 | BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), | ||
207 | pmd_val(*pmd))); | ||
167 | } else { | 208 | } else { |
168 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), | 209 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), |
169 | prot, pgtable_alloc); | 210 | prot, pgtable_alloc, |
211 | page_mappings_only); | ||
212 | |||
213 | BUG_ON(pmd_val(old_pmd) != 0 && | ||
214 | pmd_val(old_pmd) != pmd_val(*pmd)); | ||
170 | } | 215 | } |
171 | phys += next - addr; | 216 | phys += next - addr; |
172 | } while (pmd++, addr = next, addr != end); | 217 | } while (pmd++, addr = next, addr != end); |
@@ -189,7 +234,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, | |||
189 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | 234 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
190 | phys_addr_t phys, pgprot_t prot, | 235 | phys_addr_t phys, pgprot_t prot, |
191 | phys_addr_t (*pgtable_alloc)(void), | 236 | phys_addr_t (*pgtable_alloc)(void), |
192 | bool allow_block_mappings) | 237 | bool page_mappings_only) |
193 | { | 238 | { |
194 | pud_t *pud; | 239 | pud_t *pud; |
195 | unsigned long next; | 240 | unsigned long next; |
@@ -204,33 +249,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
204 | 249 | ||
205 | pud = pud_set_fixmap_offset(pgd, addr); | 250 | pud = pud_set_fixmap_offset(pgd, addr); |
206 | do { | 251 | do { |
252 | pud_t old_pud = *pud; | ||
253 | |||
207 | next = pud_addr_end(addr, end); | 254 | next = pud_addr_end(addr, end); |
208 | 255 | ||
209 | /* | 256 | /* |
210 | * For 4K granule only, attempt to put down a 1GB block | 257 | * For 4K granule only, attempt to put down a 1GB block |
211 | */ | 258 | */ |
212 | if (use_1G_block(addr, next, phys) && allow_block_mappings) { | 259 | if (use_1G_block(addr, next, phys) && !page_mappings_only) { |
213 | pud_t old_pud = *pud; | ||
214 | pud_set_huge(pud, phys, prot); | 260 | pud_set_huge(pud, phys, prot); |
215 | 261 | ||
216 | /* | 262 | /* |
217 | * If we have an old value for a pud, it will | 263 | * After the PUD entry has been populated once, we |
218 | * be pointing to a pmd table that we no longer | 264 | * only allow updates to the permission attributes. |
219 | * need (from swapper_pg_dir). | ||
220 | * | ||
221 | * Look up the old pmd table and free it. | ||
222 | */ | 265 | */ |
223 | if (!pud_none(old_pud)) { | 266 | BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), |
224 | flush_tlb_all(); | 267 | pud_val(*pud))); |
225 | if (pud_table(old_pud)) { | ||
226 | phys_addr_t table = pud_page_paddr(old_pud); | ||
227 | if (!WARN_ON_ONCE(slab_is_available())) | ||
228 | memblock_free(table, PAGE_SIZE); | ||
229 | } | ||
230 | } | ||
231 | } else { | 268 | } else { |
232 | alloc_init_pmd(pud, addr, next, phys, prot, | 269 | alloc_init_pmd(pud, addr, next, phys, prot, |
233 | pgtable_alloc, allow_block_mappings); | 270 | pgtable_alloc, page_mappings_only); |
271 | |||
272 | BUG_ON(pud_val(old_pud) != 0 && | ||
273 | pud_val(old_pud) != pud_val(*pud)); | ||
234 | } | 274 | } |
235 | phys += next - addr; | 275 | phys += next - addr; |
236 | } while (pud++, addr = next, addr != end); | 276 | } while (pud++, addr = next, addr != end); |
@@ -242,7 +282,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, | |||
242 | unsigned long virt, phys_addr_t size, | 282 | unsigned long virt, phys_addr_t size, |
243 | pgprot_t prot, | 283 | pgprot_t prot, |
244 | phys_addr_t (*pgtable_alloc)(void), | 284 | phys_addr_t (*pgtable_alloc)(void), |
245 | bool allow_block_mappings) | 285 | bool page_mappings_only) |
246 | { | 286 | { |
247 | unsigned long addr, length, end, next; | 287 | unsigned long addr, length, end, next; |
248 | pgd_t *pgd = pgd_offset_raw(pgdir, virt); | 288 | pgd_t *pgd = pgd_offset_raw(pgdir, virt); |
@@ -262,7 +302,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, | |||
262 | do { | 302 | do { |
263 | next = pgd_addr_end(addr, end); | 303 | next = pgd_addr_end(addr, end); |
264 | alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc, | 304 | alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc, |
265 | allow_block_mappings); | 305 | page_mappings_only); |
266 | phys += next - addr; | 306 | phys += next - addr; |
267 | } while (pgd++, addr = next, addr != end); | 307 | } while (pgd++, addr = next, addr != end); |
268 | } | 308 | } |
@@ -291,17 +331,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, | |||
291 | &phys, virt); | 331 | &phys, virt); |
292 | return; | 332 | return; |
293 | } | 333 | } |
294 | __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true); | 334 | __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false); |
295 | } | 335 | } |
296 | 336 | ||
297 | void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, | 337 | void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, |
298 | unsigned long virt, phys_addr_t size, | 338 | unsigned long virt, phys_addr_t size, |
299 | pgprot_t prot, bool allow_block_mappings) | 339 | pgprot_t prot, bool page_mappings_only) |
300 | { | 340 | { |
301 | BUG_ON(mm == &init_mm); | 341 | BUG_ON(mm == &init_mm); |
302 | 342 | ||
303 | __create_pgd_mapping(mm->pgd, phys, virt, size, prot, | 343 | __create_pgd_mapping(mm->pgd, phys, virt, size, prot, |
304 | pgd_pgtable_alloc, allow_block_mappings); | 344 | pgd_pgtable_alloc, page_mappings_only); |
305 | } | 345 | } |
306 | 346 | ||
307 | static void create_mapping_late(phys_addr_t phys, unsigned long virt, | 347 | static void create_mapping_late(phys_addr_t phys, unsigned long virt, |
@@ -314,7 +354,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, | |||
314 | } | 354 | } |
315 | 355 | ||
316 | __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, | 356 | __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, |
317 | NULL, !debug_pagealloc_enabled()); | 357 | NULL, debug_pagealloc_enabled()); |
318 | } | 358 | } |
319 | 359 | ||
320 | static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) | 360 | static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) |
@@ -332,7 +372,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end | |||
332 | __create_pgd_mapping(pgd, start, __phys_to_virt(start), | 372 | __create_pgd_mapping(pgd, start, __phys_to_virt(start), |
333 | end - start, PAGE_KERNEL, | 373 | end - start, PAGE_KERNEL, |
334 | early_pgtable_alloc, | 374 | early_pgtable_alloc, |
335 | !debug_pagealloc_enabled()); | 375 | debug_pagealloc_enabled()); |
336 | return; | 376 | return; |
337 | } | 377 | } |
338 | 378 | ||
@@ -345,13 +385,13 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end | |||
345 | __phys_to_virt(start), | 385 | __phys_to_virt(start), |
346 | kernel_start - start, PAGE_KERNEL, | 386 | kernel_start - start, PAGE_KERNEL, |
347 | early_pgtable_alloc, | 387 | early_pgtable_alloc, |
348 | !debug_pagealloc_enabled()); | 388 | debug_pagealloc_enabled()); |
349 | if (kernel_end < end) | 389 | if (kernel_end < end) |
350 | __create_pgd_mapping(pgd, kernel_end, | 390 | __create_pgd_mapping(pgd, kernel_end, |
351 | __phys_to_virt(kernel_end), | 391 | __phys_to_virt(kernel_end), |
352 | end - kernel_end, PAGE_KERNEL, | 392 | end - kernel_end, PAGE_KERNEL, |
353 | early_pgtable_alloc, | 393 | early_pgtable_alloc, |
354 | !debug_pagealloc_enabled()); | 394 | debug_pagealloc_enabled()); |
355 | 395 | ||
356 | /* | 396 | /* |
357 | * Map the linear alias of the [_text, __init_begin) interval as | 397 | * Map the linear alias of the [_text, __init_begin) interval as |
@@ -361,7 +401,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end | |||
361 | */ | 401 | */ |
362 | __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start), | 402 | __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start), |
363 | kernel_end - kernel_start, PAGE_KERNEL_RO, | 403 | kernel_end - kernel_start, PAGE_KERNEL_RO, |
364 | early_pgtable_alloc, !debug_pagealloc_enabled()); | 404 | early_pgtable_alloc, debug_pagealloc_enabled()); |
365 | } | 405 | } |
366 | 406 | ||
367 | static void __init map_mem(pgd_t *pgd) | 407 | static void __init map_mem(pgd_t *pgd) |
@@ -396,6 +436,11 @@ void mark_rodata_ro(void) | |||
396 | section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; | 436 | section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; |
397 | create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata, | 437 | create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata, |
398 | section_size, PAGE_KERNEL_RO); | 438 | section_size, PAGE_KERNEL_RO); |
439 | |||
440 | /* flush the TLBs after updating live kernel mappings */ | ||
441 | flush_tlb_all(); | ||
442 | |||
443 | debug_checkwx(); | ||
399 | } | 444 | } |
400 | 445 | ||
401 | static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, | 446 | static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, |
@@ -408,7 +453,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, | |||
408 | BUG_ON(!PAGE_ALIGNED(size)); | 453 | BUG_ON(!PAGE_ALIGNED(size)); |
409 | 454 | ||
410 | __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, | 455 | __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, |
411 | early_pgtable_alloc, !debug_pagealloc_enabled()); | 456 | early_pgtable_alloc, debug_pagealloc_enabled()); |
412 | 457 | ||
413 | vma->addr = va_start; | 458 | vma->addr = va_start; |
414 | vma->phys_addr = pa_start; | 459 | vma->phys_addr = pa_start; |
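Two invariants drive the rewritten mmu.c paths: the contiguous hint may be set only when the virtual and physical addresses are aligned to the whole group and a full group fits in the remaining range, and a live entry may change only in its PXN/RDONLY/WRITE permission bits (anything else would need break-before-make, hence the BUG_ONs). Both reduce to bit tests; a standalone sketch with assumed 4K-granule constants and illustrative PTE bit positions:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define PAGE_SHIFT    12
    #define CONT_PTES     16                                  /* 4K granule: 16 PTEs */
    #define CONT_PTE_SIZE ((uint64_t)CONT_PTES << PAGE_SHIFT) /* 64K */
    #define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))

    /* Illustrative permission-bit positions, not the real PTE layout. */
    #define PTE_PXN    (1ull << 53)
    #define PTE_WRITE  (1ull << 51)
    #define PTE_RDONLY (1ull << 7)

    /* Contiguous hint: va and pa 64K-aligned, with a full 64K group left. */
    static bool can_use_cont(uint64_t addr, uint64_t phys, uint64_t end)
    {
        return ((addr | phys) & ~CONT_PTE_MASK) == 0 &&
               end - addr >= CONT_PTE_SIZE;
    }

    /* Live updates may flip only permission bits, never the output address. */
    static bool pgattr_change_is_safe(uint64_t old, uint64_t newv)
    {
        static const uint64_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

        return old == 0 || newv == 0 || ((old ^ newv) & ~mask) == 0;
    }

    int main(void)
    {
        printf("cont, aligned:    %d\n", can_use_cont(0x10000, 0x80010000, 0x30000));
        printf("cont, misaligned: %d\n", can_use_cont(0x11000, 0x80010000, 0x30000));
        printf("attr-only change: %d\n", pgattr_change_is_safe(PTE_WRITE, PTE_RDONLY));
        printf("address change:   %d\n", pgattr_change_is_safe(0x1000, 0x2000));
        return 0;
    }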
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 352c73b6a59e..32682be978e0 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -70,11 +70,14 @@ ENTRY(cpu_do_suspend) | |||
70 | mrs x8, mdscr_el1 | 70 | mrs x8, mdscr_el1 |
71 | mrs x9, oslsr_el1 | 71 | mrs x9, oslsr_el1 |
72 | mrs x10, sctlr_el1 | 72 | mrs x10, sctlr_el1 |
73 | mrs x11, tpidr_el1 | ||
74 | mrs x12, sp_el0 | ||
73 | stp x2, x3, [x0] | 75 | stp x2, x3, [x0] |
74 | stp x4, xzr, [x0, #16] | 76 | stp x4, xzr, [x0, #16] |
75 | stp x5, x6, [x0, #32] | 77 | stp x5, x6, [x0, #32] |
76 | stp x7, x8, [x0, #48] | 78 | stp x7, x8, [x0, #48] |
77 | stp x9, x10, [x0, #64] | 79 | stp x9, x10, [x0, #64] |
80 | stp x11, x12, [x0, #80] | ||
78 | ret | 81 | ret |
79 | ENDPROC(cpu_do_suspend) | 82 | ENDPROC(cpu_do_suspend) |
80 | 83 | ||
@@ -90,6 +93,7 @@ ENTRY(cpu_do_resume) | |||
90 | ldp x6, x8, [x0, #32] | 93 | ldp x6, x8, [x0, #32] |
91 | ldp x9, x10, [x0, #48] | 94 | ldp x9, x10, [x0, #48] |
92 | ldp x11, x12, [x0, #64] | 95 | ldp x11, x12, [x0, #64] |
96 | ldp x13, x14, [x0, #80] | ||
93 | msr tpidr_el0, x2 | 97 | msr tpidr_el0, x2 |
94 | msr tpidrro_el0, x3 | 98 | msr tpidrro_el0, x3 |
95 | msr contextidr_el1, x4 | 99 | msr contextidr_el1, x4 |
@@ -112,6 +116,8 @@ ENTRY(cpu_do_resume) | |||
112 | msr mdscr_el1, x10 | 116 | msr mdscr_el1, x10 |
113 | 117 | ||
114 | msr sctlr_el1, x12 | 118 | msr sctlr_el1, x12 |
119 | msr tpidr_el1, x13 | ||
120 | msr sp_el0, x14 | ||
115 | /* | 121 | /* |
116 | * Restore oslsr_el1 by writing oslar_el1 | 122 | * Restore oslsr_el1 by writing oslar_el1 |
117 | */ | 123 | */ |
@@ -136,11 +142,7 @@ ENTRY(cpu_do_switch_mm) | |||
136 | bfi x0, x1, #48, #16 // set the ASID | 142 | bfi x0, x1, #48, #16 // set the ASID |
137 | msr ttbr0_el1, x0 // set TTBR0 | 143 | msr ttbr0_el1, x0 // set TTBR0 |
138 | isb | 144 | isb |
139 | alternative_if ARM64_WORKAROUND_CAVIUM_27456 | 145 | post_ttbr0_update_workaround |
140 | ic iallu | ||
141 | dsb nsh | ||
142 | isb | ||
143 | alternative_else_nop_endif | ||
144 | ret | 146 | ret |
145 | ENDPROC(cpu_do_switch_mm) | 147 | ENDPROC(cpu_do_switch_mm) |
146 | 148 | ||
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c new file mode 100644 index 000000000000..eee4d864350c --- /dev/null +++ b/arch/arm64/mm/ptdump_debugfs.c | |||
@@ -0,0 +1,31 @@ | |||
1 | #include <linux/debugfs.h> | ||
2 | #include <linux/seq_file.h> | ||
3 | |||
4 | #include <asm/ptdump.h> | ||
5 | |||
6 | static int ptdump_show(struct seq_file *m, void *v) | ||
7 | { | ||
8 | struct ptdump_info *info = m->private; | ||
9 | ptdump_walk_pgd(m, info); | ||
10 | return 0; | ||
11 | } | ||
12 | |||
13 | static int ptdump_open(struct inode *inode, struct file *file) | ||
14 | { | ||
15 | return single_open(file, ptdump_show, inode->i_private); | ||
16 | } | ||
17 | |||
18 | static const struct file_operations ptdump_fops = { | ||
19 | .open = ptdump_open, | ||
20 | .read = seq_read, | ||
21 | .llseek = seq_lseek, | ||
22 | .release = single_release, | ||
23 | }; | ||
24 | |||
25 | int ptdump_debugfs_register(struct ptdump_info *info, const char *name) | ||
26 | { | ||
27 | struct dentry *pe; | ||
28 | pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); | ||
29 | return pe ? 0 : -ENOMEM; | ||
30 | |||
31 | } | ||
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 329c8027b0a9..b41aff25426d 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S | |||
@@ -49,6 +49,7 @@ | |||
49 | 49 | ||
50 | #include <linux/linkage.h> | 50 | #include <linux/linkage.h> |
51 | #include <asm/assembler.h> | 51 | #include <asm/assembler.h> |
52 | #include <asm/uaccess.h> | ||
52 | #include <xen/interface/xen.h> | 53 | #include <xen/interface/xen.h> |
53 | 54 | ||
54 | 55 | ||
@@ -91,6 +92,20 @@ ENTRY(privcmd_call) | |||
91 | mov x2, x3 | 92 | mov x2, x3 |
92 | mov x3, x4 | 93 | mov x3, x4 |
93 | mov x4, x5 | 94 | mov x4, x5 |
95 | /* | ||
96 | * Privcmd calls are issued by userspace. The kernel needs to | ||
97 | * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 | ||
98 | * translations to user memory via AT instructions. Since AT | ||
99 | * instructions are not affected by the PAN bit (ARMv8.1), we only | ||
100 | * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation | ||
101 | * is enabled (it implies that hardware UAO and PAN are disabled). | ||
102 | */ | ||
103 | uaccess_ttbr0_enable x6, x7 | ||
94 | hvc XEN_IMM | 104 | hvc XEN_IMM |
105 | |||
106 | /* | ||
107 | * Disable userspace access from the kernel once the hyp call has completed. | ||
108 | */ | ||
109 | uaccess_ttbr0_disable x6 | ||
95 | ret | 110 | ret |
96 | ENDPROC(privcmd_call); | 111 | ENDPROC(privcmd_call); |
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 7c75a8d9091a..349dc3e1e52e 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c | |||
@@ -39,7 +39,7 @@ static struct mm_struct efi_mm = { | |||
39 | .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), | 39 | .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), |
40 | }; | 40 | }; |
41 | 41 | ||
42 | #ifdef CONFIG_ARM64_PTDUMP | 42 | #ifdef CONFIG_ARM64_PTDUMP_DEBUGFS |
43 | #include <asm/ptdump.h> | 43 | #include <asm/ptdump.h> |
44 | 44 | ||
45 | static struct ptdump_info efi_ptdump_info = { | 45 | static struct ptdump_info efi_ptdump_info = { |
@@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = { | |||
53 | 53 | ||
54 | static int __init ptdump_init(void) | 54 | static int __init ptdump_init(void) |
55 | { | 55 | { |
56 | return ptdump_register(&efi_ptdump_info, "efi_page_tables"); | 56 | return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); |
57 | } | 57 | } |
58 | device_initcall(ptdump_init); | 58 | device_initcall(ptdump_init); |
59 | 59 | ||
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 19d642eae096..26e1d7fafb1e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void) | |||
120 | } | 120 | } |
121 | 121 | ||
122 | #ifdef CONFIG_ARM64 | 122 | #ifdef CONFIG_ARM64 |
123 | static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx); | ||
124 | 123 | ||
125 | static u64 __maybe_unused gic_read_iar(void) | 124 | static u64 __maybe_unused gic_read_iar(void) |
126 | { | 125 | { |
127 | if (static_branch_unlikely(&is_cavium_thunderx)) | 126 | if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) |
128 | return gic_read_iar_cavium_thunderx(); | 127 | return gic_read_iar_cavium_thunderx(); |
129 | else | 128 | else |
130 | return gic_read_iar_common(); | 129 | return gic_read_iar_common(); |
@@ -905,14 +904,6 @@ static const struct irq_domain_ops partition_domain_ops = { | |||
905 | .select = gic_irq_domain_select, | 904 | .select = gic_irq_domain_select, |
906 | }; | 905 | }; |
907 | 906 | ||
908 | static void gicv3_enable_quirks(void) | ||
909 | { | ||
910 | #ifdef CONFIG_ARM64 | ||
911 | if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154)) | ||
912 | static_branch_enable(&is_cavium_thunderx); | ||
913 | #endif | ||
914 | } | ||
915 | |||
916 | static int __init gic_init_bases(void __iomem *dist_base, | 907 | static int __init gic_init_bases(void __iomem *dist_base, |
917 | struct redist_region *rdist_regs, | 908 | struct redist_region *rdist_regs, |
918 | u32 nr_redist_regions, | 909 | u32 nr_redist_regions, |
@@ -935,8 +926,6 @@ static int __init gic_init_bases(void __iomem *dist_base, | |||
935 | gic_data.nr_redist_regions = nr_redist_regions; | 926 | gic_data.nr_redist_regions = nr_redist_regions; |
936 | gic_data.redist_stride = redist_stride; | 927 | gic_data.redist_stride = redist_stride; |
937 | 928 | ||
938 | gicv3_enable_quirks(); | ||
939 | |||
940 | /* | 929 | /* |
941 | * Find out how many interrupts are supported. | 930 | * Find out how many interrupts are supported. |
942 | * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) | 931 | * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) |
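This hunk drops the driver-local static key in favour of the reworked arm64 capability check mentioned in the merge summary. A runnable userspace sketch of the pattern (all names below are hypothetical stand-ins): instead of each caller caching its own quirk flag, the fast path queries a shared capability table by ID, which is why the boot-time gicv3_enable_quirks() plumbing could be removed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the arm64 cpucaps machinery; in the kernel,
 * cpus_have_const_cap() compiles down to a patched static branch rather
 * than a table lookup. */
enum { WORKAROUND_CAVIUM_23154, NCAPS };

static bool caps[NCAPS];

static bool have_cap(int num)
{
	return num < NCAPS && caps[num];
}

static unsigned int read_iar(void)
{
	if (have_cap(WORKAROUND_CAVIUM_23154))
		return 1;	/* gic_read_iar_cavium_thunderx() stand-in */
	return 0;		/* gic_read_iar_common() stand-in */
}

int main(void)
{
	caps[WORKAROUND_CAVIUM_23154] = true;	/* set once at CPU bring-up */
	printf("iar path: %u\n", read_iar());
	return 0;
}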
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Common syscall restarting data | ||
3 | */ | ||
4 | #ifndef __LINUX_RESTART_BLOCK_H | ||
5 | #define __LINUX_RESTART_BLOCK_H | ||
6 | |||
7 | #include <linux/compiler.h> | ||
8 | #include <linux/types.h> | ||
9 | |||
10 | struct timespec; | ||
11 | struct compat_timespec; | ||
12 | struct pollfd; | ||
13 | |||
14 | /* | ||
15 | * System call restart block. | ||
16 | */ | ||
17 | struct restart_block { | ||
18 | long (*fn)(struct restart_block *); | ||
19 | union { | ||
20 | /* For futex_wait and futex_wait_requeue_pi */ | ||
21 | struct { | ||
22 | u32 __user *uaddr; | ||
23 | u32 val; | ||
24 | u32 flags; | ||
25 | u32 bitset; | ||
26 | u64 time; | ||
27 | u32 __user *uaddr2; | ||
28 | } futex; | ||
29 | /* For nanosleep */ | ||
30 | struct { | ||
31 | clockid_t clockid; | ||
32 | struct timespec __user *rmtp; | ||
33 | #ifdef CONFIG_COMPAT | ||
34 | struct compat_timespec __user *compat_rmtp; | ||
35 | #endif | ||
36 | u64 expires; | ||
37 | } nanosleep; | ||
38 | /* For poll */ | ||
39 | struct { | ||
40 | struct pollfd __user *ufds; | ||
41 | int nfds; | ||
42 | int has_timeout; | ||
43 | unsigned long tv_sec; | ||
44 | unsigned long tv_nsec; | ||
45 | } poll; | ||
46 | }; | ||
47 | }; | ||
48 | |||
49 | extern long do_no_restart_syscall(struct restart_block *parm); | ||
50 | |||
51 | #endif /* __LINUX_RESTART_BLOCK_H */ | ||
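For context on how this header is consumed: a syscall interrupted by a signal records a continuation in current->restart_block and returns -ERESTART_RESTARTBLOCK; sys_restart_syscall later invokes ->fn to resume the work. A simplified, runnable userspace sketch of that flow (the struct below is a cut-down stand-in, not the header's actual layout):

#include <stdio.h>

/* Cut-down stand-in for struct restart_block: just the function pointer
 * and one continuation record. */
struct restart_block {
	long (*fn)(struct restart_block *);
	struct {
		unsigned long remaining;
	} nanosleep;
};

static long nanosleep_restart(struct restart_block *rb)
{
	printf("resuming sleep, %lu ns left\n", rb->nanosleep.remaining);
	return 0;
}

int main(void)
{
	struct restart_block rb = { 0 };

	/* syscall path: record how to restart before returning early */
	rb.fn = nanosleep_restart;
	rb.nanosleep.remaining = 500;

	/* restart path: what sys_restart_syscall does in the kernel */
	return (int)rb.fn(&rb);
}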
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 2873baf5372a..58373875e8ee 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -9,50 +9,17 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/bug.h> | 11 | #include <linux/bug.h> |
12 | 12 | #include <linux/restart_block.h> | |
13 | struct timespec; | ||
14 | struct compat_timespec; | ||
15 | 13 | ||
16 | #ifdef CONFIG_THREAD_INFO_IN_TASK | 14 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
17 | #define current_thread_info() ((struct thread_info *)current) | ||
18 | #endif | ||
19 | |||
20 | /* | 15 | /* |
21 | * System call restart block. | 16 | * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the |
17 | * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, | ||
18 | * including <asm/current.h> can cause a circular dependency on some platforms. | ||
22 | */ | 19 | */ |
23 | struct restart_block { | 20 | #include <asm/current.h> |
24 | long (*fn)(struct restart_block *); | 21 | #define current_thread_info() ((struct thread_info *)current) |
25 | union { | ||
26 | /* For futex_wait and futex_wait_requeue_pi */ | ||
27 | struct { | ||
28 | u32 __user *uaddr; | ||
29 | u32 val; | ||
30 | u32 flags; | ||
31 | u32 bitset; | ||
32 | u64 time; | ||
33 | u32 __user *uaddr2; | ||
34 | } futex; | ||
35 | /* For nanosleep */ | ||
36 | struct { | ||
37 | clockid_t clockid; | ||
38 | struct timespec __user *rmtp; | ||
39 | #ifdef CONFIG_COMPAT | ||
40 | struct compat_timespec __user *compat_rmtp; | ||
41 | #endif | 22 | #endif |
42 | u64 expires; | ||
43 | } nanosleep; | ||
44 | /* For poll */ | ||
45 | struct { | ||
46 | struct pollfd __user *ufds; | ||
47 | int nfds; | ||
48 | int has_timeout; | ||
49 | unsigned long tv_sec; | ||
50 | unsigned long tv_nsec; | ||
51 | } poll; | ||
52 | }; | ||
53 | }; | ||
54 | |||
55 | extern long do_no_restart_syscall(struct restart_block *parm); | ||
56 | 23 | ||
57 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
58 | #include <asm/thread_info.h> | 25 | #include <asm/thread_info.h> |
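The ((struct thread_info *)current) cast is valid because CONFIG_THREAD_INFO_IN_TASK places thread_info as the first member of task_struct, so a pointer to the task is also a pointer to its thread_info. A minimal standalone demonstration of that first-member idiom (the two structs are reduced stand-ins):

#include <assert.h>
#include <stddef.h>

struct thread_info { unsigned long flags; };

/* thread_info must stay the first member for the cast to be legal */
struct task_struct {
	struct thread_info thread_info;
	int pid;
};

int main(void)
{
	struct task_struct task = { { 0x1 }, 42 };
	struct thread_info *ti = (struct thread_info *)&task;

	assert(offsetof(struct task_struct, thread_info) == 0);
	assert(ti->flags == task.thread_info.flags);
	return 0;
}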
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h index b04000a2296a..2b65efd19a46 100644 --- a/include/uapi/linux/hw_breakpoint.h +++ b/include/uapi/linux/hw_breakpoint.h | |||
@@ -4,7 +4,11 @@ | |||
4 | enum { | 4 | enum { |
5 | HW_BREAKPOINT_LEN_1 = 1, | 5 | HW_BREAKPOINT_LEN_1 = 1, |
6 | HW_BREAKPOINT_LEN_2 = 2, | 6 | HW_BREAKPOINT_LEN_2 = 2, |
7 | HW_BREAKPOINT_LEN_3 = 3, | ||
7 | HW_BREAKPOINT_LEN_4 = 4, | 8 | HW_BREAKPOINT_LEN_4 = 4, |
9 | HW_BREAKPOINT_LEN_5 = 5, | ||
10 | HW_BREAKPOINT_LEN_6 = 6, | ||
11 | HW_BREAKPOINT_LEN_7 = 7, | ||
8 | HW_BREAKPOINT_LEN_8 = 8, | 12 | HW_BREAKPOINT_LEN_8 = 8, |
9 | }; | 13 | }; |
10 | 14 | ||
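With the new values, a perf watchpoint can cover 3-, 5-, 6- and 7-byte ranges rather than only power-of-two sizes. A sketch of the attribute setup (the helper name is made up; opening the event via perf_event_open() and error handling are omitted):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <string.h>

/* Configure a write watchpoint covering 3 bytes at addr. */
static void setup_3byte_watch(struct perf_event_attr *attr, void *addr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_type = HW_BREAKPOINT_W;
	attr->bp_addr = (unsigned long)addr;
	attr->bp_len = HW_BREAKPOINT_LEN_3;	/* newly valid on arm64 */
}

int main(void)
{
	struct perf_event_attr attr;
	static long target;

	setup_3byte_watch(&attr, &target);
	return attr.bp_len == HW_BREAKPOINT_LEN_3 ? 0 : 1;
}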
diff --git a/tools/include/uapi/linux/hw_breakpoint.h b/tools/include/uapi/linux/hw_breakpoint.h index b04000a2296a..2b65efd19a46 100644 --- a/tools/include/uapi/linux/hw_breakpoint.h +++ b/tools/include/uapi/linux/hw_breakpoint.h | |||
@@ -4,7 +4,11 @@ | |||
4 | enum { | 4 | enum { |
5 | HW_BREAKPOINT_LEN_1 = 1, | 5 | HW_BREAKPOINT_LEN_1 = 1, |
6 | HW_BREAKPOINT_LEN_2 = 2, | 6 | HW_BREAKPOINT_LEN_2 = 2, |
7 | HW_BREAKPOINT_LEN_3 = 3, | ||
7 | HW_BREAKPOINT_LEN_4 = 4, | 8 | HW_BREAKPOINT_LEN_4 = 4, |
9 | HW_BREAKPOINT_LEN_5 = 5, | ||
10 | HW_BREAKPOINT_LEN_6 = 6, | ||
11 | HW_BREAKPOINT_LEN_7 = 7, | ||
8 | HW_BREAKPOINT_LEN_8 = 8, | 12 | HW_BREAKPOINT_LEN_8 = 8, |
9 | }; | 13 | }; |
10 | 14 | ||
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile index 74e533fd4bc5..61b79e8df1f4 100644 --- a/tools/testing/selftests/breakpoints/Makefile +++ b/tools/testing/selftests/breakpoints/Makefile | |||
@@ -5,6 +5,9 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) | |||
5 | ifeq ($(ARCH),x86) | 5 | ifeq ($(ARCH),x86) |
6 | TEST_PROGS := breakpoint_test | 6 | TEST_PROGS := breakpoint_test |
7 | endif | 7 | endif |
8 | ifeq ($(ARCH),aarch64) | ||
9 | TEST_PROGS := breakpoint_test_arm64 | ||
10 | endif | ||
8 | 11 | ||
9 | TEST_PROGS += step_after_suspend_test | 12 | TEST_PROGS += step_after_suspend_test |
10 | 13 | ||
@@ -13,4 +16,4 @@ all: $(TEST_PROGS) | |||
13 | include ../lib.mk | 16 | include ../lib.mk |
14 | 17 | ||
15 | clean: | 18 | clean: |
16 | rm -fr breakpoint_test step_after_suspend_test | 19 | rm -fr breakpoint_test breakpoint_test_arm64 step_after_suspend_test |
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c new file mode 100644 index 000000000000..3897e996541e --- /dev/null +++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2016 Google, Inc. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * Original Code by Pavel Labath <labath@google.com> | ||
14 | * | ||
15 | * Code modified by Pratyush Anand <panand@redhat.com> | ||
16 | * for testing different byte select for each access size. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #define _GNU_SOURCE | ||
21 | |||
22 | #include <sys/types.h> | ||
23 | #include <sys/wait.h> | ||
24 | #include <sys/ptrace.h> | ||
25 | #include <sys/param.h> | ||
26 | #include <sys/uio.h> | ||
27 | #include <stdint.h> | ||
28 | #include <stdbool.h> | ||
29 | #include <stddef.h> | ||
30 | #include <string.h> | ||
31 | #include <stdio.h> | ||
32 | #include <unistd.h> | ||
33 | #include <elf.h> | ||
34 | #include <errno.h> | ||
35 | #include <signal.h> | ||
36 | |||
37 | #include "../kselftest.h" | ||
38 | |||
39 | static volatile uint8_t var[96] __attribute__((__aligned__(32))); | ||
40 | |||
41 | static void child(int size, int wr) | ||
42 | { | ||
43 | volatile uint8_t *addr = &var[32 + wr]; | ||
44 | |||
45 | if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) { | ||
46 | perror("ptrace(PTRACE_TRACEME) failed"); | ||
47 | _exit(1); | ||
48 | } | ||
49 | |||
50 | if (raise(SIGSTOP) != 0) { | ||
51 | perror("raise(SIGSTOP) failed"); | ||
52 | _exit(1); | ||
53 | } | ||
54 | |||
55 | if ((uintptr_t) addr % size) { | ||
56 | perror("Wrong address write for the given size\n"); | ||
57 | _exit(1); | ||
58 | } | ||
59 | switch (size) { | ||
60 | case 1: | ||
61 | *addr = 47; | ||
62 | break; | ||
63 | case 2: | ||
64 | *(uint16_t *)addr = 47; | ||
65 | break; | ||
66 | case 4: | ||
67 | *(uint32_t *)addr = 47; | ||
68 | break; | ||
69 | case 8: | ||
70 | *(uint64_t *)addr = 47; | ||
71 | break; | ||
72 | case 16: | ||
73 | __asm__ volatile ("stp x29, x30, %0" : "=m" (addr[0])); | ||
74 | break; | ||
75 | case 32: | ||
76 | __asm__ volatile ("stp q29, q30, %0" : "=m" (addr[0])); | ||
77 | break; | ||
78 | } | ||
79 | |||
80 | _exit(0); | ||
81 | } | ||
82 | |||
83 | static bool set_watchpoint(pid_t pid, int size, int wp) | ||
84 | { | ||
85 | const volatile uint8_t *addr = &var[32 + wp]; | ||
86 | const int offset = (uintptr_t)addr % 8; | ||
87 | const unsigned int byte_mask = ((1 << size) - 1) << offset; | ||
88 | const unsigned int type = 2; /* Write */ | ||
89 | const unsigned int enable = 1; | ||
90 | const unsigned int control = byte_mask << 5 | type << 3 | enable; | ||
91 | struct user_hwdebug_state dreg_state; | ||
92 | struct iovec iov; | ||
93 | |||
94 | memset(&dreg_state, 0, sizeof(dreg_state)); | ||
95 | dreg_state.dbg_regs[0].addr = (uintptr_t)(addr - offset); | ||
96 | dreg_state.dbg_regs[0].ctrl = control; | ||
97 | iov.iov_base = &dreg_state; | ||
98 | iov.iov_len = offsetof(struct user_hwdebug_state, dbg_regs) + | ||
99 | sizeof(dreg_state.dbg_regs[0]); | ||
100 | if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0) | ||
101 | return true; | ||
102 | |||
103 | if (errno == EIO) { | ||
104 | printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) " | ||
105 | "not supported on this hardware\n"); | ||
106 | ksft_exit_skip(); | ||
107 | } | ||
108 | perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed"); | ||
109 | return false; | ||
110 | } | ||
111 | |||
112 | static bool run_test(int wr_size, int wp_size, int wr, int wp) | ||
113 | { | ||
114 | int status; | ||
115 | siginfo_t siginfo; | ||
116 | pid_t pid = fork(); | ||
117 | pid_t wpid; | ||
118 | |||
119 | if (pid < 0) { | ||
120 | perror("fork() failed"); | ||
121 | return false; | ||
122 | } | ||
123 | if (pid == 0) | ||
124 | child(wr_size, wr); | ||
125 | |||
126 | wpid = waitpid(pid, &status, __WALL); | ||
127 | if (wpid != pid) { | ||
128 | perror("waitpid() failed"); | ||
129 | return false; | ||
130 | } | ||
131 | if (!WIFSTOPPED(status)) { | ||
132 | printf("child did not stop\n"); | ||
133 | return false; | ||
134 | } | ||
135 | if (WSTOPSIG(status) != SIGSTOP) { | ||
136 | printf("child did not stop with SIGSTOP\n"); | ||
137 | return false; | ||
138 | } | ||
139 | |||
140 | if (!set_watchpoint(pid, wp_size, wp)) | ||
141 | return false; | ||
142 | |||
143 | if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) { | ||
144 | perror("ptrace(PTRACE_CONT) failed"); | ||
145 | return false; | ||
146 | } | ||
147 | |||
148 | alarm(3); | ||
149 | wpid = waitpid(pid, &status, __WALL); | ||
150 | if (wpid != pid) { | ||
151 | perror("waitpid() failed"); | ||
152 | return false; | ||
153 | } | ||
154 | alarm(0); | ||
155 | if (WIFEXITED(status)) { | ||
156 | printf("child exited without hitting the watchpoint\t"); | ||
157 | return false; | ||
158 | } | ||
159 | if (!WIFSTOPPED(status)) { | ||
160 | printf("child did not stop\n"); | ||
161 | return false; | ||
162 | } | ||
163 | if (WSTOPSIG(status) != SIGTRAP) { | ||
164 | printf("child did not stop with SIGTRAP\n"); | ||
165 | return false; | ||
166 | } | ||
167 | if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) { | ||
168 | perror("ptrace(PTRACE_GETSIGINFO)"); | ||
169 | return false; | ||
170 | } | ||
171 | if (siginfo.si_code != TRAP_HWBKPT) { | ||
172 | printf("Unexpected si_code %d\n", siginfo.si_code); | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | kill(pid, SIGKILL); | ||
177 | wpid = waitpid(pid, &status, 0); | ||
178 | if (wpid != pid) { | ||
179 | perror("waitpid() failed"); | ||
180 | return false; | ||
181 | } | ||
182 | return true; | ||
183 | } | ||
184 | |||
185 | static void sigalrm(int sig) | ||
186 | { | ||
187 | } | ||
188 | |||
189 | int main(int argc, char **argv) | ||
190 | { | ||
191 | int opt; | ||
192 | bool succeeded = true; | ||
193 | struct sigaction act; | ||
194 | int wr, wp, size; | ||
195 | bool result; | ||
196 | |||
197 | act.sa_handler = sigalrm; | ||
198 | sigemptyset(&act.sa_mask); | ||
199 | act.sa_flags = 0; | ||
200 | sigaction(SIGALRM, &act, NULL); | ||
201 | for (size = 1; size <= 32; size = size*2) { | ||
202 | for (wr = 0; wr <= 32; wr = wr + size) { | ||
203 | for (wp = wr - size; wp <= wr + size; wp = wp + size) { | ||
204 | printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp); | ||
205 | result = run_test(size, MIN(size, 8), wr, wp); | ||
206 | if ((result && wr == wp) || (!result && wr != wp)) { | ||
207 | printf("[OK]\n"); | ||
208 | ksft_inc_pass_cnt(); | ||
209 | } else { | ||
210 | printf("[FAILED]\n"); | ||
211 | ksft_inc_fail_cnt(); | ||
212 | succeeded = false; | ||
213 | } | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | for (size = 1; size <= 32; size = size*2) { | ||
219 | printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size); | ||
220 | |||
221 | if (run_test(size, 8, -size, -8)) { | ||
222 | printf("[OK]\n"); | ||
223 | ksft_inc_pass_cnt(); | ||
224 | } else { | ||
225 | printf("[FAILED]\n"); | ||
226 | ksft_inc_fail_cnt(); | ||
227 | succeeded = false; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | ksft_print_cnts(); | ||
232 | if (succeeded) | ||
233 | ksft_exit_pass(); | ||
234 | else | ||
235 | ksft_exit_fail(); | ||
236 | } | ||
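To make the control-register arithmetic in set_watchpoint() concrete, a standalone worked example: a 3-byte write watch at doubleword offset 5 selects bytes 5-7, so the byte address select (BAS) mask is 0b11100000 and the assembled control word is 0x1c11 (BAS at bit 5, load/store type at bit 3, enable at bit 0), matching the encoding the test writes via NT_ARM_HW_WATCH:

#include <stdio.h>

int main(void)
{
	const int size = 3, offset = 5;	/* 3-byte watch, bytes 5..7 */
	const unsigned int byte_mask = ((1u << size) - 1) << offset;
	const unsigned int type = 2;	/* write */
	const unsigned int enable = 1;
	const unsigned int control = byte_mask << 5 | type << 3 | enable;

	printf("byte_mask=0x%x control=0x%x\n", byte_mask, control);	/* 0xe0, 0x1c11 */
	return 0;
}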