 84 files changed, 3094 insertions(+), 661 deletions(-)
diff --git a/Documentation/arm64/legacy_instructions.txt b/Documentation/arm64/legacy_instructions.txt
new file mode 100644
index 000000000000..a3b3da2ec6ed
--- /dev/null
+++ b/Documentation/arm64/legacy_instructions.txt
@@ -0,0 +1,45 @@
+The arm64 port of the Linux kernel provides infrastructure to support
+emulation of instructions which have been deprecated or obsoleted in
+the architecture. The infrastructure code uses undefined instruction
+hooks to support emulation. Where available, it also allows turning on
+the instruction execution in hardware.
+
+The emulation mode can be controlled by writing to sysctl nodes
+(/proc/sys/abi). The following explains the different execution
+behaviours and the corresponding values of the sysctl nodes:
+
+* Undef
+  Value: 0
+  Generates an undefined instruction abort. Default for instructions
+  that have been obsoleted in the architecture, e.g., SWP.
+
+* Emulate
+  Value: 1
+  Uses software emulation. To aid migration of software, in this mode
+  usage of the emulated instructions is traced and rate-limited
+  warnings are issued. This is the default for deprecated
+  instructions, e.g., CP15 barriers.
+
+* Hardware Execution
+  Value: 2
+  Although marked as deprecated, some implementations may support the
+  enabling/disabling of hardware support for the execution of these
+  instructions. Using hardware execution generally provides better
+  performance, but at the loss of the ability to gather runtime
+  statistics about the use of the deprecated instructions.
+
+The default mode depends on the status of the instruction in the
+architecture. Deprecated instructions should default to emulation
+while obsolete instructions must be undefined by default.
+
+Supported legacy instructions
+-----------------------------
+* SWP{B}
+  Node: /proc/sys/abi/swp
+  Status: Obsolete
+  Default: Undef (0)
+
+* CP15 Barriers
+  Node: /proc/sys/abi/cp15_barrier
+  Status: Deprecated
+  Default: Emulate (1)
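
For illustration (not part of the patch): a minimal userspace sketch, in C, that switches the CP15 barrier handling described above to hardware execution. Only the sysctl node path comes from this document; the rest is ordinary stdio, and root privileges are required to write the node.

/*
 * Sketch: select "Hardware Execution" (2) for the CP15 barrier
 * instructions via the sysctl node documented above. Requires root.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/abi/cp15_barrier", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("2\n", f);        /* 0 = Undef, 1 = Emulate, 2 = Hardware Execution */
        fclose(f);
        return 0;
}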
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9532f8d5857e..7c79c6494379 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -34,13 +34,16 @@ config ARM64
        select GENERIC_TIME_VSYSCALL
        select HANDLE_DOMAIN_IRQ
        select HARDIRQS_SW_RESEND
+       select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
        select HAVE_C_RECORDMCOUNT
        select HAVE_CC_STACKPROTECTOR
+       select HAVE_CMPXCHG_DOUBLE
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
@@ -193,6 +196,114 @@ endmenu
 
 menu "Kernel Features"
 
+menu "ARM errata workarounds via the alternatives framework"
+
+config ARM64_ERRATUM_826319
+       bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
+         AXI master interface and an L2 cache.
+
+         If a Cortex-A53 uses an AMBA AXI4 ACE interface to other processors
+         and is unable to accept a certain write via this interface, it will
+         not progress on read data presented on the read data channel and the
+         system can deadlock.
+
+         The workaround promotes data cache clean instructions to
+         data cache clean-and-invalidate.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
+config ARM64_ERRATUM_827319
+       bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
+         master interface and an L2 cache.
+
+         Under certain conditions this erratum can cause a clean line eviction
+         to occur at the same time as another transaction to the same address
+         on the AMBA 5 CHI interface, which can cause data corruption if the
+         interconnect reorders the two transactions.
+
+         The workaround promotes data cache clean instructions to
+         data cache clean-and-invalidate.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
+config ARM64_ERRATUM_824069
+       bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
+         to a coherent interconnect.
+
+         If a Cortex-A53 processor is executing a store or prefetch for
+         write instruction at the same time as a processor in another
+         cluster is executing a cache maintenance operation to the same
+         address, then this erratum might cause a clean cache line to be
+         incorrectly marked as dirty.
+
+         The workaround promotes data cache clean instructions to
+         data cache clean-and-invalidate.
+         Please note that this option does not necessarily enable the
+         workaround, as it depends on the alternative framework, which will
+         only patch the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
+config ARM64_ERRATUM_819472
+       bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
+         present when it is connected to a coherent interconnect.
+
+         If the processor is executing a load and store exclusive sequence at
+         the same time as a processor in another cluster is executing a cache
+         maintenance operation to the same address, then this erratum might
+         cause data corruption.
+
+         The workaround promotes data cache clean instructions to
+         data cache clean-and-invalidate.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
+config ARM64_ERRATUM_832075
+       bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 832075 on Cortex-A57 parts up to r1p2.
+
+         Affected Cortex-A57 parts might deadlock when exclusive load/store
+         instructions to Write-Back memory are mixed with Device loads.
+
+         The workaround is to promote device loads to use Load-Acquire
+         semantics.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
+endmenu
+
+
 choice
        prompt "Page size"
        default ARM64_4K_PAGES
@@ -345,6 +456,19 @@ config ARCH_HAS_CACHE_LINE_SIZE
 
 source "mm/Kconfig"
 
+config SECCOMP
+       bool "Enable seccomp to safely compute untrusted bytecode"
+       ---help---
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
 config XEN_DOM0
        def_bool y
        depends on XEN
@@ -361,6 +485,58 @@ config FORCE_MAX_ZONEORDER
        default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
        default "11"
 
+menuconfig ARMV8_DEPRECATED
+       bool "Emulate deprecated/obsolete ARMv8 instructions"
+       depends on COMPAT
+       help
+         Legacy software support may require certain instructions
+         that have been deprecated or obsoleted in the architecture.
+
+         Enable this config to enable selective emulation of these
+         features.
+
+         If unsure, say Y
+
+if ARMV8_DEPRECATED
+
+config SWP_EMULATION
+       bool "Emulate SWP/SWPB instructions"
+       help
+         ARMv8 obsoletes the use of A32 SWP/SWPB instructions such that
+         they are always undefined. Say Y here to enable software
+         emulation of these instructions for userspace using LDXR/STXR.
+
+         In some older versions of glibc [<=2.8] SWP is used during futex
+         trylock() operations with the assumption that the code will not
+         be preempted. This invalid assumption may be more likely to fail
+         with SWP emulation enabled, leading to deadlock of the user
+         application.
+
+         NOTE: when accessing uncached shared regions, LDXR/STXR rely
+         on an external transaction monitoring block called a global
+         monitor to maintain update atomicity. If your system does not
+         implement a global monitor, this option can cause programs that
+         perform SWP operations to uncached memory to deadlock.
+
+         If unsure, say Y
+
+config CP15_BARRIER_EMULATION
+       bool "Emulate CP15 Barrier instructions"
+       help
+         The CP15 barrier instructions - CP15ISB, CP15DSB, and
+         CP15DMB - are deprecated in ARMv8 (and ARMv7). It is
+         strongly recommended to use the ISB, DSB, and DMB
+         instructions instead.
+
+         Say Y here to enable software emulation of these
+         instructions for AArch32 userspace code. When this option is
+         enabled, CP15 barrier usage is traced which can help
+         identify software that needs updating.
+
+         If unsure, say Y
+
+endif
+
 endmenu
 
 menu "Boot options"
@@ -401,6 +577,17 @@ config EFI
          allow the kernel to be booted as an EFI application. This
          is only useful on systems that have UEFI firmware.
 
+config DMI
+       bool "Enable support for SMBIOS (DMI) tables"
+       depends on EFI
+       default y
+       help
+         This enables SMBIOS/DMI feature for systems.
+
+         This option is only useful on systems that have UEFI firmware.
+         However, even with this option, the resultant kernel should
+         continue to boot on existing non-UEFI platforms.
+
 endmenu
 
 menu "Userspace binary formats"
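
For illustration (not part of the patch): the SECCOMP help text above mentions prctl(PR_SET_SECCOMP); a minimal userspace sketch of entering strict mode, after which only a handful of syscalls (read, write, _exit, sigreturn) remain available. Everything here is standard userspace API.

/* Sketch: enter strict seccomp mode and prove we can still write(). */
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
                return 1;
        /* From here on, only read/write on open fds and _exit are allowed. */
        write(STDOUT_FILENO, "sandboxed\n", 10);
        _exit(0);
}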
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 0a12933e50ed..5fdd6dce8061 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,6 +6,18 @@ config FRAME_POINTER
        bool
        default y
 
+config ARM64_PTDUMP
+       bool "Export kernel pagetable layout to userspace via debugfs"
+       depends on DEBUG_KERNEL
+       select DEBUG_FS
+       help
+         Say Y here if you want to show the kernel pagetable layout in a
+         debugfs file. This information is only useful for kernel developers
+         who are working in architecture specific areas of the kernel.
+         It is probably not a good idea to enable this feature in a production
+         kernel.
+         If in doubt, say "N"
+
 config STRICT_DEVMEM
        bool "Filter access to /dev/mem"
        depends on MMU
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 5562652c5316..a38b02ce5f9a 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -27,20 +27,19 @@ config CRYPTO_AES_ARM64_CE
        tristate "AES core cipher using ARMv8 Crypto Extensions"
        depends on ARM64 && KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
-       select CRYPTO_AES
 
 config CRYPTO_AES_ARM64_CE_CCM
        tristate "AES in CCM mode using ARMv8 Crypto Extensions"
        depends on ARM64 && KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
-       select CRYPTO_AES
+       select CRYPTO_AES_ARM64_CE
        select CRYPTO_AEAD
 
 config CRYPTO_AES_ARM64_CE_BLK
        tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
        depends on ARM64 && KERNEL_MODE_NEON
        select CRYPTO_BLKCIPHER
-       select CRYPTO_AES
+       select CRYPTO_AES_ARM64_CE
        select CRYPTO_ABLK_HELPER
 
 config CRYPTO_AES_ARM64_NEON_BLK
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 9e6cdde9b43d..0ac73b838fa3 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -16,6 +16,8 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
+#include "aes-ce-setkey.h"
+
 static int num_rounds(struct crypto_aes_ctx *ctx)
 {
        /*
@@ -48,7 +50,7 @@ static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;
 
-       ret = crypto_aes_expand_key(ctx, in_key, key_len);
+       ret = ce_aes_expandkey(ctx, in_key, key_len);
        if (!ret)
                return 0;
 
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index 2075e1acae6b..ce47792a983d 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -14,6 +14,8 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
+#include "aes-ce-setkey.h"
+
 MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -124,6 +126,114 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
        kernel_neon_end();
 }
 
+/*
+ * aes_sub() - use the aese instruction to perform the AES sbox substitution
+ * on each byte in 'input'
+ */
+static u32 aes_sub(u32 input)
+{
+       u32 ret;
+
+       __asm__("dup    v1.4s, %w[in]           ;"
+               "movi   v0.16b, #0              ;"
+               "aese   v0.16b, v1.16b          ;"
+               "umov   %w[out], v0.4s[0]       ;"
+
+       :       [out]   "=r"(ret)
+       :       [in]    "r"(input)
+       :               "v0","v1");
+
+       return ret;
+}
+
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+                    unsigned int key_len)
+{
+       /*
+        * The AES key schedule round constants
+        */
+       static u8 const rcon[] = {
+               0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
+       };
+
+       u32 kwords = key_len / sizeof(u32);
+       struct aes_block *key_enc, *key_dec;
+       int i, j;
+
+       if (key_len != AES_KEYSIZE_128 &&
+           key_len != AES_KEYSIZE_192 &&
+           key_len != AES_KEYSIZE_256)
+               return -EINVAL;
+
+       memcpy(ctx->key_enc, in_key, key_len);
+       ctx->key_length = key_len;
+
+       kernel_neon_begin_partial(2);
+       for (i = 0; i < sizeof(rcon); i++) {
+               u32 *rki = ctx->key_enc + (i * kwords);
+               u32 *rko = rki + kwords;
+
+               rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+               rko[1] = rko[0] ^ rki[1];
+               rko[2] = rko[1] ^ rki[2];
+               rko[3] = rko[2] ^ rki[3];
+
+               if (key_len == AES_KEYSIZE_192) {
+                       if (i >= 7)
+                               break;
+                       rko[4] = rko[3] ^ rki[4];
+                       rko[5] = rko[4] ^ rki[5];
+               } else if (key_len == AES_KEYSIZE_256) {
+                       if (i >= 6)
+                               break;
+                       rko[4] = aes_sub(rko[3]) ^ rki[4];
+                       rko[5] = rko[4] ^ rki[5];
+                       rko[6] = rko[5] ^ rki[6];
+                       rko[7] = rko[6] ^ rki[7];
+               }
+       }
+
+       /*
+        * Generate the decryption keys for the Equivalent Inverse Cipher.
+        * This involves reversing the order of the round keys, and applying
+        * the Inverse Mix Columns transformation on all but the first and
+        * the last one.
+        */
+       key_enc = (struct aes_block *)ctx->key_enc;
+       key_dec = (struct aes_block *)ctx->key_dec;
+       j = num_rounds(ctx);
+
+       key_dec[0] = key_enc[j];
+       for (i = 1, j--; j > 0; i++, j--)
+               __asm__("ld1    {v0.16b}, %[in]         ;"
+                       "aesimc v1.16b, v0.16b          ;"
+                       "st1    {v1.16b}, %[out]        ;"
+
+               :       [out]   "=Q"(key_dec[i])
+               :       [in]    "Q"(key_enc[j])
+               :               "v0","v1");
+       key_dec[i] = key_enc[0];
+
+       kernel_neon_end();
+       return 0;
+}
+EXPORT_SYMBOL(ce_aes_expandkey);
+
+int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+                 unsigned int key_len)
+{
+       struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       ret = ce_aes_expandkey(ctx, in_key, key_len);
+       if (!ret)
+               return 0;
+
+       tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+       return -EINVAL;
+}
+EXPORT_SYMBOL(ce_aes_setkey);
+
 static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-ce",
@@ -135,7 +245,7 @@ static struct crypto_alg aes_alg = {
        .cra_cipher = {
                .cia_min_keysize        = AES_MIN_KEY_SIZE,
                .cia_max_keysize        = AES_MAX_KEY_SIZE,
-               .cia_setkey             = crypto_aes_set_key,
+               .cia_setkey             = ce_aes_setkey,
                .cia_encrypt            = aes_cipher_encrypt,
                .cia_decrypt            = aes_cipher_decrypt
        }
diff --git a/arch/arm64/crypto/aes-ce-setkey.h b/arch/arm64/crypto/aes-ce-setkey.h
new file mode 100644
index 000000000000..f08a6471d034
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-setkey.h
@@ -0,0 +1,5 @@
+
+int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+                 unsigned int key_len);
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+                    unsigned int key_len);
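
For context (not part of the patch): ce_aes_setkey() is reached through the generic crypto API via the .cia_setkey hook installed above. A minimal in-kernel sketch of exercising the cipher; the demo function name and key/data values are made up for illustration.

/*
 * Sketch: drive the "aes" cipher (resolved to aes-ce by cra_priority
 * on CPUs with the Crypto Extensions) through the generic API.
 */
#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>

static int aes_ce_demo(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };
        u8 in[AES_BLOCK_SIZE] = { 0 }, out[AES_BLOCK_SIZE];
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, sizeof(key)); /* ends up in ce_aes_setkey() */
        if (!ret)
                crypto_cipher_encrypt_one(tfm, out, in);

        crypto_free_cipher(tfm);
        return ret;
}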
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 79cd911ef88c..801aae32841f 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -16,9 +16,13 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 
+#include "aes-ce-setkey.h"
+
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 #define MODE "ce"
 #define PRIO 300
+#define aes_setkey ce_aes_setkey
+#define aes_expandkey ce_aes_expandkey
 #define aes_ecb_encrypt ce_aes_ecb_encrypt
 #define aes_ecb_decrypt ce_aes_ecb_decrypt
 #define aes_cbc_encrypt ce_aes_cbc_encrypt
@@ -30,6 +34,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
 #else
 #define MODE "neon"
 #define PRIO 200
+#define aes_setkey crypto_aes_set_key
+#define aes_expandkey crypto_aes_expand_key
 #define aes_ecb_encrypt neon_aes_ecb_encrypt
 #define aes_ecb_decrypt neon_aes_ecb_decrypt
 #define aes_cbc_encrypt neon_aes_cbc_encrypt
@@ -79,10 +85,10 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
        struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;
 
-       ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
+       ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
        if (!ret)
-               ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
-                                           key_len / 2);
+               ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
+                                   key_len / 2);
        if (!ret)
                return 0;
 
@@ -288,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
-               .setkey         = crypto_aes_set_key,
+               .setkey         = aes_setkey,
                .encrypt        = ecb_encrypt,
                .decrypt        = ecb_decrypt,
        },
@@ -306,7 +312,7 @@ static struct crypto_alg aes_algs[] = { {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
-               .setkey         = crypto_aes_set_key,
+               .setkey         = aes_setkey,
                .encrypt        = cbc_encrypt,
                .decrypt        = cbc_decrypt,
        },
@@ -324,7 +330,7 @@ static struct crypto_alg aes_algs[] = { {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
-               .setkey         = crypto_aes_set_key,
+               .setkey         = aes_setkey,
                .encrypt        = ctr_encrypt,
                .decrypt        = ctr_encrypt,
        },
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..919a67855b63
--- /dev/null
+++ b/arch/arm64/include/asm/alternative-asm.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ALTERNATIVE_ASM_H
+#define __ASM_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+       .word \orig_offset - .
+       .word \alt_offset - .
+       .hword \feature
+       .byte \orig_len
+       .byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661:   \insn1
+662:   .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+       .popsection
+       .pushsection .altinstr_replacement, "ax"
+663:   \insn2
+664:   .popsection
+       .if ((664b-663b) != (662b-661b))
+               .error "Alternatives instruction length mismatch"
+       .endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
new file mode 100644
index 000000000000..d261f01e2bae
--- /dev/null
+++ b/arch/arm64/include/asm/alternative.h
@@ -0,0 +1,44 @@
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+       s32 orig_offset;        /* offset to original instruction */
+       s32 alt_offset;         /* offset to replacement instruction */
+       u16 cpufeature;         /* cpufeature bit set for replacement */
+       u8  orig_len;           /* size of original instruction(s) */
+       u8  alt_len;            /* size of new instruction(s), <= orig_len */
+};
+
+void apply_alternatives_all(void);
+void apply_alternatives(void *start, size_t length);
+void free_alternatives_memory(void);
+
+#define ALTINSTR_ENTRY(feature)                                                \
+       " .word 661b - .\n"                     /* label           */   \
+       " .word 663f - .\n"                     /* new instruction */   \
+       " .hword " __stringify(feature) "\n"    /* feature bit     */   \
+       " .byte 662b-661b\n"                    /* source len      */   \
+       " .byte 664f-663f\n"                    /* replacement len */
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature)                       \
+       "661:\n\t"                                                      \
+       oldinstr "\n"                                                   \
+       "662:\n"                                                        \
+       ".pushsection .altinstructions,\"a\"\n"                         \
+       ALTINSTR_ENTRY(feature)                                         \
+       ".popsection\n"                                                 \
+       ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       "663:\n\t"                                                      \
+       newinstr "\n"                                                   \
+       "664:\n\t"                                                      \
+       ".popsection\n\t"                                               \
+       ".if ((664b-663b) != (662b-661b))\n\t"                          \
+       "       .error \"Alternatives instruction length mismatch\"\n\t"\
+       ".endif\n"
+
+#endif /* __ASM_ALTERNATIVE_H */
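
A note on the offsets (an illustrative sketch, not code from this patch): orig_offset and alt_offset are stored relative to their own struct fields (".word 661b - ."), so a patcher can turn them back into absolute addresses regardless of where the kernel is loaded. Roughly:

/*
 * Sketch: resolving struct alt_instr's self-relative offsets and
 * patching when the CPU has the relevant capability bit set. Real
 * code must patch instructions safely (not a plain memcpy) and
 * flush the I-cache for the patched range afterwards.
 */
#include <linux/string.h>

static u32 *alt_orig_ptr(struct alt_instr *alt)
{
        return (u32 *)((void *)&alt->orig_offset + alt->orig_offset);
}

static u32 *alt_repl_ptr(struct alt_instr *alt)
{
        return (u32 *)((void *)&alt->alt_offset + alt->alt_offset);
}

static void patch_region(struct alt_instr *begin, struct alt_instr *end)
{
        struct alt_instr *alt;

        for (alt = begin; alt < end; alt++) {
                if (!cpus_have_cap(alt->cpufeature))
                        continue;
                memcpy(alt_orig_ptr(alt), alt_repl_ptr(alt), alt->alt_len);
        }
}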
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 88cc05b5f3ac..bde449936e2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -32,6 +32,8 @@
 
 #ifndef __ASSEMBLY__
 
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
 static inline int cache_line_size(void)
 {
        u32 cwg = cache_type_cwg();
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 689b6379188c..7ae31a2cc6c0 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
-extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ddb9d7830558..cb9593079f29 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,6 +19,7 @@
 #define __ASM_CMPXCHG_H
 
 #include <linux/bug.h>
+#include <linux/mmdebug.h>
 
 #include <asm/barrier.h>
 
@@ -152,6 +153,51 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        return oldval;
 }
 
+#define system_has_cmpxchg_double()     1
+
+static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
+               unsigned long old1, unsigned long old2,
+               unsigned long new1, unsigned long new2, int size)
+{
+       unsigned long loop, lost;
+
+       switch (size) {
+       case 8:
+               VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
+               do {
+                       asm volatile("// __cmpxchg_double8\n"
+                       "       ldxp    %0, %1, %2\n"
+                       "       eor     %0, %0, %3\n"
+                       "       eor     %1, %1, %4\n"
+                       "       orr     %1, %0, %1\n"
+                       "       mov     %w0, #0\n"
+                       "       cbnz    %1, 1f\n"
+                       "       stxp    %w0, %5, %6, %2\n"
+                       "1:\n"
+                       : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
+                       : "r" (old1), "r"(old2), "r"(new1), "r"(new2));
+               } while (loop);
+               break;
+       default:
+               BUILD_BUG();
+       }
+
+       return !lost;
+}
+
+static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
+               unsigned long old1, unsigned long old2,
+               unsigned long new1, unsigned long new2, int size)
+{
+       int ret;
+
+       smp_mb();
+       ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
+       smp_mb();
+
+       return ret;
+}
+
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
                                         unsigned long new, int size)
 {
@@ -182,6 +228,33 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret; \
 })
 
+#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+       int __ret;\
+       __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
+                       (unsigned long)(o2), (unsigned long)(n1), \
+                       (unsigned long)(n2), sizeof(*(ptr1)));\
+       __ret; \
+})
+
+#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+       int __ret;\
+       __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
+                       (unsigned long)(o2), (unsigned long)(n1), \
+                       (unsigned long)(n2), sizeof(*(ptr1)));\
+       __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+       cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
+                       o1, o2, n1, n2)
+
 #define cmpxchg64(ptr,o,n)             cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)       cmpxchg_local((ptr),(o),(n))
 
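
For illustration (not part of the patch): cmpxchg_double() compares and swaps two adjacent machine words in one atomic LDXP/STXP sequence. A minimal sketch with a hypothetical structure; the API requires the two words to be contiguous, which the VM_BUG_ON() in __cmpxchg_double() enforces.

/*
 * Sketch: a pointer/generation pair updated atomically. The struct
 * and function are hypothetical; cmpxchg_double() returns nonzero on
 * success, 0 if either word changed under us.
 */
struct tagged_ptr {
        void *ptr;              /* first word */
        unsigned long gen;      /* second word, immediately after */
} __aligned(16);

static bool update(struct tagged_ptr *tp, void *old_ptr,
                   unsigned long old_gen, void *new_ptr)
{
        return cmpxchg_double(&tp->ptr, &tp->gen,
                              old_ptr, old_gen,
                              new_ptr, old_gen + 1);
}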
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 56de5aadede2..3fb053fa6e98 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -205,6 +205,13 @@ typedef struct compat_siginfo {
                        compat_long_t _band;    /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
+
+               /* SIGSYS */
+               struct {
+                       compat_uptr_t _call_addr; /* calling user insn */
+                       int _syscall;   /* triggering system call number */
+                       compat_uint_t _arch;    /* AUDIT_ARCH_* of syscall */
+               } _sigsys;
        } _sifields;
 } compat_siginfo_t;
 
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 056443086019..ace70682499b 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -30,6 +30,8 @@ struct cpuinfo_arm64 {
        u32             reg_dczid;
        u32             reg_midr;
 
+       u64             reg_id_aa64dfr0;
+       u64             reg_id_aa64dfr1;
        u64             reg_id_aa64isar0;
        u64             reg_id_aa64isar1;
        u64             reg_id_aa64mmfr0;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index cd4ac0516488..07547ccc1f2b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -21,9 +21,38 @@
 #define MAX_CPU_FEATURES       (8 * sizeof(elf_hwcap))
 #define cpu_feature(x)         ilog2(HWCAP_ ## x)
 
+#define ARM64_WORKAROUND_CLEAN_CACHE           0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE   1
+
+#define ARM64_NCAPS                            2
+
+#ifndef __ASSEMBLY__
+
+extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+
 static inline bool cpu_have_feature(unsigned int num)
 {
        return elf_hwcap & (1UL << num);
 }
 
+static inline bool cpus_have_cap(unsigned int num)
+{
+       if (num >= ARM64_NCAPS)
+               return false;
+       return test_bit(num, cpu_hwcaps);
+}
+
+static inline void cpus_set_cap(unsigned int num)
+{
+       if (num >= ARM64_NCAPS)
+               pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
+                       num, ARM64_NCAPS);
+       else
+               __set_bit(num, cpu_hwcaps);
+}
+
+void check_local_cpu_errata(void);
+
+#endif /* __ASSEMBLY__ */
+
 #endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 379d0b874328..8adb986a3086 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -57,6 +57,11 @@
 #define MIDR_IMPLEMENTOR(midr) \
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
+#define MIDR_CPU_PART(imp, partnum) \
+       (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
+       (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
+       ((partnum)              << MIDR_PARTNUM_SHIFT))
+
 #define ARM_CPU_IMP_ARM                0x41
 #define ARM_CPU_IMP_APM                0x50
 
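
For illustration (not part of the patch): MIDR_CPU_PART() composes an implementer code and part number into a MIDR template, which errata code can match against a CPU's MIDR with the variant/revision bits masked off. A sketch, assuming the usual *_MASK companions to the shift constants used above; 0xd07 is the Cortex-A57 part number, written literally here in case a named constant is not yet defined in this tree.

/* Sketch: match any revision of Cortex-A57 by MIDR. */
#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, 0xd07)

static bool is_cortex_a57(u32 midr)
{
        u32 mask = MIDR_IMPLEMENTOR_MASK | MIDR_ARCHITECTURE_MASK |
                   MIDR_PARTNUM_MASK;

        return (midr & mask) == MIDR_CORTEX_A57;
}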
diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h
new file mode 100644
index 000000000000..69d37d87b159
--- /dev/null
+++ b/arch/arm64/include/asm/dmi.h
@@ -0,0 +1,31 @@
+/*
+ * arch/arm64/include/asm/dmi.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Written by: Yi Li (yi.li@linaro.org)
+ *
+ * based on arch/ia64/include/asm/dmi.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/*
+ * According to section 2.3.6 of the UEFI spec, the firmware should not
+ * request a virtual mapping for configuration tables such as SMBIOS.
+ * This means we have to map them before use.
+ */
+#define dmi_early_remap(x, l)          ioremap_cache(x, l)
+#define dmi_early_unmap(x, l)          iounmap(x)
+#define dmi_remap(x, l)                        ioremap_cache(x, l)
+#define dmi_unmap(x)                   iounmap(x)
+#define dmi_alloc(l)                   kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 5f7bfe6df723..9ef6eca905ca 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -31,6 +31,7 @@
  *
  */
 enum fixed_addresses {
+       FIX_HOLE,
        FIX_EARLYCON_MEM_BASE,
        __end_of_permanent_fixed_addresses,
 
@@ -56,10 +57,11 @@ enum fixed_addresses {
 
 #define FIXMAP_PAGE_IO     __pgprot(PROT_DEVICE_nGnRE)
 
-extern void __early_set_fixmap(enum fixed_addresses idx,
-                              phys_addr_t phys, pgprot_t flags);
+void __init early_fixmap_init(void);
 
-#define __set_fixmap __early_set_fixmap
+#define __early_set_fixmap __set_fixmap
+
+extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
 
 #include <asm-generic/fixmap.h>
 
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 024c46183c3c..0ad735166d9f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
 #define COMPAT_HWCAP_IDIVA     (1 << 17)
 #define COMPAT_HWCAP_IDIVT     (1 << 18)
 #define COMPAT_HWCAP_IDIV      (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_LPAE      (1 << 20)
 #define COMPAT_HWCAP_EVTSTRM   (1 << 21)
 
 #define COMPAT_HWCAP2_AES      (1 << 0)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 56a9e63b6c33..e2ff32a93b5c 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -354,6 +354,16 @@ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+bool aarch32_insn_is_wide(u32 insn);
+
+#define A32_RN_OFFSET  16
+#define A32_RT_OFFSET  12
+#define A32_RT2_OFFSET  0
+
+u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
+u32 aarch32_insn_mcr_extract_opc2(u32 insn);
+u32 aarch32_insn_mcr_extract_crm(u32 insn);
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_INSN_H */
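
For illustration (not part of the patch): the A32_*_OFFSET values are the bit positions of four-bit register fields in a 32-bit A32 encoding, so extraction is a plain shift-and-mask. A sketch of what aarch32_insn_extract_reg_num() amounts to:

/* Sketch: pull a 4-bit register field out of an A32 instruction word. */
static u32 extract_reg_num(u32 insn, int offset)
{
        return (insn >> offset) & 0xf;
}
/* e.g. extract_reg_num(insn, A32_RT_OFFSET) yields the Rt field. */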
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 79f1d519221f..75825b63464d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -28,6 +28,8 @@
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 #include <asm/early_ioremap.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 
 #include <xen/xen.h>
 
@@ -57,28 +59,41 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
 static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
        u8 val;
-       asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+       asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
+                                "ldarb %w0, [%1]",
+                                ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+                    : "=r" (val) : "r" (addr));
        return val;
 }
 
 static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
        u16 val;
-       asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+
+       asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
+                                "ldarh %w0, [%1]",
+                                ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+                    : "=r" (val) : "r" (addr));
        return val;
 }
 
 static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
        u32 val;
-       asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+       asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+                                "ldar %w0, [%1]",
+                                ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+                    : "=r" (val) : "r" (addr));
        return val;
 }
 
 static inline u64 __raw_readq(const volatile void __iomem *addr)
 {
        u64 val;
-       asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+       asm volatile(ALTERNATIVE("ldr %0, [%1]",
+                                "ldar %0, [%1]",
+                                ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+                    : "=r" (val) : "r" (addr));
        return val;
 }
 
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index e1f7ecdde11f..94c53674a31d 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -3,7 +3,8 @@
 
 #include <asm-generic/irq.h>
 
-extern void (*handle_arch_irq)(struct pt_regs *);
+struct pt_regs;
+
 extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7fd3e27e3ccc..8afb863f5a9e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
 #ifndef __ARM64_KVM_ARM_H__
 #define __ARM64_KVM_ARM_H__
 
+#include <asm/memory.h>
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
@@ -160,9 +161,9 @@
 #endif
 
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT  (48LLU)
-#define VTTBR_VMID_MASK   (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  (UL(48))
+#define VTTBR_VMID_MASK   (UL(0xFF) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
 #define HSTR_EL2_TTEE  (1 << 16)
@@ -185,13 +186,13 @@
 
 /* Exception Syndrome Register (ESR) bits */
 #define ESR_EL2_EC_SHIFT       (26)
-#define ESR_EL2_EC             (0x3fU << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL             (1U << 25)
+#define ESR_EL2_EC             (UL(0x3f) << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL             (UL(1) << 25)
 #define ESR_EL2_ISS            (ESR_EL2_IL - 1)
 #define ESR_EL2_ISV_SHIFT      (24)
-#define ESR_EL2_ISV            (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_ISV            (UL(1) << ESR_EL2_ISV_SHIFT)
 #define ESR_EL2_SAS_SHIFT      (22)
-#define ESR_EL2_SAS            (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SAS            (UL(3) << ESR_EL2_SAS_SHIFT)
 #define ESR_EL2_SSE            (1 << 21)
 #define ESR_EL2_SRT_SHIFT      (16)
 #define ESR_EL2_SRT_MASK       (0x1f << ESR_EL2_SRT_SHIFT)
@@ -205,16 +206,16 @@
 #define ESR_EL2_FSC_TYPE       (0x3c)
 
 #define ESR_EL2_CV_SHIFT       (24)
-#define ESR_EL2_CV             (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_CV             (UL(1) << ESR_EL2_CV_SHIFT)
 #define ESR_EL2_COND_SHIFT     (20)
-#define ESR_EL2_COND           (0xfU << ESR_EL2_COND_SHIFT)
+#define ESR_EL2_COND           (UL(0xf) << ESR_EL2_COND_SHIFT)
 
 
 #define FSC_FAULT      (0x04)
 #define FSC_PERM       (0x0c)
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK     (~0xFUL)
+#define HPFAR_MASK     (~UL(0xf))
 
 #define ESR_EL2_EC_UNKNOWN     (0x00)
 #define ESR_EL2_EC_WFI         (0x01)
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
new file mode 100644
index 000000000000..4e603ea36ad3
--- /dev/null
+++ b/arch/arm64/include/asm/opcodes.h
@@ -0,0 +1 @@
+#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 5279e5733386..09da25bc596f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
@@ -44,6 +44,221 @@ static inline unsigned long __my_cpu_offset(void) | |||
44 | 44 | ||
45 | #endif /* CONFIG_SMP */ | 45 | #endif /* CONFIG_SMP */ |
46 | 46 | ||
47 | #define PERCPU_OP(op, asm_op) \ | ||
48 | static inline unsigned long __percpu_##op(void *ptr, \ | ||
49 | unsigned long val, int size) \ | ||
50 | { \ | ||
51 | unsigned long loop, ret; \ | ||
52 | \ | ||
53 | switch (size) { \ | ||
54 | case 1: \ | ||
55 | do { \ | ||
56 | asm ("//__per_cpu_" #op "_1\n" \ | ||
57 | "ldxrb %w[ret], %[ptr]\n" \ | ||
58 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | ||
59 | "stxrb %w[loop], %w[ret], %[ptr]\n" \ | ||
60 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | ||
61 | [ptr] "+Q"(*(u8 *)ptr) \ | ||
62 | : [val] "Ir" (val)); \ | ||
63 | } while (loop); \ | ||
64 | break; \ | ||
65 | case 2: \ | ||
66 | do { \ | ||
67 | asm ("//__per_cpu_" #op "_2\n" \ | ||
68 | "ldxrh %w[ret], %[ptr]\n" \ | ||
69 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | ||
70 | "stxrh %w[loop], %w[ret], %[ptr]\n" \ | ||
71 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | ||
72 | [ptr] "+Q"(*(u16 *)ptr) \ | ||
73 | : [val] "Ir" (val)); \ | ||
74 | } while (loop); \ | ||
75 | break; \ | ||
76 | case 4: \ | ||
77 | do { \ | ||
78 | asm ("//__per_cpu_" #op "_4\n" \ | ||
79 | "ldxr %w[ret], %[ptr]\n" \ | ||
80 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ | ||
81 | "stxr %w[loop], %w[ret], %[ptr]\n" \ | ||
82 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | ||
83 | [ptr] "+Q"(*(u32 *)ptr) \ | ||
84 | : [val] "Ir" (val)); \ | ||
85 | } while (loop); \ | ||
86 | break; \ | ||
87 | case 8: \ | ||
88 | do { \ | ||
89 | asm ("//__per_cpu_" #op "_8\n" \ | ||
90 | "ldxr %[ret], %[ptr]\n" \ | ||
91 | #asm_op " %[ret], %[ret], %[val]\n" \ | ||
92 | "stxr %w[loop], %[ret], %[ptr]\n" \ | ||
93 | : [loop] "=&r" (loop), [ret] "=&r" (ret), \ | ||
94 | [ptr] "+Q"(*(u64 *)ptr) \ | ||
95 | : [val] "Ir" (val)); \ | ||
96 | } while (loop); \ | ||
97 | break; \ | ||
98 | default: \ | ||
99 | BUILD_BUG(); \ | ||
100 | } \ | ||
101 | \ | ||
102 | return ret; \ | ||
103 | } | ||
104 | |||
105 | PERCPU_OP(add, add) | ||
106 | PERCPU_OP(and, and) | ||
107 | PERCPU_OP(or, orr) | ||
108 | #undef PERCPU_OP | ||
109 | |||
110 | static inline unsigned long __percpu_read(void *ptr, int size) | ||
111 | { | ||
112 | unsigned long ret; | ||
113 | |||
114 | switch (size) { | ||
115 | case 1: | ||
116 | ret = ACCESS_ONCE(*(u8 *)ptr); | ||
117 | break; | ||
118 | case 2: | ||
119 | ret = ACCESS_ONCE(*(u16 *)ptr); | ||
120 | break; | ||
121 | case 4: | ||
122 | ret = ACCESS_ONCE(*(u32 *)ptr); | ||
123 | break; | ||
124 | case 8: | ||
125 | ret = ACCESS_ONCE(*(u64 *)ptr); | ||
126 | break; | ||
127 | default: | ||
128 | BUILD_BUG(); | ||
129 | } | ||
130 | |||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | static inline void __percpu_write(void *ptr, unsigned long val, int size) | ||
135 | { | ||
136 | switch (size) { | ||
137 | case 1: | ||
138 | ACCESS_ONCE(*(u8 *)ptr) = (u8)val; | ||
139 | break; | ||
140 | case 2: | ||
141 | ACCESS_ONCE(*(u16 *)ptr) = (u16)val; | ||
142 | break; | ||
143 | case 4: | ||
144 | ACCESS_ONCE(*(u32 *)ptr) = (u32)val; | ||
145 | break; | ||
146 | case 8: | ||
147 | ACCESS_ONCE(*(u64 *)ptr) = (u64)val; | ||
148 | break; | ||
149 | default: | ||
150 | BUILD_BUG(); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, | ||
155 | int size) | ||
156 | { | ||
157 | unsigned long ret, loop; | ||
158 | |||
159 | switch (size) { | ||
160 | case 1: | ||
161 | do { | ||
162 | asm ("//__percpu_xchg_1\n" | ||
163 | "ldxrb %w[ret], %[ptr]\n" | ||
164 | "stxrb %w[loop], %w[val], %[ptr]\n" | ||
165 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | ||
166 | [ptr] "+Q"(*(u8 *)ptr) | ||
167 | : [val] "r" (val)); | ||
168 | } while (loop); | ||
169 | break; | ||
170 | case 2: | ||
171 | do { | ||
172 | asm ("//__percpu_xchg_2\n" | ||
173 | "ldxrh %w[ret], %[ptr]\n" | ||
174 | "stxrh %w[loop], %w[val], %[ptr]\n" | ||
175 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | ||
176 | [ptr] "+Q"(*(u16 *)ptr) | ||
177 | : [val] "r" (val)); | ||
178 | } while (loop); | ||
179 | break; | ||
180 | case 4: | ||
181 | do { | ||
182 | asm ("//__percpu_xchg_4\n" | ||
183 | "ldxr %w[ret], %[ptr]\n" | ||
184 | "stxr %w[loop], %w[val], %[ptr]\n" | ||
185 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | ||
186 | [ptr] "+Q"(*(u32 *)ptr) | ||
187 | : [val] "r" (val)); | ||
188 | } while (loop); | ||
189 | break; | ||
190 | case 8: | ||
191 | do { | ||
192 | asm ("//__percpu_xchg_8\n" | ||
193 | "ldxr %[ret], %[ptr]\n" | ||
194 | "stxr %w[loop], %[val], %[ptr]\n" | ||
195 | : [loop] "=&r"(loop), [ret] "=&r"(ret), | ||
196 | [ptr] "+Q"(*(u64 *)ptr) | ||
197 | : [val] "r" (val)); | ||
198 | } while (loop); | ||
199 | break; | ||
200 | default: | ||
201 | BUILD_BUG(); | ||
202 | } | ||
203 | |||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | #define _percpu_add(pcp, val) \ | ||
208 | __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | ||
209 | |||
210 | #define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) | ||
211 | |||
212 | #define _percpu_and(pcp, val) \ | ||
213 | __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | ||
214 | |||
215 | #define _percpu_or(pcp, val) \ | ||
216 | __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | ||
217 | |||
218 | #define _percpu_read(pcp) (typeof(pcp)) \ | ||
219 | (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) | ||
220 | |||
221 | #define _percpu_write(pcp, val) \ | ||
222 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) | ||
223 | |||
224 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ | ||
225 | (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) | ||
226 | |||
227 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) | ||
228 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) | ||
229 | #define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) | ||
230 | #define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) | ||
231 | |||
232 | #define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val) | ||
233 | #define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val) | ||
234 | #define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) | ||
235 | #define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) | ||
236 | |||
237 | #define this_cpu_and_1(pcp, val) _percpu_and(pcp, val) | ||
238 | #define this_cpu_and_2(pcp, val) _percpu_and(pcp, val) | ||
239 | #define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) | ||
240 | #define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) | ||
241 | |||
242 | #define this_cpu_or_1(pcp, val) _percpu_or(pcp, val) | ||
243 | #define this_cpu_or_2(pcp, val) _percpu_or(pcp, val) | ||
244 | #define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) | ||
245 | #define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) | ||
246 | |||
247 | #define this_cpu_read_1(pcp) _percpu_read(pcp) | ||
248 | #define this_cpu_read_2(pcp) _percpu_read(pcp) | ||
249 | #define this_cpu_read_4(pcp) _percpu_read(pcp) | ||
250 | #define this_cpu_read_8(pcp) _percpu_read(pcp) | ||
251 | |||
252 | #define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) | ||
253 | #define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) | ||
254 | #define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) | ||
255 | #define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) | ||
256 | |||
257 | #define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) | ||
258 | #define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) | ||
259 | #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) | ||
260 | #define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) | ||
261 | |||
47 | #include <asm-generic/percpu.h> | 262 | #include <asm-generic/percpu.h> |
48 | 263 | ||
49 | #endif /* __ASM_PERCPU_H */ | 264 | #endif /* __ASM_PERCPU_H */ |
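A usage sketch (illustrative, not taken from this patch): with these definitions in place, the generic this_cpu_* accessors resolve to the exclusive-load/store routines above instead of the asm-generic fallbacks that disable and re-enable interrupts around each access:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(u64, hits);

    static void count_hit(void)
    {
            /* expands to this_cpu_add_8() -> _percpu_add(), i.e. an
             * ldxr/stxr loop on this CPU's copy of 'hits' */
            this_cpu_add(hits, 1);
    }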
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d5bed02073d6..e20df38a8ff3 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h | |||
@@ -26,11 +26,13 @@ | |||
26 | 26 | ||
27 | #define check_pgt_cache() do { } while (0) | 27 | #define check_pgt_cache() do { } while (0) |
28 | 28 | ||
29 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | ||
30 | |||
29 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | 31 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 |
30 | 32 | ||
31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 33 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
32 | { | 34 | { |
33 | return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | 35 | return (pmd_t *)__get_free_page(PGALLOC_GFP); |
34 | } | 36 | } |
35 | 37 | ||
36 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 38 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
@@ -50,7 +52,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
50 | 52 | ||
51 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 53 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
52 | { | 54 | { |
53 | return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | 55 | return (pud_t *)__get_free_page(PGALLOC_GFP); |
54 | } | 56 | } |
55 | 57 | ||
56 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | 58 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
@@ -69,8 +71,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) | |||
69 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | 71 | extern pgd_t *pgd_alloc(struct mm_struct *mm); |
70 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | 72 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
71 | 73 | ||
72 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | ||
73 | |||
74 | static inline pte_t * | 74 | static inline pte_t * |
75 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | 75 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) |
76 | { | 76 | { |
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h new file mode 100644 index 000000000000..c76fac979629 --- /dev/null +++ b/arch/arm64/include/asm/seccomp.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * arch/arm64/include/asm/seccomp.h | ||
3 | * | ||
4 | * Copyright (C) 2014 Linaro Limited | ||
5 | * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef _ASM_SECCOMP_H | ||
12 | #define _ASM_SECCOMP_H | ||
13 | |||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | #ifdef CONFIG_COMPAT | ||
17 | #define __NR_seccomp_read_32 __NR_compat_read | ||
18 | #define __NR_seccomp_write_32 __NR_compat_write | ||
19 | #define __NR_seccomp_exit_32 __NR_compat_exit | ||
20 | #define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn | ||
21 | #endif /* CONFIG_COMPAT */ | ||
22 | |||
23 | #include <asm-generic/seccomp.h> | ||
24 | |||
25 | #endif /* _ASM_SECCOMP_H */ | ||
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index a82c0c5c8b52..c028fe37456f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -19,10 +19,6 @@ | |||
19 | #ifndef __ASM_TLB_H | 19 | #ifndef __ASM_TLB_H |
20 | #define __ASM_TLB_H | 20 | #define __ASM_TLB_H |
21 | 21 | ||
22 | #define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry | ||
23 | |||
24 | #include <asm-generic/tlb.h> | ||
25 | |||
26 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
27 | #include <linux/swap.h> | 23 | #include <linux/swap.h> |
28 | 24 | ||
@@ -37,71 +33,22 @@ static inline void __tlb_remove_table(void *_table) | |||
37 | #define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) | 33 | #define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) |
38 | #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ | 34 | #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ |
39 | 35 | ||
40 | /* | 36 | #include <asm-generic/tlb.h> |
41 | * There's three ways the TLB shootdown code is used: | 37 | |
42 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). | ||
43 | * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. | ||
44 | * 2. Unmapping all vmas. See exit_mmap(). | ||
45 | * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. | ||
46 | * Page tables will be freed. | ||
47 | * 3. Unmapping argument pages. See shift_arg_pages(). | ||
48 | * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. | ||
49 | */ | ||
50 | static inline void tlb_flush(struct mmu_gather *tlb) | 38 | static inline void tlb_flush(struct mmu_gather *tlb) |
51 | { | 39 | { |
52 | if (tlb->fullmm) { | 40 | if (tlb->fullmm) { |
53 | flush_tlb_mm(tlb->mm); | 41 | flush_tlb_mm(tlb->mm); |
54 | } else if (tlb->end > 0) { | 42 | } else { |
55 | struct vm_area_struct vma = { .vm_mm = tlb->mm, }; | 43 | struct vm_area_struct vma = { .vm_mm = tlb->mm, }; |
56 | flush_tlb_range(&vma, tlb->start, tlb->end); | 44 | flush_tlb_range(&vma, tlb->start, tlb->end); |
57 | tlb->start = TASK_SIZE; | ||
58 | tlb->end = 0; | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) | ||
63 | { | ||
64 | if (!tlb->fullmm) { | ||
65 | tlb->start = min(tlb->start, addr); | ||
66 | tlb->end = max(tlb->end, addr + PAGE_SIZE); | ||
67 | } | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Memorize the range for the TLB flush. | ||
72 | */ | ||
73 | static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, | ||
74 | unsigned long addr) | ||
75 | { | ||
76 | tlb_add_flush(tlb, addr); | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * In the case of tlb vma handling, we can optimise these away in the | ||
81 | * case where we're doing a full MM flush. When we're doing a munmap, | ||
82 | * the vmas are adjusted to only cover the region to be torn down. | ||
83 | */ | ||
84 | static inline void tlb_start_vma(struct mmu_gather *tlb, | ||
85 | struct vm_area_struct *vma) | ||
86 | { | ||
87 | if (!tlb->fullmm) { | ||
88 | tlb->start = TASK_SIZE; | ||
89 | tlb->end = 0; | ||
90 | } | 45 | } |
91 | } | 46 | } |
92 | 47 | ||
93 | static inline void tlb_end_vma(struct mmu_gather *tlb, | ||
94 | struct vm_area_struct *vma) | ||
95 | { | ||
96 | if (!tlb->fullmm) | ||
97 | tlb_flush(tlb); | ||
98 | } | ||
99 | |||
100 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
101 | unsigned long addr) | 49 | unsigned long addr) |
102 | { | 50 | { |
103 | pgtable_page_dtor(pte); | 51 | pgtable_page_dtor(pte); |
104 | tlb_add_flush(tlb, addr); | ||
105 | tlb_remove_entry(tlb, pte); | 52 | tlb_remove_entry(tlb, pte); |
106 | } | 53 | } |
107 | 54 | ||
@@ -109,7 +56,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
109 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 56 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
110 | unsigned long addr) | 57 | unsigned long addr) |
111 | { | 58 | { |
112 | tlb_add_flush(tlb, addr); | ||
113 | tlb_remove_entry(tlb, virt_to_page(pmdp)); | 59 | tlb_remove_entry(tlb, virt_to_page(pmdp)); |
114 | } | 60 | } |
115 | #endif | 61 | #endif |
@@ -118,15 +64,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
118 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | 64 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, |
119 | unsigned long addr) | 65 | unsigned long addr) |
120 | { | 66 | { |
121 | tlb_add_flush(tlb, addr); | ||
122 | tlb_remove_entry(tlb, virt_to_page(pudp)); | 67 | tlb_remove_entry(tlb, virt_to_page(pudp)); |
123 | } | 68 | } |
124 | #endif | 69 | #endif |
125 | 70 | ||
126 | static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, | ||
127 | unsigned long address) | ||
128 | { | ||
129 | tlb_add_flush(tlb, address); | ||
130 | } | ||
131 | |||
132 | #endif | 71 | #endif |
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 10ca8ff93cc2..232e4ba5d314 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h | |||
@@ -18,6 +18,22 @@ | |||
18 | #ifndef __ASM_TRAP_H | 18 | #ifndef __ASM_TRAP_H |
19 | #define __ASM_TRAP_H | 19 | #define __ASM_TRAP_H |
20 | 20 | ||
21 | #include <linux/list.h> | ||
22 | |||
23 | struct pt_regs; | ||
24 | |||
25 | struct undef_hook { | ||
26 | struct list_head node; | ||
27 | u32 instr_mask; | ||
28 | u32 instr_val; | ||
29 | u64 pstate_mask; | ||
30 | u64 pstate_val; | ||
31 | int (*fn)(struct pt_regs *regs, u32 instr); | ||
32 | }; | ||
33 | |||
34 | void register_undef_hook(struct undef_hook *hook); | ||
35 | void unregister_undef_hook(struct undef_hook *hook); | ||
36 | |||
21 | static inline int in_exception_text(unsigned long ptr) | 37 | static inline int in_exception_text(unsigned long ptr) |
22 | { | 38 | { |
23 | extern char __exception_text_start[]; | 39 | extern char __exception_text_start[]; |
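A consumer registers a hook roughly like this (sketch only; the mask/value pair is the SWP encoding used later in this series, shown here purely for illustration):

    static int my_handler(struct pt_regs *regs, u32 instr)
    {
            /* emulate the instruction, then step past it */
            regs->pc += 4;
            return 0;
    }

    static struct undef_hook my_hook = {
            .instr_mask  = 0x0fb00ff0,          /* encoding bits to match */
            .instr_val   = 0x01000090,          /* required values        */
            .pstate_mask = COMPAT_PSR_MODE_MASK,
            .pstate_val  = COMPAT_PSR_MODE_USR, /* AArch32 user mode only */
            .fn          = my_handler,
    };

    static int __init my_init(void)
    {
            register_undef_hook(&my_hook);
            return 0;
    }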
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 6d2bf419431d..49c9aefd24a5 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
@@ -31,6 +31,9 @@ | |||
31 | * Compat syscall numbers used by the AArch64 kernel. | 31 | * Compat syscall numbers used by the AArch64 kernel. |
32 | */ | 32 | */ |
33 | #define __NR_compat_restart_syscall 0 | 33 | #define __NR_compat_restart_syscall 0 |
34 | #define __NR_compat_exit 1 | ||
35 | #define __NR_compat_read 3 | ||
36 | #define __NR_compat_write 4 | ||
34 | #define __NR_compat_sigreturn 119 | 37 | #define __NR_compat_sigreturn 119 |
35 | #define __NR_compat_rt_sigreturn 173 | 38 | #define __NR_compat_rt_sigreturn 173 |
36 | 39 | ||
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 9dfdac4a74a1..8893cebcea5b 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
@@ -787,7 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr) | |||
787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) | 787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) |
788 | #define __NR_renameat2 382 | 788 | #define __NR_renameat2 382 |
789 | __SYSCALL(__NR_renameat2, sys_renameat2) | 789 | __SYSCALL(__NR_renameat2, sys_renameat2) |
790 | /* 383 for seccomp */ | 790 | #define __NR_seccomp 383 |
791 | __SYSCALL(__NR_seccomp, sys_seccomp) | ||
791 | #define __NR_getrandom 384 | 792 | #define __NR_getrandom 384 |
792 | __SYSCALL(__NR_getrandom, sys_getrandom) | 793 | __SYSCALL(__NR_getrandom, sys_getrandom) |
793 | #define __NR_memfd_create 385 | 794 | #define __NR_memfd_create 385 |
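A user-space smoke test for the newly wired syscall might look like this (illustrative, not part of the patch; __NR_seccomp is 383 in the compat table above and 277 in the native asm-generic table):

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* trivial filter: allow every syscall */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = { .len = 1, .filter = insns };

            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return 1;
            return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog)
                    ? 1 : 0;
    }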
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 5bd029b43644..eaa77ed7766a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) | 5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) |
6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | 6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
7 | CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | 7 | CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
8 | CFLAGS_armv8_deprecated.o := -I$(src) | ||
8 | 9 | ||
9 | CFLAGS_REMOVE_ftrace.o = -pg | 10 | CFLAGS_REMOVE_ftrace.o = -pg |
10 | CFLAGS_REMOVE_insn.o = -pg | 11 | CFLAGS_REMOVE_insn.o = -pg |
@@ -15,10 +16,11 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | |||
15 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 16 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
16 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 17 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
17 | hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ | 18 | hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ |
18 | cpuinfo.o | 19 | cpuinfo.o cpu_errata.o alternative.o |
19 | 20 | ||
20 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 21 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
21 | sys_compat.o | 22 | sys_compat.o \ |
23 | ../../arm/kernel/opcodes.o | ||
22 | arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o | 24 | arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o |
23 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o | 25 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o |
24 | arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o | 26 | arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o |
@@ -31,6 +33,7 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o | |||
31 | arm64-obj-$(CONFIG_KGDB) += kgdb.o | 33 | arm64-obj-$(CONFIG_KGDB) += kgdb.o |
32 | arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o | 34 | arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o |
33 | arm64-obj-$(CONFIG_PCI) += pci.o | 35 | arm64-obj-$(CONFIG_PCI) += pci.o |
36 | arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o | ||
34 | 37 | ||
35 | obj-y += $(arm64-obj-y) vdso/ | 38 | obj-y += $(arm64-obj-y) vdso/ |
36 | obj-m += $(arm64-obj-m) | 39 | obj-m += $(arm64-obj-m) |
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c new file mode 100644 index 000000000000..ad7821d64a1d --- /dev/null +++ b/arch/arm64/kernel/alternative.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * alternative runtime patching | ||
3 | * inspired by the x86 version | ||
4 | * | ||
5 | * Copyright (C) 2014 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #define pr_fmt(fmt) "alternatives: " fmt | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/cpu.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/alternative.h> | ||
26 | #include <asm/cpufeature.h> | ||
27 | #include <linux/stop_machine.h> | ||
28 | |||
29 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | ||
30 | |||
31 | struct alt_region { | ||
32 | struct alt_instr *begin; | ||
33 | struct alt_instr *end; | ||
34 | }; | ||
35 | |||
36 | static int __apply_alternatives(void *alt_region) | ||
37 | { | ||
38 | struct alt_instr *alt; | ||
39 | struct alt_region *region = alt_region; | ||
40 | u8 *origptr, *replptr; | ||
41 | |||
42 | for (alt = region->begin; alt < region->end; alt++) { | ||
43 | if (!cpus_have_cap(alt->cpufeature)) | ||
44 | continue; | ||
45 | |||
46 | BUG_ON(alt->alt_len > alt->orig_len); | ||
47 | |||
48 | pr_info_once("patching kernel code\n"); | ||
49 | |||
50 | origptr = (u8 *)&alt->orig_offset + alt->orig_offset; | ||
51 | replptr = (u8 *)&alt->alt_offset + alt->alt_offset; | ||
52 | memcpy(origptr, replptr, alt->alt_len); | ||
53 | flush_icache_range((uintptr_t)origptr, | ||
54 | (uintptr_t)(origptr + alt->alt_len)); | ||
55 | } | ||
56 | |||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | void apply_alternatives_all(void) | ||
61 | { | ||
62 | struct alt_region region = { | ||
63 | .begin = __alt_instructions, | ||
64 | .end = __alt_instructions_end, | ||
65 | }; | ||
66 | |||
67 | /* better not try code patching on a live SMP system */ | ||
68 | stop_machine(__apply_alternatives, ®ion, NULL); | ||
69 | } | ||
70 | |||
71 | void apply_alternatives(void *start, size_t length) | ||
72 | { | ||
73 | struct alt_region region = { | ||
74 | .begin = start, | ||
75 | .end = start + length, | ||
76 | }; | ||
77 | |||
78 | __apply_alternatives(®ion); | ||
79 | } | ||
80 | |||
81 | void free_alternatives_memory(void) | ||
82 | { | ||
83 | free_reserved_area(__alt_instructions, __alt_instructions_end, | ||
84 | 0, "alternatives"); | ||
85 | } | ||
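The origptr/replptr arithmetic above relies on struct alt_instr storing self-relative offsets (field layout assumed from asm/alternative.h); the idiom in isolation:

    /* A 32-bit field holds (target - &field); adding the field's own
     * address back recovers the target.  This keeps .altinstructions
     * position-independent across relocation. */
    static inline u8 *self_rel_deref(s32 *field)
    {
            return (u8 *)field + *field;
    }

    /* e.g.: origptr = self_rel_deref(&alt->orig_offset); */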
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c new file mode 100644 index 000000000000..c363671d7509 --- /dev/null +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
@@ -0,0 +1,553 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 ARM Limited | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/cpu.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/perf_event.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/sysctl.h> | ||
16 | |||
17 | #include <asm/insn.h> | ||
18 | #include <asm/opcodes.h> | ||
19 | #include <asm/system_misc.h> | ||
20 | #include <asm/traps.h> | ||
21 | #include <asm/uaccess.h> | ||
22 | |||
23 | #define CREATE_TRACE_POINTS | ||
24 | #include "trace-events-emulation.h" | ||
25 | |||
26 | /* | ||
27 | * The runtime support for a deprecated instruction can be in one of the | ||
28 | * following three states - | ||
29 | * | ||
30 | * 0 = undef | ||
31 | * 1 = emulate (software emulation) | ||
32 | * 2 = hw (supported in hardware) | ||
33 | */ | ||
34 | enum insn_emulation_mode { | ||
35 | INSN_UNDEF, | ||
36 | INSN_EMULATE, | ||
37 | INSN_HW, | ||
38 | }; | ||
39 | |||
40 | enum legacy_insn_status { | ||
41 | INSN_DEPRECATED, | ||
42 | INSN_OBSOLETE, | ||
43 | }; | ||
44 | |||
45 | struct insn_emulation_ops { | ||
46 | const char *name; | ||
47 | enum legacy_insn_status status; | ||
48 | struct undef_hook *hooks; | ||
49 | int (*set_hw_mode)(bool enable); | ||
50 | }; | ||
51 | |||
52 | struct insn_emulation { | ||
53 | struct list_head node; | ||
54 | struct insn_emulation_ops *ops; | ||
55 | int current_mode; | ||
56 | int min; | ||
57 | int max; | ||
58 | }; | ||
59 | |||
60 | static LIST_HEAD(insn_emulation); | ||
61 | static int nr_insn_emulated; | ||
62 | static DEFINE_RAW_SPINLOCK(insn_emulation_lock); | ||
63 | |||
64 | static void register_emulation_hooks(struct insn_emulation_ops *ops) | ||
65 | { | ||
66 | struct undef_hook *hook; | ||
67 | |||
68 | BUG_ON(!ops->hooks); | ||
69 | |||
70 | for (hook = ops->hooks; hook->instr_mask; hook++) | ||
71 | register_undef_hook(hook); | ||
72 | |||
73 | pr_notice("Registered %s emulation handler\n", ops->name); | ||
74 | } | ||
75 | |||
76 | static void remove_emulation_hooks(struct insn_emulation_ops *ops) | ||
77 | { | ||
78 | struct undef_hook *hook; | ||
79 | |||
80 | BUG_ON(!ops->hooks); | ||
81 | |||
82 | for (hook = ops->hooks; hook->instr_mask; hook++) | ||
83 | unregister_undef_hook(hook); | ||
84 | |||
85 | pr_notice("Removed %s emulation handler\n", ops->name); | ||
86 | } | ||
87 | |||
88 | static int update_insn_emulation_mode(struct insn_emulation *insn, | ||
89 | enum insn_emulation_mode prev) | ||
90 | { | ||
91 | int ret = 0; | ||
92 | |||
93 | switch (prev) { | ||
94 | case INSN_UNDEF: /* Nothing to be done */ | ||
95 | break; | ||
96 | case INSN_EMULATE: | ||
97 | remove_emulation_hooks(insn->ops); | ||
98 | break; | ||
99 | case INSN_HW: | ||
100 | if (insn->ops->set_hw_mode) { | ||
101 | insn->ops->set_hw_mode(false); | ||
102 | pr_notice("Disabled %s support\n", insn->ops->name); | ||
103 | } | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | switch (insn->current_mode) { | ||
108 | case INSN_UNDEF: | ||
109 | break; | ||
110 | case INSN_EMULATE: | ||
111 | register_emulation_hooks(insn->ops); | ||
112 | break; | ||
113 | case INSN_HW: | ||
114 | if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true)) | ||
115 | pr_notice("Enabled %s support\n", insn->ops->name); | ||
116 | else | ||
117 | ret = -EINVAL; | ||
118 | break; | ||
119 | } | ||
120 | |||
121 | return ret; | ||
122 | } | ||
123 | |||
124 | static void register_insn_emulation(struct insn_emulation_ops *ops) | ||
125 | { | ||
126 | unsigned long flags; | ||
127 | struct insn_emulation *insn; | ||
128 | |||
129 | insn = kzalloc(sizeof(*insn), GFP_KERNEL); | ||
130 | insn->ops = ops; | ||
131 | insn->min = INSN_UNDEF; | ||
132 | |||
133 | switch (ops->status) { | ||
134 | case INSN_DEPRECATED: | ||
135 | insn->current_mode = INSN_EMULATE; | ||
136 | insn->max = INSN_HW; | ||
137 | break; | ||
138 | case INSN_OBSOLETE: | ||
139 | insn->current_mode = INSN_UNDEF; | ||
140 | insn->max = INSN_EMULATE; | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | raw_spin_lock_irqsave(&insn_emulation_lock, flags); | ||
145 | list_add(&insn->node, &insn_emulation); | ||
146 | nr_insn_emulated++; | ||
147 | raw_spin_unlock_irqrestore(&insn_emulation_lock, flags); | ||
148 | |||
149 | /* Register any handlers if required */ | ||
150 | update_insn_emulation_mode(insn, INSN_UNDEF); | ||
151 | } | ||
152 | |||
153 | static int emulation_proc_handler(struct ctl_table *table, int write, | ||
154 | void __user *buffer, size_t *lenp, | ||
155 | loff_t *ppos) | ||
156 | { | ||
157 | int ret = 0; | ||
158 | struct insn_emulation *insn = (struct insn_emulation *) table->data; | ||
159 | enum insn_emulation_mode prev_mode = insn->current_mode; | ||
160 | |||
161 | table->data = &insn->current_mode; | ||
162 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
163 | |||
164 | if (ret || !write || prev_mode == insn->current_mode) | ||
165 | goto ret; | ||
166 | |||
167 | ret = update_insn_emulation_mode(insn, prev_mode); | ||
168 | if (ret) { | ||
169 | /* Mode change failed, revert to previous mode. */ | ||
170 | insn->current_mode = prev_mode; | ||
171 | update_insn_emulation_mode(insn, INSN_UNDEF); | ||
172 | } | ||
173 | ret: | ||
174 | table->data = insn; | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static struct ctl_table ctl_abi[] = { | ||
179 | { | ||
180 | .procname = "abi", | ||
181 | .mode = 0555, | ||
182 | }, | ||
183 | { } | ||
184 | }; | ||
185 | |||
186 | static void register_insn_emulation_sysctl(struct ctl_table *table) | ||
187 | { | ||
188 | unsigned long flags; | ||
189 | int i = 0; | ||
190 | struct insn_emulation *insn; | ||
191 | struct ctl_table *insns_sysctl, *sysctl; | ||
192 | |||
193 | insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1), | ||
194 | GFP_KERNEL); | ||
195 | |||
196 | raw_spin_lock_irqsave(&insn_emulation_lock, flags); | ||
197 | list_for_each_entry(insn, &insn_emulation, node) { | ||
198 | sysctl = &insns_sysctl[i]; | ||
199 | |||
200 | sysctl->mode = 0644; | ||
201 | sysctl->maxlen = sizeof(int); | ||
202 | |||
203 | sysctl->procname = insn->ops->name; | ||
204 | sysctl->data = insn; | ||
205 | sysctl->extra1 = &insn->min; | ||
206 | sysctl->extra2 = &insn->max; | ||
207 | sysctl->proc_handler = emulation_proc_handler; | ||
208 | i++; | ||
209 | } | ||
210 | raw_spin_unlock_irqrestore(&insn_emulation_lock, flags); | ||
211 | |||
212 | table->child = insns_sysctl; | ||
213 | register_sysctl_table(table); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Implement emulation of the SWP/SWPB instructions using load-exclusive and | ||
218 | * store-exclusive. | ||
219 | * | ||
220 | * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] | ||
221 | * Where: Rt = destination | ||
222 | * Rt2 = source | ||
223 | * Rn = address | ||
224 | */ | ||
225 | |||
226 | /* | ||
227 | * Error-checking SWP macros implemented using ldxr{b}/stxr{b} | ||
228 | */ | ||
229 | #define __user_swpX_asm(data, addr, res, temp, B) \ | ||
230 | __asm__ __volatile__( \ | ||
231 | " mov %w2, %w1\n" \ | ||
232 | "0: ldxr"B" %w1, [%3]\n" \ | ||
233 | "1: stxr"B" %w0, %w2, [%3]\n" \ | ||
234 | " cbz %w0, 2f\n" \ | ||
235 | " mov %w0, %w4\n" \ | ||
236 | "2:\n" \ | ||
237 | " .pushsection .fixup,\"ax\"\n" \ | ||
238 | " .align 2\n" \ | ||
239 | "3: mov %w0, %w5\n" \ | ||
240 | " b 2b\n" \ | ||
241 | " .popsection" \ | ||
242 | " .pushsection __ex_table,\"a\"\n" \ | ||
243 | " .align 3\n" \ | ||
244 | " .quad 0b, 3b\n" \ | ||
245 | " .quad 1b, 3b\n" \ | ||
246 | " .popsection" \ | ||
247 | : "=&r" (res), "+r" (data), "=&r" (temp) \ | ||
248 | : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ | ||
249 | : "memory") | ||
250 | |||
251 | #define __user_swp_asm(data, addr, res, temp) \ | ||
252 | __user_swpX_asm(data, addr, res, temp, "") | ||
253 | #define __user_swpb_asm(data, addr, res, temp) \ | ||
254 | __user_swpX_asm(data, addr, res, temp, "b") | ||
255 | |||
256 | /* | ||
257 | * Bit 22 of the instruction encoding distinguishes between | ||
258 | * the SWP and SWPB variants (bit set means SWPB). | ||
259 | */ | ||
260 | #define TYPE_SWPB (1 << 22) | ||
261 | |||
262 | /* | ||
263 | * Set up process info to signal segmentation fault - called on access error. | ||
264 | */ | ||
265 | static void set_segfault(struct pt_regs *regs, unsigned long addr) | ||
266 | { | ||
267 | siginfo_t info; | ||
268 | |||
269 | down_read(¤t->mm->mmap_sem); | ||
270 | if (find_vma(current->mm, addr) == NULL) | ||
271 | info.si_code = SEGV_MAPERR; | ||
272 | else | ||
273 | info.si_code = SEGV_ACCERR; | ||
274 | up_read(¤t->mm->mmap_sem); | ||
275 | |||
276 | info.si_signo = SIGSEGV; | ||
277 | info.si_errno = 0; | ||
278 | info.si_addr = (void *) instruction_pointer(regs); | ||
279 | |||
280 | pr_debug("SWP{B} emulation: access caused memory abort!\n"); | ||
281 | arm64_notify_die("Illegal memory access", regs, &info, 0); | ||
282 | } | ||
283 | |||
284 | static int emulate_swpX(unsigned int address, unsigned int *data, | ||
285 | unsigned int type) | ||
286 | { | ||
287 | unsigned int res = 0; | ||
288 | |||
289 | if ((type != TYPE_SWPB) && (address & 0x3)) { | ||
290 | /* SWP to unaligned address not permitted */ | ||
291 | pr_debug("SWP instruction on unaligned pointer!\n"); | ||
292 | return -EFAULT; | ||
293 | } | ||
294 | |||
295 | while (1) { | ||
296 | unsigned long temp; | ||
297 | |||
298 | if (type == TYPE_SWPB) | ||
299 | __user_swpb_asm(*data, address, res, temp); | ||
300 | else | ||
301 | __user_swp_asm(*data, address, res, temp); | ||
302 | |||
303 | if (likely(res != -EAGAIN) || signal_pending(current)) | ||
304 | break; | ||
305 | |||
306 | cond_resched(); | ||
307 | } | ||
308 | |||
309 | return res; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * swp_handler logs the id of the calling process, dissects the instruction, | ||
314 | * sanity-checks the memory location, calls emulate_swpX for the actual | ||
315 | * operation and deals with fixup/error handling before returning. | ||
316 | */ | ||
317 | static int swp_handler(struct pt_regs *regs, u32 instr) | ||
318 | { | ||
319 | u32 destreg, data, type, address = 0; | ||
320 | int rn, rt2, res = 0; | ||
321 | |||
322 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); | ||
323 | |||
324 | type = instr & TYPE_SWPB; | ||
325 | |||
326 | switch (arm_check_condition(instr, regs->pstate)) { | ||
327 | case ARM_OPCODE_CONDTEST_PASS: | ||
328 | break; | ||
329 | case ARM_OPCODE_CONDTEST_FAIL: | ||
330 | /* Condition failed - return to next instruction */ | ||
331 | goto ret; | ||
332 | case ARM_OPCODE_CONDTEST_UNCOND: | ||
333 | /* If unconditional encoding - not a SWP, undef */ | ||
334 | return -EFAULT; | ||
335 | default: | ||
336 | return -EINVAL; | ||
337 | } | ||
338 | |||
339 | rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET); | ||
340 | rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET); | ||
341 | |||
342 | address = (u32)regs->user_regs.regs[rn]; | ||
343 | data = (u32)regs->user_regs.regs[rt2]; | ||
344 | destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET); | ||
345 | |||
346 | pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x\n", | ||
347 | rn, address, destreg, | ||
348 | rt2, data); | ||
349 | |||
350 | /* Check access in reasonable access range for both SWP and SWPB */ | ||
351 | if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { | ||
352 | pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", | ||
353 | address); | ||
354 | goto fault; | ||
355 | } | ||
356 | |||
357 | res = emulate_swpX(address, &data, type); | ||
358 | if (res == -EFAULT) | ||
359 | goto fault; | ||
360 | else if (res == 0) | ||
361 | regs->user_regs.regs[destreg] = data; | ||
362 | |||
363 | ret: | ||
364 | if (type == TYPE_SWPB) | ||
365 | trace_instruction_emulation("swpb", regs->pc); | ||
366 | else | ||
367 | trace_instruction_emulation("swp", regs->pc); | ||
368 | |||
369 | pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n", | ||
370 | current->comm, (unsigned long)current->pid, regs->pc); | ||
371 | |||
372 | regs->pc += 4; | ||
373 | return 0; | ||
374 | |||
375 | fault: | ||
376 | set_segfault(regs, address); | ||
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Only emulate SWP/SWPB executed in ARM state/User mode. | ||
383 | * The kernel itself must be SWP-free, and SWP{B} has no Thumb encoding. | ||
384 | */ | ||
385 | static struct undef_hook swp_hooks[] = { | ||
386 | { | ||
387 | .instr_mask = 0x0fb00ff0, | ||
388 | .instr_val = 0x01000090, | ||
389 | .pstate_mask = COMPAT_PSR_MODE_MASK, | ||
390 | .pstate_val = COMPAT_PSR_MODE_USR, | ||
391 | .fn = swp_handler | ||
392 | }, | ||
393 | { } | ||
394 | }; | ||
395 | |||
396 | static struct insn_emulation_ops swp_ops = { | ||
397 | .name = "swp", | ||
398 | .status = INSN_OBSOLETE, | ||
399 | .hooks = swp_hooks, | ||
400 | .set_hw_mode = NULL, | ||
401 | }; | ||
402 | |||
403 | static int cp15barrier_handler(struct pt_regs *regs, u32 instr) | ||
404 | { | ||
405 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); | ||
406 | |||
407 | switch (arm_check_condition(instr, regs->pstate)) { | ||
408 | case ARM_OPCODE_CONDTEST_PASS: | ||
409 | break; | ||
410 | case ARM_OPCODE_CONDTEST_FAIL: | ||
411 | /* Condition failed - return to next instruction */ | ||
412 | goto ret; | ||
413 | case ARM_OPCODE_CONDTEST_UNCOND: | ||
414 | /* If unconditional encoding - not a barrier instruction */ | ||
415 | return -EFAULT; | ||
416 | default: | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | |||
420 | switch (aarch32_insn_mcr_extract_crm(instr)) { | ||
421 | case 10: | ||
422 | /* | ||
423 | * dmb - mcr p15, 0, Rt, c7, c10, 5 | ||
424 | * dsb - mcr p15, 0, Rt, c7, c10, 4 | ||
425 | */ | ||
426 | if (aarch32_insn_mcr_extract_opc2(instr) == 5) { | ||
427 | dmb(sy); | ||
428 | trace_instruction_emulation( | ||
429 | "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc); | ||
430 | } else { | ||
431 | dsb(sy); | ||
432 | trace_instruction_emulation( | ||
433 | "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc); | ||
434 | } | ||
435 | break; | ||
436 | case 5: | ||
437 | /* | ||
438 | * isb - mcr p15, 0, Rt, c7, c5, 4 | ||
439 | * | ||
440 | * Taking an exception or returning from one acts as an | ||
441 | * instruction barrier. So no explicit barrier needed here. | ||
442 | */ | ||
443 | trace_instruction_emulation( | ||
444 | "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc); | ||
445 | break; | ||
446 | } | ||
447 | |||
448 | ret: | ||
449 | pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n", | ||
450 | current->comm, (unsigned long)current->pid, regs->pc); | ||
451 | |||
452 | regs->pc += 4; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | #define SCTLR_EL1_CP15BEN (1 << 5) | ||
457 | |||
458 | static inline void config_sctlr_el1(u32 clear, u32 set) | ||
459 | { | ||
460 | u32 val; | ||
461 | |||
462 | asm volatile("mrs %0, sctlr_el1" : "=r" (val)); | ||
463 | val &= ~clear; | ||
464 | val |= set; | ||
465 | asm volatile("msr sctlr_el1, %0" : : "r" (val)); | ||
466 | } | ||
467 | |||
468 | static void enable_cp15_ben(void *info) | ||
469 | { | ||
470 | config_sctlr_el1(0, SCTLR_EL1_CP15BEN); | ||
471 | } | ||
472 | |||
473 | static void disable_cp15_ben(void *info) | ||
474 | { | ||
475 | config_sctlr_el1(SCTLR_EL1_CP15BEN, 0); | ||
476 | } | ||
477 | |||
478 | static int cpu_hotplug_notify(struct notifier_block *b, | ||
479 | unsigned long action, void *hcpu) | ||
480 | { | ||
481 | switch (action) { | ||
482 | case CPU_STARTING: | ||
483 | case CPU_STARTING_FROZEN: | ||
484 | enable_cp15_ben(NULL); | ||
485 | return NOTIFY_DONE; | ||
486 | case CPU_DYING: | ||
487 | case CPU_DYING_FROZEN: | ||
488 | disable_cp15_ben(NULL); | ||
489 | return NOTIFY_DONE; | ||
490 | } | ||
491 | |||
492 | return NOTIFY_OK; | ||
493 | } | ||
494 | |||
495 | static struct notifier_block cpu_hotplug_notifier = { | ||
496 | .notifier_call = cpu_hotplug_notify, | ||
497 | }; | ||
498 | |||
499 | static int cp15_barrier_set_hw_mode(bool enable) | ||
500 | { | ||
501 | if (enable) { | ||
502 | register_cpu_notifier(&cpu_hotplug_notifier); | ||
503 | on_each_cpu(enable_cp15_ben, NULL, true); | ||
504 | } else { | ||
505 | unregister_cpu_notifier(&cpu_hotplug_notifier); | ||
506 | on_each_cpu(disable_cp15_ben, NULL, true); | ||
507 | } | ||
508 | |||
509 | return true; | ||
510 | } | ||
511 | |||
512 | static struct undef_hook cp15_barrier_hooks[] = { | ||
513 | { | ||
514 | .instr_mask = 0x0fff0fdf, | ||
515 | .instr_val = 0x0e070f9a, | ||
516 | .pstate_mask = COMPAT_PSR_MODE_MASK, | ||
517 | .pstate_val = COMPAT_PSR_MODE_USR, | ||
518 | .fn = cp15barrier_handler, | ||
519 | }, | ||
520 | { | ||
521 | .instr_mask = 0x0fff0fff, | ||
522 | .instr_val = 0x0e070f95, | ||
523 | .pstate_mask = COMPAT_PSR_MODE_MASK, | ||
524 | .pstate_val = COMPAT_PSR_MODE_USR, | ||
525 | .fn = cp15barrier_handler, | ||
526 | }, | ||
527 | { } | ||
528 | }; | ||
529 | |||
530 | static struct insn_emulation_ops cp15_barrier_ops = { | ||
531 | .name = "cp15_barrier", | ||
532 | .status = INSN_DEPRECATED, | ||
533 | .hooks = cp15_barrier_hooks, | ||
534 | .set_hw_mode = cp15_barrier_set_hw_mode, | ||
535 | }; | ||
536 | |||
537 | /* | ||
538 | * Invoked as a late_initcall, since none of this is needed before init is spawned. | ||
539 | */ | ||
540 | static int __init armv8_deprecated_init(void) | ||
541 | { | ||
542 | if (IS_ENABLED(CONFIG_SWP_EMULATION)) | ||
543 | register_insn_emulation(&swp_ops); | ||
544 | |||
545 | if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION)) | ||
546 | register_insn_emulation(&cp15_barrier_ops); | ||
547 | |||
548 | register_insn_emulation_sysctl(ctl_abi); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | late_initcall(armv8_deprecated_init); | ||
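The sysctl nodes this file creates are driven from user space; an illustrative test program (not part of the patch):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* 0 = undef, 1 = emulate, 2 = hardware (where supported) */
            int fd = open("/proc/sys/abi/swp", O_WRONLY);
            int ok;

            if (fd < 0)
                    return 1;
            ok = write(fd, "1", 1) == 1;    /* enable SWP{B} emulation */
            close(fd);
            return ok ? 0 : 1;
    }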
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c new file mode 100644 index 000000000000..fa62637e63a8 --- /dev/null +++ b/arch/arm64/kernel/cpu_errata.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Contains CPU specific errata definitions | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) "alternatives: " fmt | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <asm/cpu.h> | ||
23 | #include <asm/cputype.h> | ||
24 | #include <asm/cpufeature.h> | ||
25 | |||
26 | #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) | ||
27 | #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) | ||
28 | |||
29 | /* | ||
30 | * Add a struct or another datatype to the union below if you need | ||
31 | * different means to detect an affected CPU. | ||
32 | */ | ||
33 | struct arm64_cpu_capabilities { | ||
34 | const char *desc; | ||
35 | u16 capability; | ||
36 | bool (*is_affected)(struct arm64_cpu_capabilities *); | ||
37 | union { | ||
38 | struct { | ||
39 | u32 midr_model; | ||
40 | u32 midr_range_min, midr_range_max; | ||
41 | }; | ||
42 | }; | ||
43 | }; | ||
44 | |||
45 | #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ | ||
46 | MIDR_ARCHITECTURE_MASK) | ||
47 | |||
48 | static bool __maybe_unused | ||
49 | is_affected_midr_range(struct arm64_cpu_capabilities *entry) | ||
50 | { | ||
51 | u32 midr = read_cpuid_id(); | ||
52 | |||
53 | if ((midr & CPU_MODEL_MASK) != entry->midr_model) | ||
54 | return false; | ||
55 | |||
56 | midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; | ||
57 | |||
58 | return (midr >= entry->midr_range_min && midr <= entry->midr_range_max); | ||
59 | } | ||
60 | |||
61 | #define MIDR_RANGE(model, min, max) \ | ||
62 | .is_affected = is_affected_midr_range, \ | ||
63 | .midr_model = model, \ | ||
64 | .midr_range_min = min, \ | ||
65 | .midr_range_max = max | ||
66 | |||
67 | struct arm64_cpu_capabilities arm64_errata[] = { | ||
68 | #if defined(CONFIG_ARM64_ERRATUM_826319) || \ | ||
69 | defined(CONFIG_ARM64_ERRATUM_827319) || \ | ||
70 | defined(CONFIG_ARM64_ERRATUM_824069) | ||
71 | { | ||
72 | /* Cortex-A53 r0p[012] */ | ||
73 | .desc = "ARM errata 826319, 827319, 824069", | ||
74 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, | ||
75 | MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02), | ||
76 | }, | ||
77 | #endif | ||
78 | #ifdef CONFIG_ARM64_ERRATUM_819472 | ||
79 | { | ||
80 | /* Cortex-A53 r0p[01] */ | ||
81 | .desc = "ARM errata 819472", | ||
82 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, | ||
83 | MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01), | ||
84 | }, | ||
85 | #endif | ||
86 | #ifdef CONFIG_ARM64_ERRATUM_832075 | ||
87 | { | ||
88 | /* Cortex-A57 r0p0 - r1p2 */ | ||
89 | .desc = "ARM erratum 832075", | ||
90 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | ||
91 | MIDR_RANGE(MIDR_CORTEX_A57, 0x00, (1 << MIDR_VARIANT_SHIFT) | 2), | ||
92 | }, | ||
93 | #endif | ||
94 | { | ||
95 | } | ||
96 | }; | ||
97 | |||
98 | void check_local_cpu_errata(void) | ||
99 | { | ||
100 | struct arm64_cpu_capabilities *cpus = arm64_errata; | ||
101 | int i; | ||
102 | |||
103 | for (i = 0; cpus[i].desc; i++) { | ||
104 | if (!cpus[i].is_affected(&cpus[i])) | ||
105 | continue; | ||
106 | |||
107 | if (!cpus_have_cap(cpus[i].capability)) | ||
108 | pr_info("enabling workaround for %s\n", cpus[i].desc); | ||
109 | cpus_set_cap(cpus[i].capability); | ||
110 | } | ||
111 | } | ||
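Since is_affected_midr_range() masks MIDR down to the variant (bits [23:20]) and revision (bits [3:0]) fields, range bounds must be given in that packed form; a hypothetical helper makes the encoding explicit:

    /* Illustrative only: pack rNpM the way the masked comparison sees it. */
    static inline u32 midr_vr(u32 variant, u32 revision)
    {
            return (variant << MIDR_VARIANT_SHIFT) | revision;
    }

    /* r1p2 == midr_vr(1, 2) == 0x100002, which is why a plain 0x12 upper
     * bound would match r0 parts only and never reach r1p0..r1p2. */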
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 504fdaa8367e..57b641747534 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/cachetype.h> | 18 | #include <asm/cachetype.h> |
19 | #include <asm/cpu.h> | 19 | #include <asm/cpu.h> |
20 | #include <asm/cputype.h> | 20 | #include <asm/cputype.h> |
21 | #include <asm/cpufeature.h> | ||
21 | 22 | ||
22 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
23 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
@@ -111,6 +112,15 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) | |||
111 | diff |= CHECK(cntfrq, boot, cur, cpu); | 112 | diff |= CHECK(cntfrq, boot, cur, cpu); |
112 | 113 | ||
113 | /* | 114 | /* |
115 | * The kernel uses self-hosted debug features and expects CPUs to | ||
116 | * support identical debug features. We presently need CTX_CMPs, WRPs, | ||
117 | * and BRPs to be identical. | ||
118 | * ID_AA64DFR1 is currently RES0. | ||
119 | */ | ||
120 | diff |= CHECK(id_aa64dfr0, boot, cur, cpu); | ||
121 | diff |= CHECK(id_aa64dfr1, boot, cur, cpu); | ||
122 | |||
123 | /* | ||
114 | * Even in big.LITTLE, processors should be identical instruction-set | 124 | * Even in big.LITTLE, processors should be identical instruction-set |
115 | * wise. | 125 | * wise. |
116 | */ | 126 | */ |
@@ -143,7 +153,12 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) | |||
143 | diff |= CHECK(id_isar3, boot, cur, cpu); | 153 | diff |= CHECK(id_isar3, boot, cur, cpu); |
144 | diff |= CHECK(id_isar4, boot, cur, cpu); | 154 | diff |= CHECK(id_isar4, boot, cur, cpu); |
145 | diff |= CHECK(id_isar5, boot, cur, cpu); | 155 | diff |= CHECK(id_isar5, boot, cur, cpu); |
146 | diff |= CHECK(id_mmfr0, boot, cur, cpu); | 156 | /* |
157 | * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and | ||
158 | * ACTLR formats could differ across CPUs and therefore would have to | ||
159 | * be trapped for virtualization anyway. | ||
160 | */ | ||
161 | diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu); | ||
147 | diff |= CHECK(id_mmfr1, boot, cur, cpu); | 162 | diff |= CHECK(id_mmfr1, boot, cur, cpu); |
148 | diff |= CHECK(id_mmfr2, boot, cur, cpu); | 163 | diff |= CHECK(id_mmfr2, boot, cur, cpu); |
149 | diff |= CHECK(id_mmfr3, boot, cur, cpu); | 164 | diff |= CHECK(id_mmfr3, boot, cur, cpu); |
@@ -155,7 +170,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) | |||
155 | * pretend to support them. | 170 | * pretend to support them. |
156 | */ | 171 | */ |
157 | WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, | 172 | WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, |
158 | "Unsupported CPU feature variation."); | 173 | "Unsupported CPU feature variation.\n"); |
159 | } | 174 | } |
160 | 175 | ||
161 | static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) | 176 | static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) |
@@ -165,6 +180,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) | |||
165 | info->reg_dczid = read_cpuid(DCZID_EL0); | 180 | info->reg_dczid = read_cpuid(DCZID_EL0); |
166 | info->reg_midr = read_cpuid_id(); | 181 | info->reg_midr = read_cpuid_id(); |
167 | 182 | ||
183 | info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1); | ||
184 | info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); | ||
168 | info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); | 185 | info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); |
169 | info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); | 186 | info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); |
170 | info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); | 187 | info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); |
@@ -186,6 +203,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) | |||
186 | info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); | 203 | info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); |
187 | 204 | ||
188 | cpuinfo_detect_icache_policy(info); | 205 | cpuinfo_detect_icache_policy(info); |
206 | |||
207 | check_local_cpu_errata(); | ||
189 | } | 208 | } |
190 | 209 | ||
191 | void cpuinfo_store_cpu(void) | 210 | void cpuinfo_store_cpu(void) |
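For reference, the masked comparison amounts to the following (sketch, assuming the CHECK/CHECK_MASK helpers defined earlier in this file):

    /* Only unmasked fields participate: 0xff0fffff clears ID_MMFR0[23:20]
     * (the AuxReg field), so CPUs may differ there without being flagged. */
    diff |= (boot->reg_id_mmfr0 & 0xff0fffff) !=
            (cur->reg_id_mmfr0 & 0xff0fffff);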
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index d18a44940968..8ce9b0577442 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S | |||
@@ -61,7 +61,8 @@ ENTRY(efi_stub_entry) | |||
61 | */ | 61 | */ |
62 | mov x20, x0 // DTB address | 62 | mov x20, x0 // DTB address |
63 | ldr x0, [sp, #16] // relocated _text address | 63 | ldr x0, [sp, #16] // relocated _text address |
64 | mov x21, x0 | 64 | ldr x21, =stext_offset |
65 | add x21, x0, x21 | ||
65 | 66 | ||
66 | /* | 67 | /* |
67 | * Calculate size of the kernel Image (same for original and copy). | 68 | * Calculate size of the kernel Image (same for original and copy). |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 95c49ebc660d..6fac253bc783 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/dmi.h> | ||
14 | #include <linux/efi.h> | 15 | #include <linux/efi.h> |
15 | #include <linux/export.h> | 16 | #include <linux/export.h> |
16 | #include <linux/memblock.h> | 17 | #include <linux/memblock.h> |
@@ -112,8 +113,6 @@ static int __init uefi_init(void) | |||
112 | efi.systab->hdr.revision & 0xffff, vendor); | 113 | efi.systab->hdr.revision & 0xffff, vendor); |
113 | 114 | ||
114 | retval = efi_config_init(NULL); | 115 | retval = efi_config_init(NULL); |
115 | if (retval == 0) | ||
116 | set_bit(EFI_CONFIG_TABLES, &efi.flags); | ||
117 | 116 | ||
118 | out: | 117 | out: |
119 | early_memunmap(efi.systab, sizeof(efi_system_table_t)); | 118 | early_memunmap(efi.systab, sizeof(efi_system_table_t)); |
@@ -125,17 +124,17 @@ out: | |||
125 | */ | 124 | */ |
126 | static __init int is_reserve_region(efi_memory_desc_t *md) | 125 | static __init int is_reserve_region(efi_memory_desc_t *md) |
127 | { | 126 | { |
128 | if (!is_normal_ram(md)) | 127 | switch (md->type) { |
128 | case EFI_LOADER_CODE: | ||
129 | case EFI_LOADER_DATA: | ||
130 | case EFI_BOOT_SERVICES_CODE: | ||
131 | case EFI_BOOT_SERVICES_DATA: | ||
132 | case EFI_CONVENTIONAL_MEMORY: | ||
129 | return 0; | 133 | return 0; |
130 | 134 | default: | |
131 | if (md->attribute & EFI_MEMORY_RUNTIME) | 135 | break; |
132 | return 1; | 136 | } |
133 | 137 | return is_normal_ram(md); | |
134 | if (md->type == EFI_ACPI_RECLAIM_MEMORY || | ||
135 | md->type == EFI_RESERVED_TYPE) | ||
136 | return 1; | ||
137 | |||
138 | return 0; | ||
139 | } | 138 | } |
140 | 139 | ||
141 | static __init void reserve_regions(void) | 140 | static __init void reserve_regions(void) |
@@ -471,3 +470,17 @@ err_unmap: | |||
471 | return -1; | 470 | return -1; |
472 | } | 471 | } |
473 | early_initcall(arm64_enter_virtual_mode); | 472 | early_initcall(arm64_enter_virtual_mode); |
473 | |||
474 | static int __init arm64_dmi_init(void) | ||
475 | { | ||
476 | /* | ||
477 | * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to | ||
478 | * be called early because dmi_id_init(), which is an arch_initcall | ||
479 | * itself, depends on dmi_scan_machine() having been called already. | ||
480 | */ | ||
481 | dmi_scan_machine(); | ||
482 | if (dmi_available) | ||
483 | dmi_set_dump_stack_arch_desc(); | ||
484 | return 0; | ||
485 | } | ||
486 | core_initcall(arm64_dmi_init); | ||
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 38e704e597f7..08cafc518b9a 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S | |||
@@ -98,8 +98,8 @@ | |||
98 | ENTRY(_mcount) | 98 | ENTRY(_mcount) |
99 | mcount_enter | 99 | mcount_enter |
100 | 100 | ||
101 | ldr x0, =ftrace_trace_function | 101 | adrp x0, ftrace_trace_function |
102 | ldr x2, [x0] | 102 | ldr x2, [x0, #:lo12:ftrace_trace_function] |
103 | adr x0, ftrace_stub | 103 | adr x0, ftrace_stub |
104 | cmp x0, x2 // if (ftrace_trace_function | 104 | cmp x0, x2 // if (ftrace_trace_function |
105 | b.eq skip_ftrace_call // != ftrace_stub) { | 105 | b.eq skip_ftrace_call // != ftrace_stub) { |
@@ -115,14 +115,15 @@ skip_ftrace_call: // return; | |||
115 | mcount_exit // return; | 115 | mcount_exit // return; |
116 | // } | 116 | // } |
117 | skip_ftrace_call: | 117 | skip_ftrace_call: |
118 | ldr x1, =ftrace_graph_return | 118 | adrp x1, ftrace_graph_return |
119 | ldr x2, [x1] // if ((ftrace_graph_return | 119 | ldr x2, [x1, #:lo12:ftrace_graph_return] |
120 | cmp x0, x2 // != ftrace_stub) | 120 | cmp x0, x2 // if ((ftrace_graph_return |
121 | b.ne ftrace_graph_caller | 121 | b.ne ftrace_graph_caller // != ftrace_stub) |
122 | 122 | ||
123 | ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry | 123 | adrp x1, ftrace_graph_entry // || (ftrace_graph_entry |
124 | ldr x2, [x1] // != ftrace_graph_entry_stub)) | 124 | adrp x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) |
125 | ldr x0, =ftrace_graph_entry_stub | 125 | ldr x2, [x1, #:lo12:ftrace_graph_entry] |
126 | add x0, x0, #:lo12:ftrace_graph_entry_stub | ||
126 | cmp x0, x2 | 127 | cmp x0, x2 |
127 | b.ne ftrace_graph_caller // ftrace_graph_caller(); | 128 | b.ne ftrace_graph_caller // ftrace_graph_caller(); |
128 | 129 | ||
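For reference, the adrp/:lo12: idiom that replaces the literal-pool loads, shown standalone (illustrative sketch):

    /* Materialize a symbol's address in two PC-relative instructions:
     * adrp yields the symbol's 4KiB page, #:lo12: adds the page offset.
     * No literal pool entry is needed, so no data load from .text. */
    static void *ftrace_stub_addr(void)
    {
            void *p;

            asm("adrp       %0, ftrace_stub\n"
                "add        %0, %0, #:lo12:ftrace_stub"
                : "=r" (p));
            return p;
    }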
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 726b910fe6ec..fd4fa374e5d2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -64,25 +64,26 @@ | |||
64 | #define BAD_ERROR 3 | 64 | #define BAD_ERROR 3 |
65 | 65 | ||
66 | .macro kernel_entry, el, regsize = 64 | 66 | .macro kernel_entry, el, regsize = 64 |
67 | sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR | 67 | sub sp, sp, #S_FRAME_SIZE |
68 | .if \regsize == 32 | 68 | .if \regsize == 32 |
69 | mov w0, w0 // zero upper 32 bits of x0 | 69 | mov w0, w0 // zero upper 32 bits of x0 |
70 | .endif | 70 | .endif |
71 | push x28, x29 | 71 | stp x0, x1, [sp, #16 * 0] |
72 | push x26, x27 | 72 | stp x2, x3, [sp, #16 * 1] |
73 | push x24, x25 | 73 | stp x4, x5, [sp, #16 * 2] |
74 | push x22, x23 | 74 | stp x6, x7, [sp, #16 * 3] |
75 | push x20, x21 | 75 | stp x8, x9, [sp, #16 * 4] |
76 | push x18, x19 | 76 | stp x10, x11, [sp, #16 * 5] |
77 | push x16, x17 | 77 | stp x12, x13, [sp, #16 * 6] |
78 | push x14, x15 | 78 | stp x14, x15, [sp, #16 * 7] |
79 | push x12, x13 | 79 | stp x16, x17, [sp, #16 * 8] |
80 | push x10, x11 | 80 | stp x18, x19, [sp, #16 * 9] |
81 | push x8, x9 | 81 | stp x20, x21, [sp, #16 * 10] |
82 | push x6, x7 | 82 | stp x22, x23, [sp, #16 * 11] |
83 | push x4, x5 | 83 | stp x24, x25, [sp, #16 * 12] |
84 | push x2, x3 | 84 | stp x26, x27, [sp, #16 * 13] |
85 | push x0, x1 | 85 | stp x28, x29, [sp, #16 * 14] |
86 | |||
86 | .if \el == 0 | 87 | .if \el == 0 |
87 | mrs x21, sp_el0 | 88 | mrs x21, sp_el0 |
88 | get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, | 89 | get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, |
@@ -118,33 +119,31 @@ | |||
118 | .if \el == 0 | 119 | .if \el == 0 |
119 | ct_user_enter | 120 | ct_user_enter |
120 | ldr x23, [sp, #S_SP] // load return stack pointer | 121 | ldr x23, [sp, #S_SP] // load return stack pointer |
122 | msr sp_el0, x23 | ||
121 | .endif | 123 | .endif |
124 | msr elr_el1, x21 // set up the return data | ||
125 | msr spsr_el1, x22 | ||
122 | .if \ret | 126 | .if \ret |
123 | ldr x1, [sp, #S_X1] // preserve x0 (syscall return) | 127 | ldr x1, [sp, #S_X1] // preserve x0 (syscall return) |
124 | add sp, sp, S_X2 | ||
125 | .else | 128 | .else |
126 | pop x0, x1 | 129 | ldp x0, x1, [sp, #16 * 0] |
127 | .endif | 130 | .endif |
128 | pop x2, x3 // load the rest of the registers | 131 | ldp x2, x3, [sp, #16 * 1] |
129 | pop x4, x5 | 132 | ldp x4, x5, [sp, #16 * 2] |
130 | pop x6, x7 | 133 | ldp x6, x7, [sp, #16 * 3] |
131 | pop x8, x9 | 134 | ldp x8, x9, [sp, #16 * 4] |
132 | msr elr_el1, x21 // set up the return data | 135 | ldp x10, x11, [sp, #16 * 5] |
133 | msr spsr_el1, x22 | 136 | ldp x12, x13, [sp, #16 * 6] |
134 | .if \el == 0 | 137 | ldp x14, x15, [sp, #16 * 7] |
135 | msr sp_el0, x23 | 138 | ldp x16, x17, [sp, #16 * 8] |
136 | .endif | 139 | ldp x18, x19, [sp, #16 * 9] |
137 | pop x10, x11 | 140 | ldp x20, x21, [sp, #16 * 10] |
138 | pop x12, x13 | 141 | ldp x22, x23, [sp, #16 * 11] |
139 | pop x14, x15 | 142 | ldp x24, x25, [sp, #16 * 12] |
140 | pop x16, x17 | 143 | ldp x26, x27, [sp, #16 * 13] |
141 | pop x18, x19 | 144 | ldp x28, x29, [sp, #16 * 14] |
142 | pop x20, x21 | 145 | ldr lr, [sp, #S_LR] |
143 | pop x22, x23 | 146 | add sp, sp, #S_FRAME_SIZE // restore sp |
144 | pop x24, x25 | ||
145 | pop x26, x27 | ||
146 | pop x28, x29 | ||
147 | ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP | ||
148 | eret // return to kernel | 147 | eret // return to kernel |
149 | .endm | 148 | .endm |
150 | 149 | ||
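The stp/ldp offsets mirror struct pt_regs directly; roughly (sketch, layout per asm/ptrace.h, symbolic offsets generated by asm-offsets):

    struct pt_regs_sketch {
            u64 regs[31];   /* x0..x29 via stp pairs at sp + 16*n;
                             * lr (x30) stored/reloaded at S_LR    */
            u64 sp;         /* S_SP: saved stack pointer           */
            u64 pc;         /* written to elr_el1 by kernel_exit   */
            u64 pstate;     /* written to spsr_el1 by kernel_exit  */
    };
    /* S_FRAME_SIZE covers this plus orig_x0/syscallno bookkeeping. */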
@@ -168,7 +167,8 @@ tsk .req x28 // current thread_info | |||
168 | * Interrupt handling. | 167 | * Interrupt handling. |
169 | */ | 168 | */ |
170 | .macro irq_handler | 169 | .macro irq_handler |
171 | ldr x1, handle_arch_irq | 170 | adrp x1, handle_arch_irq |
171 | ldr x1, [x1, #:lo12:handle_arch_irq] | ||
172 | mov x0, sp | 172 | mov x0, sp |
173 | blr x1 | 173 | blr x1 |
174 | .endm | 174 | .endm |
@@ -455,8 +455,8 @@ el0_da: | |||
455 | bic x0, x26, #(0xff << 56) | 455 | bic x0, x26, #(0xff << 56) |
456 | mov x1, x25 | 456 | mov x1, x25 |
457 | mov x2, sp | 457 | mov x2, sp |
458 | adr lr, ret_to_user | 458 | bl do_mem_abort |
459 | b do_mem_abort | 459 | b ret_to_user |
460 | el0_ia: | 460 | el0_ia: |
461 | /* | 461 | /* |
462 | * Instruction abort handling | 462 | * Instruction abort handling |
@@ -468,8 +468,8 @@ el0_ia: | |||
468 | mov x0, x26 | 468 | mov x0, x26 |
469 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts | 469 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts |
470 | mov x2, sp | 470 | mov x2, sp |
471 | adr lr, ret_to_user | 471 | bl do_mem_abort |
472 | b do_mem_abort | 472 | b ret_to_user |
473 | el0_fpsimd_acc: | 473 | el0_fpsimd_acc: |
474 | /* | 474 | /* |
475 | * Floating Point or Advanced SIMD access | 475 | * Floating Point or Advanced SIMD access |
@@ -478,8 +478,8 @@ el0_fpsimd_acc: | |||
478 | ct_user_exit | 478 | ct_user_exit |
479 | mov x0, x25 | 479 | mov x0, x25 |
480 | mov x1, sp | 480 | mov x1, sp |
481 | adr lr, ret_to_user | 481 | bl do_fpsimd_acc |
482 | b do_fpsimd_acc | 482 | b ret_to_user |
483 | el0_fpsimd_exc: | 483 | el0_fpsimd_exc: |
484 | /* | 484 | /* |
485 | * Floating Point or Advanced SIMD exception | 485 | * Floating Point or Advanced SIMD exception |
@@ -488,8 +488,8 @@ el0_fpsimd_exc: | |||
488 | ct_user_exit | 488 | ct_user_exit |
489 | mov x0, x25 | 489 | mov x0, x25 |
490 | mov x1, sp | 490 | mov x1, sp |
491 | adr lr, ret_to_user | 491 | bl do_fpsimd_exc |
492 | b do_fpsimd_exc | 492 | b ret_to_user |
493 | el0_sp_pc: | 493 | el0_sp_pc: |
494 | /* | 494 | /* |
495 | * Stack or PC alignment exception handling | 495 | * Stack or PC alignment exception handling |
@@ -500,8 +500,8 @@ el0_sp_pc: | |||
500 | mov x0, x26 | 500 | mov x0, x26 |
501 | mov x1, x25 | 501 | mov x1, x25 |
502 | mov x2, sp | 502 | mov x2, sp |
503 | adr lr, ret_to_user | 503 | bl do_sp_pc_abort |
504 | b do_sp_pc_abort | 504 | b ret_to_user |
505 | el0_undef: | 505 | el0_undef: |
506 | /* | 506 | /* |
507 | * Undefined instruction | 507 | * Undefined instruction |
@@ -510,8 +510,8 @@ el0_undef: | |||
510 | enable_dbg_and_irq | 510 | enable_dbg_and_irq |
511 | ct_user_exit | 511 | ct_user_exit |
512 | mov x0, sp | 512 | mov x0, sp |
513 | adr lr, ret_to_user | 513 | bl do_undefinstr |
514 | b do_undefinstr | 514 | b ret_to_user |
515 | el0_dbg: | 515 | el0_dbg: |
516 | /* | 516 | /* |
517 | * Debug exception handling | 517 | * Debug exception handling |
@@ -530,8 +530,8 @@ el0_inv: | |||
530 | mov x0, sp | 530 | mov x0, sp |
531 | mov x1, #BAD_SYNC | 531 | mov x1, #BAD_SYNC |
532 | mrs x2, esr_el1 | 532 | mrs x2, esr_el1 |
533 | adr lr, ret_to_user | 533 | bl bad_mode |
534 | b bad_mode | 534 | b ret_to_user |
535 | ENDPROC(el0_sync) | 535 | ENDPROC(el0_sync) |
536 | 536 | ||
537 | .align 6 | 537 | .align 6 |
@@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point | |||
653 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks | 653 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks |
654 | tst x16, #_TIF_SYSCALL_WORK | 654 | tst x16, #_TIF_SYSCALL_WORK |
655 | b.ne __sys_trace | 655 | b.ne __sys_trace |
656 | adr lr, ret_fast_syscall // return address | ||
657 | cmp scno, sc_nr // check upper syscall limit | 656 | cmp scno, sc_nr // check upper syscall limit |
658 | b.hs ni_sys | 657 | b.hs ni_sys |
659 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table | 658 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table |
660 | br x16 // call sys_* routine | 659 | blr x16 // call sys_* routine |
660 | b ret_fast_syscall | ||
661 | ni_sys: | 661 | ni_sys: |
662 | mov x0, sp | 662 | mov x0, sp |
663 | b do_ni_syscall | 663 | bl do_ni_syscall |
664 | b ret_fast_syscall | ||
664 | ENDPROC(el0_svc) | 665 | ENDPROC(el0_svc) |
665 | 666 | ||
666 | /* | 667 | /* |
@@ -668,26 +669,38 @@ ENDPROC(el0_svc) | |||
668 | * switches, and waiting for our parent to respond. | 669 | * switches, and waiting for our parent to respond. |
669 | */ | 670 | */ |
670 | __sys_trace: | 671 | __sys_trace: |
671 | mov x0, sp | 672 | mov w0, #-1 // set default errno for |
673 | cmp scno, x0 // user-issued syscall(-1) | ||
674 | b.ne 1f | ||
675 | mov x0, #-ENOSYS | ||
676 | str x0, [sp, #S_X0] | ||
677 | 1: mov x0, sp | ||
672 | bl syscall_trace_enter | 678 | bl syscall_trace_enter |
673 | adr lr, __sys_trace_return // return address | 679 | cmp w0, #-1 // skip the syscall? |
680 | b.eq __sys_trace_return_skipped | ||
674 | uxtw scno, w0 // syscall number (possibly new) | 681 | uxtw scno, w0 // syscall number (possibly new) |
675 | mov x1, sp // pointer to regs | 682 | mov x1, sp // pointer to regs |
676 | cmp scno, sc_nr // check upper syscall limit | 683 | cmp scno, sc_nr // check upper syscall limit |
677 | b.hs ni_sys | 684 | b.hs __ni_sys_trace |
678 | ldp x0, x1, [sp] // restore the syscall args | 685 | ldp x0, x1, [sp] // restore the syscall args |
679 | ldp x2, x3, [sp, #S_X2] | 686 | ldp x2, x3, [sp, #S_X2] |
680 | ldp x4, x5, [sp, #S_X4] | 687 | ldp x4, x5, [sp, #S_X4] |
681 | ldp x6, x7, [sp, #S_X6] | 688 | ldp x6, x7, [sp, #S_X6] |
682 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table | 689 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table |
683 | br x16 // call sys_* routine | 690 | blr x16 // call sys_* routine |
684 | 691 | ||
685 | __sys_trace_return: | 692 | __sys_trace_return: |
686 | str x0, [sp] // save returned x0 | 693 | str x0, [sp, #S_X0] // save returned x0 |
694 | __sys_trace_return_skipped: | ||
687 | mov x0, sp | 695 | mov x0, sp |
688 | bl syscall_trace_exit | 696 | bl syscall_trace_exit |
689 | b ret_to_user | 697 | b ret_to_user |
690 | 698 | ||
699 | __ni_sys_trace: | ||
700 | mov x0, sp | ||
701 | bl do_ni_syscall | ||
702 | b __sys_trace_return | ||
703 | |||
691 | /* | 704 | /* |
692 | * Special system call wrappers. | 705 | * Special system call wrappers. |
693 | */ | 706 | */ |
@@ -695,6 +708,3 @@ ENTRY(sys_rt_sigreturn_wrapper) | |||
695 | mov x0, sp | 708 | mov x0, sp |
696 | b sys_rt_sigreturn | 709 | b sys_rt_sigreturn |
697 | ENDPROC(sys_rt_sigreturn_wrapper) | 710 | ENDPROC(sys_rt_sigreturn_wrapper) |
698 | |||
699 | ENTRY(handle_arch_irq) | ||
700 | .quad 0 | ||
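Note: the __sys_trace changes above pre-load -ENOSYS into the saved x0 so that a traced syscall(-1) still reports ENOSYS rather than leaking stale register state, and they let a tracer or seccomp skip the call by leaving the number at -1. A minimal userspace check of the resulting behaviour (a sketch, not part of the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        long ret = syscall(-1);     /* deliberately out-of-range number */

        printf("syscall(-1) = %ld, errno = %s\n", ret, strerror(errno));
        return 0;
    }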
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0a6e4f924df8..8ce88e08c030 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -132,6 +132,8 @@ efi_head: | |||
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | #ifdef CONFIG_EFI | 134 | #ifdef CONFIG_EFI |
135 | .globl stext_offset | ||
136 | .set stext_offset, stext - efi_head | ||
135 | .align 3 | 137 | .align 3 |
136 | pe_header: | 138 | pe_header: |
137 | .ascii "PE" | 139 | .ascii "PE" |
@@ -155,12 +157,12 @@ optional_header: | |||
155 | .long 0 // SizeOfInitializedData | 157 | .long 0 // SizeOfInitializedData |
156 | .long 0 // SizeOfUninitializedData | 158 | .long 0 // SizeOfUninitializedData |
157 | .long efi_stub_entry - efi_head // AddressOfEntryPoint | 159 | .long efi_stub_entry - efi_head // AddressOfEntryPoint |
158 | .long stext - efi_head // BaseOfCode | 160 | .long stext_offset // BaseOfCode |
159 | 161 | ||
160 | extra_header_fields: | 162 | extra_header_fields: |
161 | .quad 0 // ImageBase | 163 | .quad 0 // ImageBase |
162 | .long 0x20 // SectionAlignment | 164 | .long 0x1000 // SectionAlignment |
163 | .long 0x8 // FileAlignment | 165 | .long PECOFF_FILE_ALIGNMENT // FileAlignment |
164 | .short 0 // MajorOperatingSystemVersion | 166 | .short 0 // MajorOperatingSystemVersion |
165 | .short 0 // MinorOperatingSystemVersion | 167 | .short 0 // MinorOperatingSystemVersion |
166 | .short 0 // MajorImageVersion | 168 | .short 0 // MajorImageVersion |
@@ -172,7 +174,7 @@ extra_header_fields: | |||
172 | .long _end - efi_head // SizeOfImage | 174 | .long _end - efi_head // SizeOfImage |
173 | 175 | ||
174 | // Everything before the kernel image is considered part of the header | 176 | // Everything before the kernel image is considered part of the header |
175 | .long stext - efi_head // SizeOfHeaders | 177 | .long stext_offset // SizeOfHeaders |
176 | .long 0 // CheckSum | 178 | .long 0 // CheckSum |
177 | .short 0xa // Subsystem (EFI application) | 179 | .short 0xa // Subsystem (EFI application) |
178 | .short 0 // DllCharacteristics | 180 | .short 0 // DllCharacteristics |
@@ -217,16 +219,24 @@ section_table: | |||
217 | .byte 0 | 219 | .byte 0 |
218 | .byte 0 // end of 0 padding of section name | 220 | .byte 0 // end of 0 padding of section name |
219 | .long _end - stext // VirtualSize | 221 | .long _end - stext // VirtualSize |
220 | .long stext - efi_head // VirtualAddress | 222 | .long stext_offset // VirtualAddress |
221 | .long _edata - stext // SizeOfRawData | 223 | .long _edata - stext // SizeOfRawData |
222 | .long stext - efi_head // PointerToRawData | 224 | .long stext_offset // PointerToRawData |
223 | 225 | ||
224 | .long 0 // PointerToRelocations (0 for executables) | 226 | .long 0 // PointerToRelocations (0 for executables) |
225 | .long 0 // PointerToLineNumbers (0 for executables) | 227 | .long 0 // PointerToLineNumbers (0 for executables) |
226 | .short 0 // NumberOfRelocations (0 for executables) | 228 | .short 0 // NumberOfRelocations (0 for executables) |
227 | .short 0 // NumberOfLineNumbers (0 for executables) | 229 | .short 0 // NumberOfLineNumbers (0 for executables) |
228 | .long 0xe0500020 // Characteristics (section flags) | 230 | .long 0xe0500020 // Characteristics (section flags) |
229 | .align 5 | 231 | |
232 | /* | ||
233 | * EFI will load stext onwards at the 4k section alignment | ||
234 | * described in the PE/COFF header. To ensure that instruction | ||
235 | * sequences using an adrp and a :lo12: immediate will function | ||
236 | * correctly at this alignment, we must ensure that stext is | ||
237 | * placed at a 4k boundary in the Image to begin with. | ||
238 | */ | ||
239 | .align 12 | ||
230 | #endif | 240 | #endif |
231 | 241 | ||
232 | ENTRY(stext) | 242 | ENTRY(stext) |
@@ -238,7 +248,13 @@ ENTRY(stext) | |||
238 | mov x0, x22 | 248 | mov x0, x22 |
239 | bl lookup_processor_type | 249 | bl lookup_processor_type |
240 | mov x23, x0 // x23=current cpu_table | 250 | mov x23, x0 // x23=current cpu_table |
241 | cbz x23, __error_p // invalid processor (x23=0)? | 251 | /* |
252 | * __error_p may end up out of range for cbz if text areas are | ||
253 | * aligned up to section sizes. | ||
254 | */ | ||
255 | cbnz x23, 1f // invalid processor (x23=0)? | ||
256 | b __error_p | ||
257 | 1: | ||
242 | bl __vet_fdt | 258 | bl __vet_fdt |
243 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 | 259 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 |
244 | /* | 260 | /* |
@@ -250,13 +266,214 @@ ENTRY(stext) | |||
250 | */ | 266 | */ |
251 | ldr x27, __switch_data // address to jump to after | 267 | ldr x27, __switch_data // address to jump to after |
252 | // MMU has been enabled | 268 | // MMU has been enabled |
253 | adr lr, __enable_mmu // return (PIC) address | 269 | adrp lr, __enable_mmu // return (PIC) address |
270 | add lr, lr, #:lo12:__enable_mmu | ||
254 | ldr x12, [x23, #CPU_INFO_SETUP] | 271 | ldr x12, [x23, #CPU_INFO_SETUP] |
255 | add x12, x12, x28 // __virt_to_phys | 272 | add x12, x12, x28 // __virt_to_phys |
256 | br x12 // initialise processor | 273 | br x12 // initialise processor |
257 | ENDPROC(stext) | 274 | ENDPROC(stext) |
258 | 275 | ||
259 | /* | 276 | /* |
277 | * Determine validity of the x21 FDT pointer. | ||
278 | * The dtb must be 8-byte aligned and live in the first 512M of memory. | ||
279 | */ | ||
280 | __vet_fdt: | ||
281 | tst x21, #0x7 | ||
282 | b.ne 1f | ||
283 | cmp x21, x24 | ||
284 | b.lt 1f | ||
285 | mov x0, #(1 << 29) | ||
286 | add x0, x0, x24 | ||
287 | cmp x21, x0 | ||
288 | b.ge 1f | ||
289 | ret | ||
290 | 1: | ||
291 | mov x21, #0 | ||
292 | ret | ||
293 | ENDPROC(__vet_fdt) | ||
294 | /* | ||
295 | * Macro to create a table entry to the next page. | ||
296 | * | ||
297 | * tbl: page table address | ||
298 | * virt: virtual address | ||
299 | * shift: #imm page table shift | ||
300 | * ptrs: #imm pointers per table page | ||
301 | * | ||
302 | * Preserves: virt | ||
303 | * Corrupts: tmp1, tmp2 | ||
304 | * Returns: tbl -> next level table page address | ||
305 | */ | ||
306 | .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 | ||
307 | lsr \tmp1, \virt, #\shift | ||
308 | and \tmp1, \tmp1, #\ptrs - 1 // table index | ||
309 | add \tmp2, \tbl, #PAGE_SIZE | ||
310 | orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type | ||
311 | str \tmp2, [\tbl, \tmp1, lsl #3] | ||
312 | add \tbl, \tbl, #PAGE_SIZE // next level table page | ||
313 | .endm | ||
314 | |||
315 | /* | ||
316 | * Macro to populate the PGD (and possibly PUD) for the corresponding | ||
317 | * block entry in the next level (tbl) for the given virtual address. | ||
318 | * | ||
319 | * Preserves: tbl, next, virt | ||
320 | * Corrupts: tmp1, tmp2 | ||
321 | */ | ||
322 | .macro create_pgd_entry, tbl, virt, tmp1, tmp2 | ||
323 | create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 | ||
324 | #if SWAPPER_PGTABLE_LEVELS == 3 | ||
325 | create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 | ||
326 | #endif | ||
327 | .endm | ||
328 | |||
329 | /* | ||
330 | * Macro to populate block entries in the page table for the start..end | ||
331 | * virtual range (inclusive). | ||
332 | * | ||
333 | * Preserves: tbl, flags | ||
334 | * Corrupts: phys, start, end, pstate | ||
335 | */ | ||
336 | .macro create_block_map, tbl, flags, phys, start, end | ||
337 | lsr \phys, \phys, #BLOCK_SHIFT | ||
338 | lsr \start, \start, #BLOCK_SHIFT | ||
339 | and \start, \start, #PTRS_PER_PTE - 1 // table index | ||
340 | orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry | ||
341 | lsr \end, \end, #BLOCK_SHIFT | ||
342 | and \end, \end, #PTRS_PER_PTE - 1 // table end index | ||
343 | 9999: str \phys, [\tbl, \start, lsl #3] // store the entry | ||
344 | add \start, \start, #1 // next entry | ||
345 | add \phys, \phys, #BLOCK_SIZE // next block | ||
346 | cmp \start, \end | ||
347 | b.ls 9999b | ||
348 | .endm | ||
349 | |||
350 | /* | ||
351 | * Set up the initial page tables. We only set up the barest amount which is | ||
352 | * required to get the kernel running. The following sections are required: | ||
353 | * - identity mapping to enable the MMU (low address, TTBR0) | ||
354 | * - first few MB of the kernel linear mapping to jump to once the MMU has | ||
355 | * been enabled, including the FDT blob (TTBR1) | ||
356 | * - pgd entry for fixed mappings (TTBR1) | ||
357 | */ | ||
358 | __create_page_tables: | ||
359 | pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses | ||
360 | mov x27, lr | ||
361 | |||
362 | /* | ||
363 | * Invalidate the idmap and swapper page tables to avoid potential | ||
364 | * dirty cache lines being evicted. | ||
365 | */ | ||
366 | mov x0, x25 | ||
367 | add x1, x26, #SWAPPER_DIR_SIZE | ||
368 | bl __inval_cache_range | ||
369 | |||
370 | /* | ||
371 | * Clear the idmap and swapper page tables. | ||
372 | */ | ||
373 | mov x0, x25 | ||
374 | add x6, x26, #SWAPPER_DIR_SIZE | ||
375 | 1: stp xzr, xzr, [x0], #16 | ||
376 | stp xzr, xzr, [x0], #16 | ||
377 | stp xzr, xzr, [x0], #16 | ||
378 | stp xzr, xzr, [x0], #16 | ||
379 | cmp x0, x6 | ||
380 | b.lo 1b | ||
381 | |||
382 | ldr x7, =MM_MMUFLAGS | ||
383 | |||
384 | /* | ||
385 | * Create the identity mapping. | ||
386 | */ | ||
387 | mov x0, x25 // idmap_pg_dir | ||
388 | ldr x3, =KERNEL_START | ||
389 | add x3, x3, x28 // __pa(KERNEL_START) | ||
390 | create_pgd_entry x0, x3, x5, x6 | ||
391 | ldr x6, =KERNEL_END | ||
392 | mov x5, x3 // __pa(KERNEL_START) | ||
393 | add x6, x6, x28 // __pa(KERNEL_END) | ||
394 | create_block_map x0, x7, x3, x5, x6 | ||
395 | |||
396 | /* | ||
397 | * Map the kernel image (starting with PHYS_OFFSET). | ||
398 | */ | ||
399 | mov x0, x26 // swapper_pg_dir | ||
400 | mov x5, #PAGE_OFFSET | ||
401 | create_pgd_entry x0, x5, x3, x6 | ||
402 | ldr x6, =KERNEL_END | ||
403 | mov x3, x24 // phys offset | ||
404 | create_block_map x0, x7, x3, x5, x6 | ||
405 | |||
406 | /* | ||
407 | * Map the FDT blob (maximum 2MB; must be within 512MB of | ||
408 | * PHYS_OFFSET). | ||
409 | */ | ||
410 | mov x3, x21 // FDT phys address | ||
411 | and x3, x3, #~((1 << 21) - 1) // 2MB aligned | ||
412 | mov x6, #PAGE_OFFSET | ||
413 | sub x5, x3, x24 // subtract PHYS_OFFSET | ||
414 | tst x5, #~((1 << 29) - 1) // within 512MB? | ||
415 | csel x21, xzr, x21, ne // zero the FDT pointer | ||
416 | b.ne 1f | ||
417 | add x5, x5, x6 // __va(FDT blob) | ||
418 | add x6, x5, #1 << 21 // 2MB for the FDT blob | ||
419 | sub x6, x6, #1 // inclusive range | ||
420 | create_block_map x0, x7, x3, x5, x6 | ||
421 | 1: | ||
422 | /* | ||
423 | * Since the page tables have been populated with non-cacheable | ||
424 | * accesses (MMU disabled), invalidate the idmap and swapper page | ||
425 | * tables again to remove any speculatively loaded cache lines. | ||
426 | */ | ||
427 | mov x0, x25 | ||
428 | add x1, x26, #SWAPPER_DIR_SIZE | ||
429 | bl __inval_cache_range | ||
430 | |||
431 | mov lr, x27 | ||
432 | ret | ||
433 | ENDPROC(__create_page_tables) | ||
434 | .ltorg | ||
435 | |||
436 | .align 3 | ||
437 | .type __switch_data, %object | ||
438 | __switch_data: | ||
439 | .quad __mmap_switched | ||
440 | .quad __bss_start // x6 | ||
441 | .quad __bss_stop // x7 | ||
442 | .quad processor_id // x4 | ||
443 | .quad __fdt_pointer // x5 | ||
444 | .quad memstart_addr // x6 | ||
445 | .quad init_thread_union + THREAD_START_SP // sp | ||
446 | |||
447 | /* | ||
448 | * The following fragment of code is executed with the MMU on, and | ||
449 | * uses absolute addresses; this is not position independent. | ||
450 | */ | ||
451 | __mmap_switched: | ||
452 | adr x3, __switch_data + 8 | ||
453 | |||
454 | ldp x6, x7, [x3], #16 | ||
455 | 1: cmp x6, x7 | ||
456 | b.hs 2f | ||
457 | str xzr, [x6], #8 // Clear BSS | ||
458 | b 1b | ||
459 | 2: | ||
460 | ldp x4, x5, [x3], #16 | ||
461 | ldr x6, [x3], #8 | ||
462 | ldr x16, [x3] | ||
463 | mov sp, x16 | ||
464 | str x22, [x4] // Save processor ID | ||
465 | str x21, [x5] // Save FDT pointer | ||
466 | str x24, [x6] // Save PHYS_OFFSET | ||
467 | mov x29, #0 | ||
468 | b start_kernel | ||
469 | ENDPROC(__mmap_switched) | ||
470 | |||
471 | /* | ||
472 | * end early head section, begin head code that is also used for | ||
473 | * hotplug and needs to have the same protections as the text region | ||
474 | */ | ||
475 | .section ".text","ax" | ||
476 | /* | ||
260 | * If we're fortunate enough to boot at EL2, ensure that the world is | 477 | * If we're fortunate enough to boot at EL2, ensure that the world is |
261 | * sane before dropping to EL1. | 478 | * sane before dropping to EL1. |
262 | * | 479 | * |
@@ -331,7 +548,8 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems | |||
331 | msr vttbr_el2, xzr | 548 | msr vttbr_el2, xzr |
332 | 549 | ||
333 | /* Hypervisor stub */ | 550 | /* Hypervisor stub */ |
334 | adr x0, __hyp_stub_vectors | 551 | adrp x0, __hyp_stub_vectors |
552 | add x0, x0, #:lo12:__hyp_stub_vectors | ||
335 | msr vbar_el2, x0 | 553 | msr vbar_el2, x0 |
336 | 554 | ||
337 | /* spsr */ | 555 | /* spsr */ |
@@ -492,183 +710,6 @@ ENDPROC(__calc_phys_offset) | |||
492 | .quad PAGE_OFFSET | 710 | .quad PAGE_OFFSET |
493 | 711 | ||
494 | /* | 712 | /* |
495 | * Macro to create a table entry to the next page. | ||
496 | * | ||
497 | * tbl: page table address | ||
498 | * virt: virtual address | ||
499 | * shift: #imm page table shift | ||
500 | * ptrs: #imm pointers per table page | ||
501 | * | ||
502 | * Preserves: virt | ||
503 | * Corrupts: tmp1, tmp2 | ||
504 | * Returns: tbl -> next level table page address | ||
505 | */ | ||
506 | .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 | ||
507 | lsr \tmp1, \virt, #\shift | ||
508 | and \tmp1, \tmp1, #\ptrs - 1 // table index | ||
509 | add \tmp2, \tbl, #PAGE_SIZE | ||
510 | orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type | ||
511 | str \tmp2, [\tbl, \tmp1, lsl #3] | ||
512 | add \tbl, \tbl, #PAGE_SIZE // next level table page | ||
513 | .endm | ||
514 | |||
515 | /* | ||
516 | * Macro to populate the PGD (and possibly PUD) for the corresponding | ||
517 | * block entry in the next level (tbl) for the given virtual address. | ||
518 | * | ||
519 | * Preserves: tbl, next, virt | ||
520 | * Corrupts: tmp1, tmp2 | ||
521 | */ | ||
522 | .macro create_pgd_entry, tbl, virt, tmp1, tmp2 | ||
523 | create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 | ||
524 | #if SWAPPER_PGTABLE_LEVELS == 3 | ||
525 | create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 | ||
526 | #endif | ||
527 | .endm | ||
528 | |||
529 | /* | ||
530 | * Macro to populate block entries in the page table for the start..end | ||
531 | * virtual range (inclusive). | ||
532 | * | ||
533 | * Preserves: tbl, flags | ||
534 | * Corrupts: phys, start, end, pstate | ||
535 | */ | ||
536 | .macro create_block_map, tbl, flags, phys, start, end | ||
537 | lsr \phys, \phys, #BLOCK_SHIFT | ||
538 | lsr \start, \start, #BLOCK_SHIFT | ||
539 | and \start, \start, #PTRS_PER_PTE - 1 // table index | ||
540 | orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry | ||
541 | lsr \end, \end, #BLOCK_SHIFT | ||
542 | and \end, \end, #PTRS_PER_PTE - 1 // table end index | ||
543 | 9999: str \phys, [\tbl, \start, lsl #3] // store the entry | ||
544 | add \start, \start, #1 // next entry | ||
545 | add \phys, \phys, #BLOCK_SIZE // next block | ||
546 | cmp \start, \end | ||
547 | b.ls 9999b | ||
548 | .endm | ||
549 | |||
550 | /* | ||
551 | * Set up the initial page tables. We only set up the barest amount which is | ||
552 | * required to get the kernel running. The following sections are required: | ||
553 | * - identity mapping to enable the MMU (low address, TTBR0) | ||
554 | * - first few MB of the kernel linear mapping to jump to once the MMU has | ||
555 | * been enabled, including the FDT blob (TTBR1) | ||
556 | * - pgd entry for fixed mappings (TTBR1) | ||
557 | */ | ||
558 | __create_page_tables: | ||
559 | pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses | ||
560 | mov x27, lr | ||
561 | |||
562 | /* | ||
563 | * Invalidate the idmap and swapper page tables to avoid potential | ||
564 | * dirty cache lines being evicted. | ||
565 | */ | ||
566 | mov x0, x25 | ||
567 | add x1, x26, #SWAPPER_DIR_SIZE | ||
568 | bl __inval_cache_range | ||
569 | |||
570 | /* | ||
571 | * Clear the idmap and swapper page tables. | ||
572 | */ | ||
573 | mov x0, x25 | ||
574 | add x6, x26, #SWAPPER_DIR_SIZE | ||
575 | 1: stp xzr, xzr, [x0], #16 | ||
576 | stp xzr, xzr, [x0], #16 | ||
577 | stp xzr, xzr, [x0], #16 | ||
578 | stp xzr, xzr, [x0], #16 | ||
579 | cmp x0, x6 | ||
580 | b.lo 1b | ||
581 | |||
582 | ldr x7, =MM_MMUFLAGS | ||
583 | |||
584 | /* | ||
585 | * Create the identity mapping. | ||
586 | */ | ||
587 | mov x0, x25 // idmap_pg_dir | ||
588 | ldr x3, =KERNEL_START | ||
589 | add x3, x3, x28 // __pa(KERNEL_START) | ||
590 | create_pgd_entry x0, x3, x5, x6 | ||
591 | ldr x6, =KERNEL_END | ||
592 | mov x5, x3 // __pa(KERNEL_START) | ||
593 | add x6, x6, x28 // __pa(KERNEL_END) | ||
594 | create_block_map x0, x7, x3, x5, x6 | ||
595 | |||
596 | /* | ||
597 | * Map the kernel image (starting with PHYS_OFFSET). | ||
598 | */ | ||
599 | mov x0, x26 // swapper_pg_dir | ||
600 | mov x5, #PAGE_OFFSET | ||
601 | create_pgd_entry x0, x5, x3, x6 | ||
602 | ldr x6, =KERNEL_END | ||
603 | mov x3, x24 // phys offset | ||
604 | create_block_map x0, x7, x3, x5, x6 | ||
605 | |||
606 | /* | ||
607 | * Map the FDT blob (maximum 2MB; must be within 512MB of | ||
608 | * PHYS_OFFSET). | ||
609 | */ | ||
610 | mov x3, x21 // FDT phys address | ||
611 | and x3, x3, #~((1 << 21) - 1) // 2MB aligned | ||
612 | mov x6, #PAGE_OFFSET | ||
613 | sub x5, x3, x24 // subtract PHYS_OFFSET | ||
614 | tst x5, #~((1 << 29) - 1) // within 512MB? | ||
615 | csel x21, xzr, x21, ne // zero the FDT pointer | ||
616 | b.ne 1f | ||
617 | add x5, x5, x6 // __va(FDT blob) | ||
618 | add x6, x5, #1 << 21 // 2MB for the FDT blob | ||
619 | sub x6, x6, #1 // inclusive range | ||
620 | create_block_map x0, x7, x3, x5, x6 | ||
621 | 1: | ||
622 | /* | ||
623 | * Since the page tables have been populated with non-cacheable | ||
624 | * accesses (MMU disabled), invalidate the idmap and swapper page | ||
625 | * tables again to remove any speculatively loaded cache lines. | ||
626 | */ | ||
627 | mov x0, x25 | ||
628 | add x1, x26, #SWAPPER_DIR_SIZE | ||
629 | bl __inval_cache_range | ||
630 | |||
631 | mov lr, x27 | ||
632 | ret | ||
633 | ENDPROC(__create_page_tables) | ||
634 | .ltorg | ||
635 | |||
636 | .align 3 | ||
637 | .type __switch_data, %object | ||
638 | __switch_data: | ||
639 | .quad __mmap_switched | ||
640 | .quad __bss_start // x6 | ||
641 | .quad __bss_stop // x7 | ||
642 | .quad processor_id // x4 | ||
643 | .quad __fdt_pointer // x5 | ||
644 | .quad memstart_addr // x6 | ||
645 | .quad init_thread_union + THREAD_START_SP // sp | ||
646 | |||
647 | /* | ||
648 | * The following fragment of code is executed with the MMU on, and | ||
649 | * uses absolute addresses; this is not position independent. | ||
650 | */ | ||
651 | __mmap_switched: | ||
652 | adr x3, __switch_data + 8 | ||
653 | |||
654 | ldp x6, x7, [x3], #16 | ||
655 | 1: cmp x6, x7 | ||
656 | b.hs 2f | ||
657 | str xzr, [x6], #8 // Clear BSS | ||
658 | b 1b | ||
659 | 2: | ||
660 | ldp x4, x5, [x3], #16 | ||
661 | ldr x6, [x3], #8 | ||
662 | ldr x16, [x3] | ||
663 | mov sp, x16 | ||
664 | str x22, [x4] // Save processor ID | ||
665 | str x21, [x5] // Save FDT pointer | ||
666 | str x24, [x6] // Save PHYS_OFFSET | ||
667 | mov x29, #0 | ||
668 | b start_kernel | ||
669 | ENDPROC(__mmap_switched) | ||
670 | |||
671 | /* | ||
672 | * Exception handling. Something went wrong and we can't proceed. We ought to | 713 | * Exception handling. Something went wrong and we can't proceed. We ought to |
673 | * tell the user, but since we don't have any guarantee that we're even | 714 | * tell the user, but since we don't have any guarantee that we're even |
674 | * running on the right architecture, we do virtually nothing. | 715 | * running on the right architecture, we do virtually nothing. |
@@ -715,22 +756,3 @@ __lookup_processor_type_data: | |||
715 | .quad . | 756 | .quad . |
716 | .quad cpu_table | 757 | .quad cpu_table |
717 | .size __lookup_processor_type_data, . - __lookup_processor_type_data | 758 | .size __lookup_processor_type_data, . - __lookup_processor_type_data |
718 | |||
719 | /* | ||
720 | * Determine validity of the x21 FDT pointer. | ||
721 | * The dtb must be 8-byte aligned and live in the first 512M of memory. | ||
722 | */ | ||
723 | __vet_fdt: | ||
724 | tst x21, #0x7 | ||
725 | b.ne 1f | ||
726 | cmp x21, x24 | ||
727 | b.lt 1f | ||
728 | mov x0, #(1 << 29) | ||
729 | add x0, x0, x24 | ||
730 | cmp x21, x0 | ||
731 | b.ge 1f | ||
732 | ret | ||
733 | 1: | ||
734 | mov x21, #0 | ||
735 | ret | ||
736 | ENDPROC(__vet_fdt) | ||
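Note: the index arithmetic that create_table_entry and create_block_map perform above is plain shift-and-mask. A standalone C sketch of the same computation (the constants are illustrative values for 4K pages, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values: 4K pages, 512-entry top-level table */
    #define PGDIR_SHIFT  30
    #define PTRS_PER_PGD 512

    /* create_table_entry's index: (virt >> shift) & (ptrs - 1) */
    static unsigned table_index(uint64_t virt, unsigned shift, unsigned ptrs)
    {
        return (virt >> shift) & (ptrs - 1);
    }

    int main(void)
    {
        uint64_t virt = 0xffffffc000080000ULL;  /* example kernel VA */

        printf("pgd index = %u\n", table_index(virt, PGDIR_SHIFT, PTRS_PER_PGD));
        return 0;
    }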
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 8cd27fedc8b6..7e9327a0986d 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -960,3 +960,29 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, | |||
960 | 960 | ||
961 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); | 961 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); |
962 | } | 962 | } |
963 | |||
964 | bool aarch32_insn_is_wide(u32 insn) | ||
965 | { | ||
966 | return insn >= 0xe800; | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * Macros/defines for extracting register numbers from instruction. | ||
971 | */ | ||
972 | u32 aarch32_insn_extract_reg_num(u32 insn, int offset) | ||
973 | { | ||
974 | return (insn & (0xf << offset)) >> offset; | ||
975 | } | ||
976 | |||
977 | #define OPC2_MASK 0x7 | ||
978 | #define OPC2_OFFSET 5 | ||
979 | u32 aarch32_insn_mcr_extract_opc2(u32 insn) | ||
980 | { | ||
981 | return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET; | ||
982 | } | ||
983 | |||
984 | #define CRM_MASK 0xf | ||
985 | u32 aarch32_insn_mcr_extract_crm(u32 insn) | ||
986 | { | ||
987 | return insn & CRM_MASK; | ||
988 | } | ||
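Note: the helpers added above slice fixed fields out of an A32 coprocessor instruction. The same bit arithmetic as a runnable sketch, decoded against the CP15ISB encoding, mcr p15, 0, r0, c7, c5, 4 (the 0xee070f95 literal is a worked example, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract_reg_num(uint32_t insn, int offset)
    {
        return (insn >> offset) & 0xf;      /* same math as the kernel helper */
    }

    int main(void)
    {
        uint32_t insn = 0xee070f95; /* mcr p15, 0, r0, c7, c5, 4 (CP15ISB) */

        printf("Rt   = r%u\n", extract_reg_num(insn, 12));  /* r0 */
        printf("opc2 = %u\n", (insn >> 5) & 0x7);           /* 4  */
        printf("CRm  = c%u\n", insn & 0xf);                 /* 5  */
        return 0;
    }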
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 7d37ead4d199..354be2a872ae 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c | |||
@@ -25,12 +25,26 @@ | |||
25 | */ | 25 | */ |
26 | void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) | 26 | void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) |
27 | { | 27 | { |
28 | unsigned char *t = to; | 28 | while (count && (!IS_ALIGNED((unsigned long)from, 8) || |
29 | while (count) { | 29 | !IS_ALIGNED((unsigned long)to, 8))) { |
30 | *(u8 *)to = __raw_readb(from); | ||
31 | from++; | ||
32 | to++; | ||
30 | count--; | 33 | count--; |
31 | *t = readb(from); | 34 | } |
32 | t++; | 35 | |
36 | while (count >= 8) { | ||
37 | *(u64 *)to = __raw_readq(from); | ||
38 | from += 8; | ||
39 | to += 8; | ||
40 | count -= 8; | ||
41 | } | ||
42 | |||
43 | while (count) { | ||
44 | *(u8 *)to = __raw_readb(from); | ||
33 | from++; | 45 | from++; |
46 | to++; | ||
47 | count--; | ||
34 | } | 48 | } |
35 | } | 49 | } |
36 | EXPORT_SYMBOL(__memcpy_fromio); | 50 | EXPORT_SYMBOL(__memcpy_fromio); |
@@ -40,12 +54,26 @@ EXPORT_SYMBOL(__memcpy_fromio); | |||
40 | */ | 54 | */ |
41 | void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) | 55 | void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) |
42 | { | 56 | { |
43 | const unsigned char *f = from; | 57 | while (count && (!IS_ALIGNED((unsigned long)to, 8) || |
44 | while (count) { | 58 | !IS_ALIGNED((unsigned long)from, 8))) { |
59 | __raw_writeb(*(volatile u8 *)from, to); | ||
60 | from++; | ||
61 | to++; | ||
45 | count--; | 62 | count--; |
46 | writeb(*f, to); | 63 | } |
47 | f++; | 64 | |
65 | while (count >= 8) { | ||
66 | __raw_writeq(*(volatile u64 *)from, to); | ||
67 | from += 8; | ||
68 | to += 8; | ||
69 | count -= 8; | ||
70 | } | ||
71 | |||
72 | while (count) { | ||
73 | __raw_writeb(*(volatile u8 *)from, to); | ||
74 | from++; | ||
48 | to++; | 75 | to++; |
76 | count--; | ||
49 | } | 77 | } |
50 | } | 78 | } |
51 | EXPORT_SYMBOL(__memcpy_toio); | 79 | EXPORT_SYMBOL(__memcpy_toio); |
@@ -55,10 +83,28 @@ EXPORT_SYMBOL(__memcpy_toio); | |||
55 | */ | 83 | */ |
56 | void __memset_io(volatile void __iomem *dst, int c, size_t count) | 84 | void __memset_io(volatile void __iomem *dst, int c, size_t count) |
57 | { | 85 | { |
58 | while (count) { | 86 | u64 qc = (u8)c; |
87 | |||
88 | qc |= qc << 8; | ||
89 | qc |= qc << 16; | ||
90 | qc |= qc << 32; | ||
91 | |||
92 | while (count && !IS_ALIGNED((unsigned long)dst, 8)) { | ||
93 | __raw_writeb(c, dst); | ||
94 | dst++; | ||
59 | count--; | 95 | count--; |
60 | writeb(c, dst); | 96 | } |
97 | |||
98 | while (count >= 8) { | ||
99 | __raw_writeq(qc, dst); | ||
100 | dst += 8; | ||
101 | count -= 8; | ||
102 | } | ||
103 | |||
104 | while (count) { | ||
105 | __raw_writeb(c, dst); | ||
61 | dst++; | 106 | dst++; |
107 | count--; | ||
62 | } | 108 | } |
63 | } | 109 | } |
64 | EXPORT_SYMBOL(__memset_io); | 110 | EXPORT_SYMBOL(__memset_io); |
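Note: the interesting trick in the new __memset_io is splatting the fill byte across a 64-bit word so that each writeq replaces eight writeb calls. The construction in isolation, as a runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* The fill-pattern construction from __memset_io above */
    static uint64_t splat8(int c)
    {
        uint64_t qc = (uint8_t)c;

        qc |= qc << 8;
        qc |= qc << 16;
        qc |= qc << 32;
        return qc;
    }

    int main(void)
    {
        /* prints 0xabababababababab */
        printf("0x%016llx\n", (unsigned long long)splat8(0xab));
        return 0;
    }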
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 071a6ec13bd8..240b75c0e94f 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c | |||
@@ -40,6 +40,8 @@ int arch_show_interrupts(struct seq_file *p, int prec) | |||
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
42 | 42 | ||
43 | void (*handle_arch_irq)(struct pt_regs *) = NULL; | ||
44 | |||
43 | void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) | 45 | void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) |
44 | { | 46 | { |
45 | if (handle_arch_irq) | 47 | if (handle_arch_irq) |
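Note: with handle_arch_irq now a C variable instead of an asm .quad, registration is unchanged: an irqchip driver passes its root handler to set_handle_irq() once during init, and only the first registration takes effect. A hedged in-kernel sketch (the my_gic_* names are hypothetical):

    #include <linux/init.h>
    #include <asm/exception.h>
    #include <asm/irq.h>

    /* Hypothetical root handler; a real one acks, dispatches and EOIs */
    static void __exception_irq_entry my_gic_handle_irq(struct pt_regs *regs)
    {
        /* ... read the pending interrupt and call handle_IRQ() ... */
    }

    static int __init my_gic_init(void)
    {
        set_handle_irq(my_gic_handle_irq); /* first registration wins */
        return 0;
    }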
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index 263a166291fb..4f1fec7a46db 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c | |||
@@ -22,9 +22,8 @@ | |||
22 | 22 | ||
23 | #ifdef HAVE_JUMP_LABEL | 23 | #ifdef HAVE_JUMP_LABEL |
24 | 24 | ||
25 | static void __arch_jump_label_transform(struct jump_entry *entry, | 25 | void arch_jump_label_transform(struct jump_entry *entry, |
26 | enum jump_label_type type, | 26 | enum jump_label_type type) |
27 | bool is_static) | ||
28 | { | 27 | { |
29 | void *addr = (void *)entry->code; | 28 | void *addr = (void *)entry->code; |
30 | u32 insn; | 29 | u32 insn; |
@@ -37,22 +36,18 @@ static void __arch_jump_label_transform(struct jump_entry *entry, | |||
37 | insn = aarch64_insn_gen_nop(); | 36 | insn = aarch64_insn_gen_nop(); |
38 | } | 37 | } |
39 | 38 | ||
40 | if (is_static) | 39 | aarch64_insn_patch_text(&addr, &insn, 1); |
41 | aarch64_insn_patch_text_nosync(addr, insn); | ||
42 | else | ||
43 | aarch64_insn_patch_text(&addr, &insn, 1); | ||
44 | } | ||
45 | |||
46 | void arch_jump_label_transform(struct jump_entry *entry, | ||
47 | enum jump_label_type type) | ||
48 | { | ||
49 | __arch_jump_label_transform(entry, type, false); | ||
50 | } | 40 | } |
51 | 41 | ||
52 | void arch_jump_label_transform_static(struct jump_entry *entry, | 42 | void arch_jump_label_transform_static(struct jump_entry *entry, |
53 | enum jump_label_type type) | 43 | enum jump_label_type type) |
54 | { | 44 | { |
55 | __arch_jump_label_transform(entry, type, true); | 45 | /* |
46 | * We use the architected A64 NOP in arch_static_branch, so there's no | ||
47 | * need to patch an identical A64 NOP over the top of it here. The core | ||
48 | * will call arch_jump_label_transform from a module notifier if the | ||
49 | * NOP needs to be replaced by a branch. | ||
50 | */ | ||
56 | } | 51 | } |
57 | 52 | ||
58 | #endif /* HAVE_JUMP_LABEL */ | 53 | #endif /* HAVE_JUMP_LABEL */ |
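Note: the simplification above relies on arch_static_branch emitting the architected A64 NOP directly, so the boot-time "static" transform has nothing to patch. For context, the consumer-side API this supports, as a hedged in-kernel sketch (do_slow_diagnostics() is a made-up function):

    #include <linux/jump_label.h>

    extern void do_slow_diagnostics(void);  /* hypothetical */

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    void hot_path(void)
    {
        /* Compiles to a single NOP; patched to a branch once
         * static_key_slow_inc(&my_key) enables the key. */
        if (static_key_false(&my_key))
            do_slow_diagnostics();
    }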
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 1eb1cc955139..fd027b101de5 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/moduleloader.h> | 26 | #include <linux/moduleloader.h> |
27 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
28 | #include <asm/insn.h> | 28 | #include <asm/insn.h> |
29 | #include <asm/sections.h> | ||
29 | 30 | ||
30 | #define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX | 31 | #define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX |
31 | #define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 | 32 | #define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 |
@@ -394,3 +395,20 @@ overflow: | |||
394 | me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); | 395 | me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); |
395 | return -ENOEXEC; | 396 | return -ENOEXEC; |
396 | } | 397 | } |
398 | |||
399 | int module_finalize(const Elf_Ehdr *hdr, | ||
400 | const Elf_Shdr *sechdrs, | ||
401 | struct module *me) | ||
402 | { | ||
403 | const Elf_Shdr *s, *se; | ||
404 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
405 | |||
406 | for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { | ||
407 | if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) { | ||
408 | apply_alternatives((void *)s->sh_addr, s->sh_size); | ||
409 | return 0; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | } | ||
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index aa29ecb4f800..25a5308744b1 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -169,8 +169,14 @@ armpmu_event_set_period(struct perf_event *event, | |||
169 | ret = 1; | 169 | ret = 1; |
170 | } | 170 | } |
171 | 171 | ||
172 | if (left > (s64)armpmu->max_period) | 172 | /* |
173 | left = armpmu->max_period; | 173 | * Limit the maximum period to prevent the counter value |
174 | * from overtaking the one we are about to program. In | ||
175 | * effect we are reducing max_period to account for | ||
176 | * interrupt latency (and we are being very conservative). | ||
177 | */ | ||
178 | if (left > (armpmu->max_period >> 1)) | ||
179 | left = armpmu->max_period >> 1; | ||
174 | 180 | ||
175 | local64_set(&hwc->prev_count, (u64)-left); | 181 | local64_set(&hwc->prev_count, (u64)-left); |
176 | 182 | ||
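Note: the effect of the new limit is easiest to see numerically; a standalone sketch of the clamp (the 32-bit max_period is an assumed example):

    #include <stdint.h>
    #include <stdio.h>

    /* Never program more than half the counter range, leaving headroom
     * for counts that arrive before the overflow interrupt is handled. */
    static int64_t clamp_period(int64_t left, uint64_t max_period)
    {
        if (left > (int64_t)(max_period >> 1))
            left = max_period >> 1;
        return left;
    }

    int main(void)
    {
        /* e.g. a 32-bit counter: max_period = 0xffffffff */
        printf("%lld\n", (long long)clamp_period(0x100000000LL, 0xffffffffULL));
        return 0;
    }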
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 8a4ae8e73213..d882b833dbdb 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
28 | #include <linux/ptrace.h> | 28 | #include <linux/ptrace.h> |
29 | #include <linux/user.h> | 29 | #include <linux/user.h> |
30 | #include <linux/seccomp.h> | ||
30 | #include <linux/security.h> | 31 | #include <linux/security.h> |
31 | #include <linux/init.h> | 32 | #include <linux/init.h> |
32 | #include <linux/signal.h> | 33 | #include <linux/signal.h> |
@@ -551,6 +552,32 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, | |||
551 | return ret; | 552 | return ret; |
552 | } | 553 | } |
553 | 554 | ||
555 | static int system_call_get(struct task_struct *target, | ||
556 | const struct user_regset *regset, | ||
557 | unsigned int pos, unsigned int count, | ||
558 | void *kbuf, void __user *ubuf) | ||
559 | { | ||
560 | int syscallno = task_pt_regs(target)->syscallno; | ||
561 | |||
562 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
563 | &syscallno, 0, -1); | ||
564 | } | ||
565 | |||
566 | static int system_call_set(struct task_struct *target, | ||
567 | const struct user_regset *regset, | ||
568 | unsigned int pos, unsigned int count, | ||
569 | const void *kbuf, const void __user *ubuf) | ||
570 | { | ||
571 | int syscallno, ret; | ||
572 | |||
573 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); | ||
574 | if (ret) | ||
575 | return ret; | ||
576 | |||
577 | task_pt_regs(target)->syscallno = syscallno; | ||
578 | return ret; | ||
579 | } | ||
580 | |||
554 | enum aarch64_regset { | 581 | enum aarch64_regset { |
555 | REGSET_GPR, | 582 | REGSET_GPR, |
556 | REGSET_FPR, | 583 | REGSET_FPR, |
@@ -559,6 +586,7 @@ enum aarch64_regset { | |||
559 | REGSET_HW_BREAK, | 586 | REGSET_HW_BREAK, |
560 | REGSET_HW_WATCH, | 587 | REGSET_HW_WATCH, |
561 | #endif | 588 | #endif |
589 | REGSET_SYSTEM_CALL, | ||
562 | }; | 590 | }; |
563 | 591 | ||
564 | static const struct user_regset aarch64_regsets[] = { | 592 | static const struct user_regset aarch64_regsets[] = { |
@@ -608,6 +636,14 @@ static const struct user_regset aarch64_regsets[] = { | |||
608 | .set = hw_break_set, | 636 | .set = hw_break_set, |
609 | }, | 637 | }, |
610 | #endif | 638 | #endif |
639 | [REGSET_SYSTEM_CALL] = { | ||
640 | .core_note_type = NT_ARM_SYSTEM_CALL, | ||
641 | .n = 1, | ||
642 | .size = sizeof(int), | ||
643 | .align = sizeof(int), | ||
644 | .get = system_call_get, | ||
645 | .set = system_call_set, | ||
646 | }, | ||
611 | }; | 647 | }; |
612 | 648 | ||
613 | static const struct user_regset_view user_aarch64_view = { | 649 | static const struct user_regset_view user_aarch64_view = { |
@@ -1114,6 +1150,10 @@ static void tracehook_report_syscall(struct pt_regs *regs, | |||
1114 | 1150 | ||
1115 | asmlinkage int syscall_trace_enter(struct pt_regs *regs) | 1151 | asmlinkage int syscall_trace_enter(struct pt_regs *regs) |
1116 | { | 1152 | { |
1153 | /* Do the secure computing check first; failures should be fast. */ | ||
1154 | if (secure_computing() == -1) | ||
1155 | return -1; | ||
1156 | |||
1117 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1157 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1118 | tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); | 1158 | tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); |
1119 | 1159 | ||
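Note: together with the entry.S rework, the new NT_ARM_SYSTEM_CALL regset lets a tracer read and rewrite the pending syscall number; writing -1 asks the kernel to skip the call. A userspace sketch, assuming the tracee is already stopped at syscall entry via PTRACE_SYSCALL:

    #include <elf.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #ifndef NT_ARM_SYSTEM_CALL
    #define NT_ARM_SYSTEM_CALL 0x404    /* the new regset's note type */
    #endif

    static int skip_syscall(pid_t pid)
    {
        int scno;
        struct iovec iov = { .iov_base = &scno, .iov_len = sizeof(scno) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov))
            return -1;
        printf("tracee entering syscall %d\n", scno);

        scno = -1;      /* -1 requests that the syscall be skipped */
        return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
    }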
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 2437196cc5d4..b80991166754 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -43,12 +43,14 @@ | |||
43 | #include <linux/of_fdt.h> | 43 | #include <linux/of_fdt.h> |
44 | #include <linux/of_platform.h> | 44 | #include <linux/of_platform.h> |
45 | #include <linux/efi.h> | 45 | #include <linux/efi.h> |
46 | #include <linux/personality.h> | ||
46 | 47 | ||
47 | #include <asm/fixmap.h> | 48 | #include <asm/fixmap.h> |
48 | #include <asm/cpu.h> | 49 | #include <asm/cpu.h> |
49 | #include <asm/cputype.h> | 50 | #include <asm/cputype.h> |
50 | #include <asm/elf.h> | 51 | #include <asm/elf.h> |
51 | #include <asm/cputable.h> | 52 | #include <asm/cputable.h> |
53 | #include <asm/cpufeature.h> | ||
52 | #include <asm/cpu_ops.h> | 54 | #include <asm/cpu_ops.h> |
53 | #include <asm/sections.h> | 55 | #include <asm/sections.h> |
54 | #include <asm/setup.h> | 56 | #include <asm/setup.h> |
@@ -72,13 +74,15 @@ EXPORT_SYMBOL_GPL(elf_hwcap); | |||
72 | COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ | 74 | COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ |
73 | COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ | 75 | COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ |
74 | COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ | 76 | COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ |
75 | COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) | 77 | COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ |
78 | COMPAT_HWCAP_LPAE) | ||
76 | unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; | 79 | unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; |
77 | unsigned int compat_elf_hwcap2 __read_mostly; | 80 | unsigned int compat_elf_hwcap2 __read_mostly; |
78 | #endif | 81 | #endif |
79 | 82 | ||
83 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | ||
84 | |||
80 | static const char *cpu_name; | 85 | static const char *cpu_name; |
81 | static const char *machine_name; | ||
82 | phys_addr_t __fdt_pointer __initdata; | 86 | phys_addr_t __fdt_pointer __initdata; |
83 | 87 | ||
84 | /* | 88 | /* |
@@ -116,12 +120,16 @@ void __init early_print(const char *str, ...) | |||
116 | 120 | ||
117 | void __init smp_setup_processor_id(void) | 121 | void __init smp_setup_processor_id(void) |
118 | { | 122 | { |
123 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; | ||
124 | cpu_logical_map(0) = mpidr; | ||
125 | |||
119 | /* | 126 | /* |
120 | * clear __my_cpu_offset on boot CPU to avoid hang caused by | 127 | * clear __my_cpu_offset on boot CPU to avoid hang caused by |
121 | * using percpu variable early, for example, lockdep will | 128 | * using percpu variable early, for example, lockdep will |
122 | * access percpu variable inside lock_release | 129 | * access percpu variable inside lock_release |
123 | */ | 130 | */ |
124 | set_my_cpu_offset(0); | 131 | set_my_cpu_offset(0); |
132 | pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr); | ||
125 | } | 133 | } |
126 | 134 | ||
127 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) | 135 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) |
@@ -311,7 +319,7 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) | |||
311 | cpu_relax(); | 319 | cpu_relax(); |
312 | } | 320 | } |
313 | 321 | ||
314 | machine_name = of_flat_dt_get_machine_name(); | 322 | dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); |
315 | } | 323 | } |
316 | 324 | ||
317 | /* | 325 | /* |
@@ -376,6 +384,7 @@ void __init setup_arch(char **cmdline_p) | |||
376 | 384 | ||
377 | *cmdline_p = boot_command_line; | 385 | *cmdline_p = boot_command_line; |
378 | 386 | ||
387 | early_fixmap_init(); | ||
379 | early_ioremap_init(); | 388 | early_ioremap_init(); |
380 | 389 | ||
381 | parse_early_param(); | 390 | parse_early_param(); |
@@ -398,7 +407,6 @@ void __init setup_arch(char **cmdline_p) | |||
398 | 407 | ||
399 | psci_init(); | 408 | psci_init(); |
400 | 409 | ||
401 | cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; | ||
402 | cpu_read_bootcpu_ops(); | 410 | cpu_read_bootcpu_ops(); |
403 | #ifdef CONFIG_SMP | 411 | #ifdef CONFIG_SMP |
404 | smp_init_cpus(); | 412 | smp_init_cpus(); |
@@ -447,14 +455,50 @@ static const char *hwcap_str[] = { | |||
447 | NULL | 455 | NULL |
448 | }; | 456 | }; |
449 | 457 | ||
458 | #ifdef CONFIG_COMPAT | ||
459 | static const char *compat_hwcap_str[] = { | ||
460 | "swp", | ||
461 | "half", | ||
462 | "thumb", | ||
463 | "26bit", | ||
464 | "fastmult", | ||
465 | "fpa", | ||
466 | "vfp", | ||
467 | "edsp", | ||
468 | "java", | ||
469 | "iwmmxt", | ||
470 | "crunch", | ||
471 | "thumbee", | ||
472 | "neon", | ||
473 | "vfpv3", | ||
474 | "vfpv3d16", | ||
475 | "tls", | ||
476 | "vfpv4", | ||
477 | "idiva", | ||
478 | "idivt", | ||
479 | "vfpd32", | ||
480 | "lpae", | ||
481 | "evtstrm" | ||
482 | }; | ||
483 | |||
484 | static const char *compat_hwcap2_str[] = { | ||
485 | "aes", | ||
486 | "pmull", | ||
487 | "sha1", | ||
488 | "sha2", | ||
489 | "crc32", | ||
490 | NULL | ||
491 | }; | ||
492 | #endif /* CONFIG_COMPAT */ | ||
493 | |||
450 | static int c_show(struct seq_file *m, void *v) | 494 | static int c_show(struct seq_file *m, void *v) |
451 | { | 495 | { |
452 | int i; | 496 | int i, j; |
453 | |||
454 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", | ||
455 | cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); | ||
456 | 497 | ||
457 | for_each_online_cpu(i) { | 498 | for_each_online_cpu(i) { |
499 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); | ||
500 | u32 midr = cpuinfo->reg_midr; | ||
501 | |||
458 | /* | 502 | /* |
459 | * glibc reads /proc/cpuinfo to determine the number of | 503 | * glibc reads /proc/cpuinfo to determine the number of |
460 | * online processors, looking for lines beginning with | 504 | * online processors, looking for lines beginning with |
@@ -463,24 +507,38 @@ static int c_show(struct seq_file *m, void *v) | |||
463 | #ifdef CONFIG_SMP | 507 | #ifdef CONFIG_SMP |
464 | seq_printf(m, "processor\t: %d\n", i); | 508 | seq_printf(m, "processor\t: %d\n", i); |
465 | #endif | 509 | #endif |
466 | } | ||
467 | |||
468 | /* dump out the processor features */ | ||
469 | seq_puts(m, "Features\t: "); | ||
470 | |||
471 | for (i = 0; hwcap_str[i]; i++) | ||
472 | if (elf_hwcap & (1 << i)) | ||
473 | seq_printf(m, "%s ", hwcap_str[i]); | ||
474 | |||
475 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | ||
476 | seq_printf(m, "CPU architecture: AArch64\n"); | ||
477 | seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); | ||
478 | seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); | ||
479 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | ||
480 | 510 | ||
481 | seq_puts(m, "\n"); | 511 | /* |
482 | 512 | * Dump out the common processor features in a single line. | |
483 | seq_printf(m, "Hardware\t: %s\n", machine_name); | 513 | * Userspace should read the hwcaps with getauxval(AT_HWCAP) |
514 | * rather than attempting to parse this, but there's a body of | ||
515 | * software which does already (at least for 32-bit). | ||
516 | */ | ||
517 | seq_puts(m, "Features\t:"); | ||
518 | if (personality(current->personality) == PER_LINUX32) { | ||
519 | #ifdef CONFIG_COMPAT | ||
520 | for (j = 0; compat_hwcap_str[j]; j++) | ||
521 | if (compat_elf_hwcap & (1 << j)) | ||
522 | seq_printf(m, " %s", compat_hwcap_str[j]); | ||
523 | |||
524 | for (j = 0; compat_hwcap2_str[j]; j++) | ||
525 | if (compat_elf_hwcap2 & (1 << j)) | ||
526 | seq_printf(m, " %s", compat_hwcap2_str[j]); | ||
527 | #endif /* CONFIG_COMPAT */ | ||
528 | } else { | ||
529 | for (j = 0; hwcap_str[j]; j++) | ||
530 | if (elf_hwcap & (1 << j)) | ||
531 | seq_printf(m, " %s", hwcap_str[j]); | ||
532 | } | ||
533 | seq_puts(m, "\n"); | ||
534 | |||
535 | seq_printf(m, "CPU implementer\t: 0x%02x\n", | ||
536 | MIDR_IMPLEMENTOR(midr)); | ||
537 | seq_printf(m, "CPU architecture: 8\n"); | ||
538 | seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); | ||
539 | seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); | ||
540 | seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); | ||
541 | } | ||
484 | 542 | ||
485 | return 0; | 543 | return 0; |
486 | } | 544 | } |
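Note: as the new comment in c_show says, userspace should prefer getauxval(AT_HWCAP) to parsing the Features line. A minimal example (the HWCAP_CRC32 fallback value is an assumption; the authoritative definition lives in asm/hwcap.h):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_CRC32
    #define HWCAP_CRC32 (1 << 7)    /* assumed bit; check asm/hwcap.h */
    #endif

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("crc32 %ssupported\n", (hwcap & HWCAP_CRC32) ? "" : "not ");
        return 0;
    }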
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1b9ad02837cf..5a1ba6e80d4e 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -186,6 +186,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | |||
186 | err |= __put_user(from->si_uid, &to->si_uid); | 186 | err |= __put_user(from->si_uid, &to->si_uid); |
187 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); | 187 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); |
188 | break; | 188 | break; |
189 | case __SI_SYS: | ||
190 | err |= __put_user((compat_uptr_t)(unsigned long) | ||
191 | from->si_call_addr, &to->si_call_addr); | ||
192 | err |= __put_user(from->si_syscall, &to->si_syscall); | ||
193 | err |= __put_user(from->si_arch, &to->si_arch); | ||
194 | break; | ||
189 | default: /* this is just in case for now ... */ | 195 | default: /* this is just in case for now ... */ |
190 | err |= __put_user(from->si_pid, &to->si_pid); | 196 | err |= __put_user(from->si_pid, &to->si_pid); |
191 | err |= __put_user(from->si_uid, &to->si_uid); | 197 | err |= __put_user(from->si_uid, &to->si_uid); |
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index a564b440416a..ede186cdd452 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -147,14 +147,12 @@ cpu_resume_after_mmu: | |||
147 | ret | 147 | ret |
148 | ENDPROC(cpu_resume_after_mmu) | 148 | ENDPROC(cpu_resume_after_mmu) |
149 | 149 | ||
150 | .data | ||
151 | ENTRY(cpu_resume) | 150 | ENTRY(cpu_resume) |
152 | bl el2_setup // if in EL2 drop to EL1 cleanly | 151 | bl el2_setup // if in EL2 drop to EL1 cleanly |
153 | #ifdef CONFIG_SMP | 152 | #ifdef CONFIG_SMP |
154 | mrs x1, mpidr_el1 | 153 | mrs x1, mpidr_el1 |
155 | adr x4, mpidr_hash_ptr | 154 | adrp x8, mpidr_hash |
156 | ldr x5, [x4] | 155 | add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address |
157 | add x8, x4, x5 // x8 = struct mpidr_hash phys address | ||
158 | /* retrieve mpidr_hash members to compute the hash */ | 156 | /* retrieve mpidr_hash members to compute the hash */ |
159 | ldr x2, [x8, #MPIDR_HASH_MASK] | 157 | ldr x2, [x8, #MPIDR_HASH_MASK] |
160 | ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] | 158 | ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] |
@@ -164,14 +162,15 @@ ENTRY(cpu_resume) | |||
164 | #else | 162 | #else |
165 | mov x7, xzr | 163 | mov x7, xzr |
166 | #endif | 164 | #endif |
167 | adr x0, sleep_save_sp | 165 | adrp x0, sleep_save_sp |
166 | add x0, x0, #:lo12:sleep_save_sp | ||
168 | ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] | 167 | ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] |
169 | ldr x0, [x0, x7, lsl #3] | 168 | ldr x0, [x0, x7, lsl #3] |
170 | /* load sp from context */ | 169 | /* load sp from context */ |
171 | ldr x2, [x0, #CPU_CTX_SP] | 170 | ldr x2, [x0, #CPU_CTX_SP] |
172 | adr x1, sleep_idmap_phys | 171 | adrp x1, sleep_idmap_phys |
173 | /* load physical address of identity map page table in x1 */ | 172 | /* load physical address of identity map page table in x1 */ |
174 | ldr x1, [x1] | 173 | ldr x1, [x1, #:lo12:sleep_idmap_phys] |
175 | mov sp, x2 | 174 | mov sp, x2 |
176 | /* | 175 | /* |
177 | * cpu_do_resume expects x0 to contain context physical address | 176 | * cpu_do_resume expects x0 to contain context physical address |
@@ -180,26 +179,3 @@ ENTRY(cpu_resume) | |||
180 | bl cpu_do_resume // PC relative jump, MMU off | 179 | bl cpu_do_resume // PC relative jump, MMU off |
181 | b cpu_resume_mmu // Resume MMU, never returns | 180 | b cpu_resume_mmu // Resume MMU, never returns |
182 | ENDPROC(cpu_resume) | 181 | ENDPROC(cpu_resume) |
183 | |||
184 | .align 3 | ||
185 | mpidr_hash_ptr: | ||
186 | /* | ||
187 | * offset of mpidr_hash symbol from current location | ||
188 | * used to obtain run-time mpidr_hash address with MMU off | ||
189 | */ | ||
190 | .quad mpidr_hash - . | ||
191 | /* | ||
192 | * physical address of identity mapped page tables | ||
193 | */ | ||
194 | .type sleep_idmap_phys, #object | ||
195 | ENTRY(sleep_idmap_phys) | ||
196 | .quad 0 | ||
197 | /* | ||
198 | * struct sleep_save_sp { | ||
199 | * phys_addr_t *save_ptr_stash; | ||
200 | * phys_addr_t save_ptr_stash_phys; | ||
201 | * }; | ||
202 | */ | ||
203 | .type sleep_save_sp, #object | ||
204 | ENTRY(sleep_save_sp) | ||
205 | .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp | ||
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index b06d1d90ee8c..7ae6ee085261 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/of.h> | 37 | #include <linux/of.h> |
38 | #include <linux/irq_work.h> | 38 | #include <linux/irq_work.h> |
39 | 39 | ||
40 | #include <asm/alternative.h> | ||
40 | #include <asm/atomic.h> | 41 | #include <asm/atomic.h> |
41 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
42 | #include <asm/cpu.h> | 43 | #include <asm/cpu.h> |
@@ -309,6 +310,7 @@ void cpu_die(void) | |||
309 | void __init smp_cpus_done(unsigned int max_cpus) | 310 | void __init smp_cpus_done(unsigned int max_cpus) |
310 | { | 311 | { |
311 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); | 312 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); |
313 | apply_alternatives_all(); | ||
312 | } | 314 | } |
313 | 315 | ||
314 | void __init smp_prepare_boot_cpu(void) | 316 | void __init smp_prepare_boot_cpu(void) |
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 13ad4dbb1615..3771b72b6569 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -126,8 +126,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | |||
126 | return ret; | 126 | return ret; |
127 | } | 127 | } |
128 | 128 | ||
129 | extern struct sleep_save_sp sleep_save_sp; | 129 | struct sleep_save_sp sleep_save_sp; |
130 | extern phys_addr_t sleep_idmap_phys; | 130 | phys_addr_t sleep_idmap_phys; |
131 | 131 | ||
132 | static int __init cpu_suspend_init(void) | 132 | static int __init cpu_suspend_init(void) |
133 | { | 133 | { |
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index dc47e53e9e28..28c511b06edf 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c | |||
@@ -28,29 +28,39 @@ | |||
28 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | #include <asm/unistd.h> | 29 | #include <asm/unistd.h> |
30 | 30 | ||
31 | static inline void | 31 | static long |
32 | do_compat_cache_op(unsigned long start, unsigned long end, int flags) | 32 | __do_compat_cache_op(unsigned long start, unsigned long end) |
33 | { | 33 | { |
34 | struct mm_struct *mm = current->active_mm; | 34 | long ret; |
35 | struct vm_area_struct *vma; | ||
36 | 35 | ||
37 | if (end < start || flags) | 36 | do { |
38 | return; | 37 | unsigned long chunk = min(PAGE_SIZE, end - start); |
39 | 38 | ||
40 | down_read(&mm->mmap_sem); | 39 | if (fatal_signal_pending(current)) |
41 | vma = find_vma(mm, start); | 40 | return 0; |
42 | if (vma && vma->vm_start < end) { | 41 | |
43 | if (start < vma->vm_start) | 42 | ret = __flush_cache_user_range(start, start + chunk); |
44 | start = vma->vm_start; | 43 | if (ret) |
45 | if (end > vma->vm_end) | 44 | return ret; |
46 | end = vma->vm_end; | 45 | |
47 | up_read(&mm->mmap_sem); | 46 | cond_resched(); |
48 | __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end)); | 47 | start += chunk; |
49 | return; | 48 | } while (start < end); |
50 | } | 49 | |
51 | up_read(&mm->mmap_sem); | 50 | return 0; |
52 | } | 51 | } |
53 | 52 | ||
53 | static inline long | ||
54 | do_compat_cache_op(unsigned long start, unsigned long end, int flags) | ||
55 | { | ||
56 | if (end < start || flags) | ||
57 | return -EINVAL; | ||
58 | |||
59 | if (!access_ok(VERIFY_READ, start, end - start)) | ||
60 | return -EFAULT; | ||
61 | |||
62 | return __do_compat_cache_op(start, end); | ||
63 | } | ||
54 | /* | 64 | /* |
55 | * Handle all unrecognised system calls. | 65 | * Handle all unrecognised system calls. |
56 | */ | 66 | */ |
@@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs) | |||
74 | * the specified region). | 84 | * the specified region). |
75 | */ | 85 | */ |
76 | case __ARM_NR_compat_cacheflush: | 86 | case __ARM_NR_compat_cacheflush: |
77 | do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); | 87 | return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); |
78 | return 0; | ||
79 | 88 | ||
80 | case __ARM_NR_compat_set_tls: | 89 | case __ARM_NR_compat_set_tls: |
81 | current->thread.tp_value = regs->regs[0]; | 90 | current->thread.tp_value = regs->regs[0]; |
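Note: callers of the compat cacheflush syscall now receive real error codes (-EINVAL for a bad range or non-zero flags, -EFAULT for inaccessible memory) instead of silent success. A sketch of how a 32-bit JIT would invoke it (the number is __ARM_NR_BASE + 2; treat the literal as illustrative):

    #include <unistd.h>
    #include <sys/syscall.h>

    #define __ARM_NR_cacheflush 0x0f0002    /* __ARM_NR_BASE + 2 */

    /* Compat (AArch32) caller: sync the I-side after writing code */
    static int flush_jit_code(void *start, void *end)
    {
        /* The third argument (flags) must be zero */
        return syscall(__ARM_NR_cacheflush, start, end, 0);
    }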
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index b6ee26b0939a..fcb8f7b42271 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -255,12 +255,15 @@ void store_cpu_topology(unsigned int cpuid) | |||
255 | /* Multiprocessor system : Multi-threads per core */ | 255 | /* Multiprocessor system : Multi-threads per core */ |
256 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | 256 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
257 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | 257 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
258 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); | 258 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | |
259 | MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; | ||
259 | } else { | 260 | } else { |
260 | /* Multiprocessor system : Single-thread per core */ | 261 | /* Multiprocessor system : Single-thread per core */ |
261 | cpuid_topo->thread_id = -1; | 262 | cpuid_topo->thread_id = -1; |
262 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | 263 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
263 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | 264 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | |
265 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | | ||
266 | MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; | ||
264 | } | 267 | } |
265 | 268 | ||
266 | pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", | 269 | pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", |
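Note: the topology fix folds the upper affinity levels into cluster_id so that parts differing only in Aff2/Aff3 are no longer collapsed into one cluster. The packing as a runnable sketch (the MPIDR literal is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    /* Aff0..Aff2 sit in bits 7:0, 15:8, 23:16; Aff3 in bits 39:32 */
    #define MPIDR_AFF(mpidr, level) \
        (((mpidr) >> ((level) == 3 ? 32 : (level) * 8)) & 0xff)

    int main(void)
    {
        uint64_t mpidr = 0x0000000100020304ULL; /* Aff3..Aff0 = 1,2,3,4 */

        /* Single-thread-per-core case: fold Aff1..Aff3 together */
        int cluster_id = (int)(MPIDR_AFF(mpidr, 1) |
                               MPIDR_AFF(mpidr, 2) << 8 |
                               MPIDR_AFF(mpidr, 3) << 16);

        printf("cluster_id = 0x%x\n", cluster_id);  /* 0x10203 */
        return 0;
    }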
diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h new file mode 100644 index 000000000000..ae1dd598ea65 --- /dev/null +++ b/arch/arm64/kernel/trace-events-emulation.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM emulation | ||
3 | |||
4 | #if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_EMULATION_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | TRACE_EVENT(instruction_emulation, | ||
10 | |||
11 | TP_PROTO(const char *instr, u64 addr), | ||
12 | TP_ARGS(instr, addr), | ||
13 | |||
14 | TP_STRUCT__entry( | ||
15 | __string(instr, instr) | ||
16 | __field(u64, addr) | ||
17 | ), | ||
18 | |||
19 | TP_fast_assign( | ||
20 | __assign_str(instr, instr); | ||
21 | __entry->addr = addr; | ||
22 | ), | ||
23 | |||
24 | TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr) | ||
25 | ); | ||
26 | |||
27 | #endif /* _TRACE_EMULATION_H */ | ||
28 | |||
29 | /* This part must be outside protection */ | ||
30 | #undef TRACE_INCLUDE_PATH | ||
31 | #undef TRACE_INCLUDE_FILE | ||
32 | #define TRACE_INCLUDE_PATH . | ||
33 | |||
34 | #define TRACE_INCLUDE_FILE trace-events-emulation | ||
35 | #include <trace/define_trace.h> | ||
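Editor's note: including this header with CREATE_TRACE_POINTS defined generates a trace_instruction_emulation() static inline that an emulation hook can fire. The sketch below is illustrative only; the handler name and the "swp" tag are assumptions, not taken from this series:

    /* In exactly one .c file, so the tracepoint bodies are emitted once. */
    #define CREATE_TRACE_POINTS
    #include "trace-events-emulation.h"

    /* Hypothetical emulation hook: trace which instruction was emulated
     * and the faulting PC, then perform the emulation. */
    static int swp_handler(struct pt_regs *regs, u32 instr)
    {
        trace_instruction_emulation("swp", regs->pc);
        /* ... emulate the access, advance regs->pc ... */
        return 0;
    }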
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index de1b085e7963..0a801e3743d5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -259,6 +259,69 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, | |||
259 | } | 259 | } |
260 | } | 260 | } |
261 | 261 | ||
262 | static LIST_HEAD(undef_hook); | ||
263 | static DEFINE_RAW_SPINLOCK(undef_lock); | ||
264 | |||
265 | void register_undef_hook(struct undef_hook *hook) | ||
266 | { | ||
267 | unsigned long flags; | ||
268 | |||
269 | raw_spin_lock_irqsave(&undef_lock, flags); | ||
270 | list_add(&hook->node, &undef_hook); | ||
271 | raw_spin_unlock_irqrestore(&undef_lock, flags); | ||
272 | } | ||
273 | |||
274 | void unregister_undef_hook(struct undef_hook *hook) | ||
275 | { | ||
276 | unsigned long flags; | ||
277 | |||
278 | raw_spin_lock_irqsave(&undef_lock, flags); | ||
279 | list_del(&hook->node); | ||
280 | raw_spin_unlock_irqrestore(&undef_lock, flags); | ||
281 | } | ||
282 | |||
283 | static int call_undef_hook(struct pt_regs *regs) | ||
284 | { | ||
285 | struct undef_hook *hook; | ||
286 | unsigned long flags; | ||
287 | u32 instr; | ||
288 | int (*fn)(struct pt_regs *regs, u32 instr) = NULL; | ||
289 | void __user *pc = (void __user *)instruction_pointer(regs); | ||
290 | |||
291 | if (!user_mode(regs)) | ||
292 | return 1; | ||
293 | |||
294 | if (compat_thumb_mode(regs)) { | ||
295 | /* 16-bit Thumb instruction */ | ||
296 | if (get_user(instr, (u16 __user *)pc)) | ||
297 | goto exit; | ||
298 | instr = le16_to_cpu(instr); | ||
299 | if (aarch32_insn_is_wide(instr)) { | ||
300 | u32 instr2; | ||
301 | |||
302 | if (get_user(instr2, (u16 __user *)(pc + 2))) | ||
303 | goto exit; | ||
304 | instr2 = le16_to_cpu(instr2); | ||
305 | instr = (instr << 16) | instr2; | ||
306 | } | ||
307 | } else { | ||
308 | /* 32-bit ARM instruction */ | ||
309 | if (get_user(instr, (u32 __user *)pc)) | ||
310 | goto exit; | ||
311 | instr = le32_to_cpu(instr); | ||
312 | } | ||
313 | |||
314 | raw_spin_lock_irqsave(&undef_lock, flags); | ||
315 | list_for_each_entry(hook, &undef_hook, node) | ||
316 | if ((instr & hook->instr_mask) == hook->instr_val && | ||
317 | (regs->pstate & hook->pstate_mask) == hook->pstate_val) | ||
318 | fn = hook->fn; | ||
319 | |||
320 | raw_spin_unlock_irqrestore(&undef_lock, flags); | ||
321 | exit: | ||
322 | return fn ? fn(regs, instr) : 1; | ||
323 | } | ||
324 | |||
262 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | 325 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) |
263 | { | 326 | { |
264 | siginfo_t info; | 327 | siginfo_t info; |
@@ -268,6 +331,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
268 | if (!aarch32_break_handler(regs)) | 331 | if (!aarch32_break_handler(regs)) |
269 | return; | 332 | return; |
270 | 333 | ||
334 | if (call_undef_hook(regs) == 0) | ||
335 | return; | ||
336 | |||
271 | if (show_unhandled_signals && unhandled_signal(current, SIGILL) && | 337 | if (show_unhandled_signals && unhandled_signal(current, SIGILL) && |
272 | printk_ratelimit()) { | 338 | printk_ratelimit()) { |
273 | pr_info("%s[%d]: undefined instruction: pc=%p\n", | 339 | pr_info("%s[%d]: undefined instruction: pc=%p\n", |
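Editor's note: a consumer of the new hook infrastructure registers an instruction mask/value pair plus a PSTATE filter, and call_undef_hook() runs the handler when both match. A hedged sketch of a registration for a SWP-like AArch32 encoding; the mask/value pair and handler are examples, not this series' code:

    static int my_swp_handler(struct pt_regs *regs, u32 instr);

    static struct undef_hook swp_hook = {
        .instr_mask  = 0x0fb00ff0,
        .instr_val   = 0x01000090,
        .pstate_mask = COMPAT_PSR_MODE_MASK, /* AArch32 mode bits */
        .pstate_val  = COMPAT_PSR_MODE_USR,  /* user mode only */
        .fn          = my_swp_handler,       /* return 0 if handled */
    };

    static int __init swp_emulation_init(void)
    {
        register_undef_hook(&swp_hook);
        /* unregister_undef_hook(&swp_hook) when the mode is switched off */
        return 0;
    }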
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index edf8715ba39b..9965ec87cbec 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -11,8 +11,9 @@ | |||
11 | 11 | ||
12 | #include "image.h" | 12 | #include "image.h" |
13 | 13 | ||
14 | #define ARM_EXIT_KEEP(x) | 14 | /* .exit.text needed in case of alternative patching */ |
15 | #define ARM_EXIT_DISCARD(x) x | 15 | #define ARM_EXIT_KEEP(x) x |
16 | #define ARM_EXIT_DISCARD(x) | ||
16 | 17 | ||
17 | OUTPUT_ARCH(aarch64) | 18 | OUTPUT_ARCH(aarch64) |
18 | ENTRY(_text) | 19 | ENTRY(_text) |
@@ -32,6 +33,22 @@ jiffies = jiffies_64; | |||
32 | *(.hyp.text) \ | 33 | *(.hyp.text) \ |
33 | VMLINUX_SYMBOL(__hyp_text_end) = .; | 34 | VMLINUX_SYMBOL(__hyp_text_end) = .; |
34 | 35 | ||
36 | /* | ||
37 | * The size of the PE/COFF section that covers the kernel image, which | ||
38 | * runs from stext to _edata, must be a round multiple of the PE/COFF | ||
39 | * FileAlignment, which we set to its minimum value of 0x200. 'stext' | ||
40 | * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned | ||
41 | * boundary should be sufficient. | ||
42 | */ | ||
43 | PECOFF_FILE_ALIGNMENT = 0x200; | ||
44 | |||
45 | #ifdef CONFIG_EFI | ||
46 | #define PECOFF_EDATA_PADDING \ | ||
47 | .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } | ||
48 | #else | ||
49 | #define PECOFF_EDATA_PADDING | ||
50 | #endif | ||
51 | |||
35 | SECTIONS | 52 | SECTIONS |
36 | { | 53 | { |
37 | /* | 54 | /* |
@@ -100,9 +117,21 @@ SECTIONS | |||
100 | . = ALIGN(PAGE_SIZE); | 117 | . = ALIGN(PAGE_SIZE); |
101 | __init_end = .; | 118 | __init_end = .; |
102 | 119 | ||
120 | . = ALIGN(4); | ||
121 | .altinstructions : { | ||
122 | __alt_instructions = .; | ||
123 | *(.altinstructions) | ||
124 | __alt_instructions_end = .; | ||
125 | } | ||
126 | .altinstr_replacement : { | ||
127 | *(.altinstr_replacement) | ||
128 | } | ||
129 | |||
130 | . = ALIGN(PAGE_SIZE); | ||
103 | _data = .; | 131 | _data = .; |
104 | _sdata = .; | 132 | _sdata = .; |
105 | RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) | 133 | RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) |
134 | PECOFF_EDATA_PADDING | ||
106 | _edata = .; | 135 | _edata = .; |
107 | 136 | ||
108 | BSS_SECTION(0, 0, 0) | 137 | BSS_SECTION(0, 0, 0) |
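Editor's note: the PE/COFF padding only needs to round _edata up to the next multiple of FileAlignment; since 0x200 is a power of two, ". = ALIGN(PECOFF_FILE_ALIGNMENT)" reduces to the usual mask arithmetic, checked standalone below:

    #include <assert.h>
    #include <stdint.h>

    #define PECOFF_FILE_ALIGNMENT 0x200

    /* Round x up to the next multiple of a power-of-two alignment. */
    static uint64_t pecoff_align(uint64_t x)
    {
        return (x + PECOFF_FILE_ALIGNMENT - 1) &
               ~(uint64_t)(PECOFF_FILE_ALIGNMENT - 1);
    }

    int main(void)
    {
        assert(pecoff_align(0x12345) == 0x12400);
        assert(pecoff_align(0x12400) == 0x12400); /* already aligned */
        return 0;
    }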
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index b72aa9f9215c..fbe909fb0a1a 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -761,10 +761,10 @@ | |||
761 | .macro activate_traps | 761 | .macro activate_traps |
762 | ldr x2, [x0, #VCPU_HCR_EL2] | 762 | ldr x2, [x0, #VCPU_HCR_EL2] |
763 | msr hcr_el2, x2 | 763 | msr hcr_el2, x2 |
764 | ldr x2, =(CPTR_EL2_TTA) | 764 | mov x2, #CPTR_EL2_TTA |
765 | msr cptr_el2, x2 | 765 | msr cptr_el2, x2 |
766 | 766 | ||
767 | ldr x2, =(1 << 15) // Trap CP15 Cr=15 | 767 | mov x2, #(1 << 15) // Trap CP15 Cr=15 |
768 | msr hstr_el2, x2 | 768 | msr hstr_el2, x2 |
769 | 769 | ||
770 | mrs x2, mdcr_el2 | 770 | mrs x2, mdcr_el2 |
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index c56179ed2c09..773d37a14039 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile | |||
@@ -3,3 +3,4 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
3 | ioremap.o mmap.o pgd.o mmu.o \ | 3 | ioremap.o mmap.o pgd.o mmu.o \ |
4 | context.o proc.o pageattr.o | 4 | context.o proc.o pageattr.o |
5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
6 | obj-$(CONFIG_ARM64_PTDUMP) += dump.o | ||
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 23663837acff..2560e1e1562e 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S | |||
@@ -17,9 +17,12 @@ | |||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/errno.h> | ||
20 | #include <linux/linkage.h> | 21 | #include <linux/linkage.h> |
21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
22 | #include <asm/assembler.h> | 23 | #include <asm/assembler.h> |
24 | #include <asm/cpufeature.h> | ||
25 | #include <asm/alternative-asm.h> | ||
23 | 26 | ||
24 | #include "proc-macros.S" | 27 | #include "proc-macros.S" |
25 | 28 | ||
@@ -138,9 +141,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU | |||
138 | add x4, x4, x2 | 141 | add x4, x4, x2 |
139 | cmp x4, x1 | 142 | cmp x4, x1 |
140 | b.lo 1b | 143 | b.lo 1b |
141 | 9: // ignore any faulting cache operation | ||
142 | dsb ish | 144 | dsb ish |
143 | isb | 145 | isb |
146 | mov x0, #0 | ||
147 | ret | ||
148 | 9: | ||
149 | mov x0, #-EFAULT | ||
144 | ret | 150 | ret |
145 | ENDPROC(flush_icache_range) | 151 | ENDPROC(flush_icache_range) |
146 | ENDPROC(__flush_cache_user_range) | 152 | ENDPROC(__flush_cache_user_range) |
@@ -210,7 +216,7 @@ __dma_clean_range: | |||
210 | dcache_line_size x2, x3 | 216 | dcache_line_size x2, x3 |
211 | sub x3, x2, #1 | 217 | sub x3, x2, #1 |
212 | bic x0, x0, x3 | 218 | bic x0, x0, x3 |
213 | 1: dc cvac, x0 // clean D / U line | 219 | 1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE |
214 | add x0, x0, x2 | 220 | add x0, x0, x2 |
215 | cmp x0, x1 | 221 | cmp x0, x1 |
216 | b.lo 1b | 222 | b.lo 1b |
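Editor's note: alternative_insn emits the default instruction inline and records a replacement in .altinstructions, keyed on a CPU feature/erratum bit; at boot, affected instructions are patched in place. A conceptual C-level sketch of that patching pass, where the struct layout and helper names are illustrative, not the kernel's:

    struct alt_entry {
        void *orig;     /* instruction to patch */
        void *repl;     /* replacement text */
        int   feature;  /* e.g. ARM64_WORKAROUND_CLEAN_CACHE */
        int   len;      /* bytes to copy */
    };

    static void apply_alternatives(struct alt_entry *start, struct alt_entry *end)
    {
        for (struct alt_entry *a = start; a < end; a++)
            if (cpu_have_feature(a->feature))     /* assumed helper */
                patch_text(a->orig, a->repl, a->len); /* assumed helper */
    }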
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c new file mode 100644 index 000000000000..bf69601be546 --- /dev/null +++ b/arch/arm64/mm/dump.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | ||
3 | * Debug helper to dump the current kernel pagetables of the system | ||
4 | * so that we can see what the various memory ranges are set to. | ||
5 | * | ||
6 | * Derived from x86 and arm implementations: | ||
7 | * (C) Copyright 2008 Intel Corporation | ||
8 | * | ||
9 | * Author: Arjan van de Ven <arjan@linux.intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; version 2 | ||
14 | * of the License. | ||
15 | */ | ||
16 | #include <linux/debugfs.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | |||
22 | #include <asm/fixmap.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | |||
25 | #define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS) | ||
26 | |||
27 | struct addr_marker { | ||
28 | unsigned long start_address; | ||
29 | const char *name; | ||
30 | }; | ||
31 | |||
32 | enum address_markers_idx { | ||
33 | VMALLOC_START_NR = 0, | ||
34 | VMALLOC_END_NR, | ||
35 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
36 | VMEMMAP_START_NR, | ||
37 | VMEMMAP_END_NR, | ||
38 | #endif | ||
39 | PCI_START_NR, | ||
40 | PCI_END_NR, | ||
41 | FIXADDR_START_NR, | ||
42 | FIXADDR_END_NR, | ||
43 | MODULES_START_NR, | ||
44 | MODULES_END_NR, | ||
45 | KERNEL_SPACE_NR, | ||
46 | }; | ||
47 | |||
48 | static struct addr_marker address_markers[] = { | ||
49 | { VMALLOC_START, "vmalloc() Area" }, | ||
50 | { VMALLOC_END, "vmalloc() End" }, | ||
51 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
52 | { 0, "vmemmap start" }, | ||
53 | { 0, "vmemmap end" }, | ||
54 | #endif | ||
55 | { (unsigned long) PCI_IOBASE, "PCI I/O start" }, | ||
56 | { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" }, | ||
57 | { FIXADDR_START, "Fixmap start" }, | ||
58 | { FIXADDR_TOP, "Fixmap end" }, | ||
59 | { MODULES_VADDR, "Modules start" }, | ||
60 | { MODULES_END, "Modules end" }, | ||
61 | { PAGE_OFFSET, "Kernel Mapping" }, | ||
62 | { -1, NULL }, | ||
63 | }; | ||
64 | |||
65 | struct pg_state { | ||
66 | struct seq_file *seq; | ||
67 | const struct addr_marker *marker; | ||
68 | unsigned long start_address; | ||
69 | unsigned level; | ||
70 | u64 current_prot; | ||
71 | }; | ||
72 | |||
73 | struct prot_bits { | ||
74 | u64 mask; | ||
75 | u64 val; | ||
76 | const char *set; | ||
77 | const char *clear; | ||
78 | }; | ||
79 | |||
80 | static const struct prot_bits pte_bits[] = { | ||
81 | { | ||
82 | .mask = PTE_USER, | ||
83 | .val = PTE_USER, | ||
84 | .set = "USR", | ||
85 | .clear = " ", | ||
86 | }, { | ||
87 | .mask = PTE_RDONLY, | ||
88 | .val = PTE_RDONLY, | ||
89 | .set = "ro", | ||
90 | .clear = "RW", | ||
91 | }, { | ||
92 | .mask = PTE_PXN, | ||
93 | .val = PTE_PXN, | ||
94 | .set = "NX", | ||
95 | .clear = "x ", | ||
96 | }, { | ||
97 | .mask = PTE_SHARED, | ||
98 | .val = PTE_SHARED, | ||
99 | .set = "SHD", | ||
100 | .clear = " ", | ||
101 | }, { | ||
102 | .mask = PTE_AF, | ||
103 | .val = PTE_AF, | ||
104 | .set = "AF", | ||
105 | .clear = " ", | ||
106 | }, { | ||
107 | .mask = PTE_NG, | ||
108 | .val = PTE_NG, | ||
109 | .set = "NG", | ||
110 | .clear = " ", | ||
111 | }, { | ||
112 | .mask = PTE_UXN, | ||
113 | .val = PTE_UXN, | ||
114 | .set = "UXN", | ||
115 | }, { | ||
116 | .mask = PTE_ATTRINDX_MASK, | ||
117 | .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), | ||
118 | .set = "DEVICE/nGnRnE", | ||
119 | }, { | ||
120 | .mask = PTE_ATTRINDX_MASK, | ||
121 | .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), | ||
122 | .set = "DEVICE/nGnRE", | ||
123 | }, { | ||
124 | .mask = PTE_ATTRINDX_MASK, | ||
125 | .val = PTE_ATTRINDX(MT_DEVICE_GRE), | ||
126 | .set = "DEVICE/GRE", | ||
127 | }, { | ||
128 | .mask = PTE_ATTRINDX_MASK, | ||
129 | .val = PTE_ATTRINDX(MT_NORMAL_NC), | ||
130 | .set = "MEM/NORMAL-NC", | ||
131 | }, { | ||
132 | .mask = PTE_ATTRINDX_MASK, | ||
133 | .val = PTE_ATTRINDX(MT_NORMAL), | ||
134 | .set = "MEM/NORMAL", | ||
135 | } | ||
136 | }; | ||
137 | |||
138 | struct pg_level { | ||
139 | const struct prot_bits *bits; | ||
140 | size_t num; | ||
141 | u64 mask; | ||
142 | }; | ||
143 | |||
144 | static struct pg_level pg_level[] = { | ||
145 | { | ||
146 | }, { /* pgd */ | ||
147 | .bits = pte_bits, | ||
148 | .num = ARRAY_SIZE(pte_bits), | ||
149 | }, { /* pud */ | ||
150 | .bits = pte_bits, | ||
151 | .num = ARRAY_SIZE(pte_bits), | ||
152 | }, { /* pmd */ | ||
153 | .bits = pte_bits, | ||
154 | .num = ARRAY_SIZE(pte_bits), | ||
155 | }, { /* pte */ | ||
156 | .bits = pte_bits, | ||
157 | .num = ARRAY_SIZE(pte_bits), | ||
158 | }, | ||
159 | }; | ||
160 | |||
161 | static void dump_prot(struct pg_state *st, const struct prot_bits *bits, | ||
162 | size_t num) | ||
163 | { | ||
164 | unsigned i; | ||
165 | |||
166 | for (i = 0; i < num; i++, bits++) { | ||
167 | const char *s; | ||
168 | |||
169 | if ((st->current_prot & bits->mask) == bits->val) | ||
170 | s = bits->set; | ||
171 | else | ||
172 | s = bits->clear; | ||
173 | |||
174 | if (s) | ||
175 | seq_printf(st->seq, " %s", s); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | ||
180 | u64 val) | ||
181 | { | ||
182 | static const char units[] = "KMGTPE"; | ||
183 | u64 prot = val & pg_level[level].mask; | ||
184 | |||
185 | if (addr < LOWEST_ADDR) | ||
186 | return; | ||
187 | |||
188 | if (!st->level) { | ||
189 | st->level = level; | ||
190 | st->current_prot = prot; | ||
191 | st->start_address = addr; | ||
192 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | ||
193 | } else if (prot != st->current_prot || level != st->level || | ||
194 | addr >= st->marker[1].start_address) { | ||
195 | const char *unit = units; | ||
196 | unsigned long delta; | ||
197 | |||
198 | if (st->current_prot) { | ||
199 | seq_printf(st->seq, "0x%16lx-0x%16lx ", | ||
200 | st->start_address, addr); | ||
201 | |||
202 | delta = (addr - st->start_address) >> 10; | ||
203 | while (!(delta & 1023) && unit[1]) { | ||
204 | delta >>= 10; | ||
205 | unit++; | ||
206 | } | ||
207 | seq_printf(st->seq, "%9lu%c", delta, *unit); | ||
208 | if (pg_level[st->level].bits) | ||
209 | dump_prot(st, pg_level[st->level].bits, | ||
210 | pg_level[st->level].num); | ||
211 | seq_puts(st->seq, "\n"); | ||
212 | } | ||
213 | |||
214 | if (addr >= st->marker[1].start_address) { | ||
215 | st->marker++; | ||
216 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | ||
217 | } | ||
218 | |||
219 | st->start_address = addr; | ||
220 | st->current_prot = prot; | ||
221 | st->level = level; | ||
222 | } | ||
223 | |||
224 | if (addr >= st->marker[1].start_address) { | ||
225 | st->marker++; | ||
226 | seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); | ||
227 | } | ||
228 | |||
229 | } | ||
230 | |||
231 | static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) | ||
232 | { | ||
233 | pte_t *pte = pte_offset_kernel(pmd, 0); | ||
234 | unsigned long addr; | ||
235 | unsigned i; | ||
236 | |||
237 | for (i = 0; i < PTRS_PER_PTE; i++, pte++) { | ||
238 | addr = start + i * PAGE_SIZE; | ||
239 | note_page(st, addr, 4, pte_val(*pte)); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | ||
244 | { | ||
245 | pmd_t *pmd = pmd_offset(pud, 0); | ||
246 | unsigned long addr; | ||
247 | unsigned i; | ||
248 | |||
249 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { | ||
250 | addr = start + i * PMD_SIZE; | ||
251 | if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd)) | ||
252 | note_page(st, addr, 3, pmd_val(*pmd)); | ||
253 | else | ||
254 | walk_pte(st, pmd, addr); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) | ||
259 | { | ||
260 | pud_t *pud = pud_offset(pgd, 0); | ||
261 | unsigned long addr; | ||
262 | unsigned i; | ||
263 | |||
264 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { | ||
265 | addr = start + i * PUD_SIZE; | ||
266 | if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud)) | ||
267 | note_page(st, addr, 2, pud_val(*pud)); | ||
268 | else | ||
269 | walk_pmd(st, pud, addr); | ||
270 | } | ||
271 | } | ||
272 | |||
273 | static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) | ||
274 | { | ||
275 | pgd_t *pgd = pgd_offset(mm, 0); | ||
276 | unsigned i; | ||
277 | unsigned long addr; | ||
278 | |||
279 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { | ||
280 | addr = start + i * PGDIR_SIZE; | ||
281 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | ||
282 | note_page(st, addr, 1, pgd_val(*pgd)); | ||
283 | else | ||
284 | walk_pud(st, pgd, addr); | ||
285 | } | ||
286 | } | ||
287 | |||
288 | static int ptdump_show(struct seq_file *m, void *v) | ||
289 | { | ||
290 | struct pg_state st = { | ||
291 | .seq = m, | ||
292 | .marker = address_markers, | ||
293 | }; | ||
294 | |||
295 | walk_pgd(&st, &init_mm, LOWEST_ADDR); | ||
296 | |||
297 | note_page(&st, 0, 0, 0); | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | static int ptdump_open(struct inode *inode, struct file *file) | ||
302 | { | ||
303 | return single_open(file, ptdump_show, NULL); | ||
304 | } | ||
305 | |||
306 | static const struct file_operations ptdump_fops = { | ||
307 | .open = ptdump_open, | ||
308 | .read = seq_read, | ||
309 | .llseek = seq_lseek, | ||
310 | .release = single_release, | ||
311 | }; | ||
312 | |||
313 | static int ptdump_init(void) | ||
314 | { | ||
315 | struct dentry *pe; | ||
316 | unsigned i, j; | ||
317 | |||
318 | for (i = 0; i < ARRAY_SIZE(pg_level); i++) | ||
319 | if (pg_level[i].bits) | ||
320 | for (j = 0; j < pg_level[i].num; j++) | ||
321 | pg_level[i].mask |= pg_level[i].bits[j].mask; | ||
322 | |||
323 | address_markers[VMEMMAP_START_NR].start_address = | ||
324 | (unsigned long)virt_to_page(PAGE_OFFSET); | ||
325 | address_markers[VMEMMAP_END_NR].start_address = | ||
326 | (unsigned long)virt_to_page(high_memory); | ||
327 | |||
328 | pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, | ||
329 | &ptdump_fops); | ||
330 | return pe ? 0 : -ENOMEM; | ||
331 | } | ||
332 | device_initcall(ptdump_init); | ||
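Editor's note: the dump is read back from debugfs (mount debugfs, then read /sys/kernel/debug/kernel_page_tables as root, since the file is created mode 0400). The size column comes from the unit-reduction loop in note_page(); a standalone version for reference:

    #include <stdio.h>

    /* Shift the byte count to KiB, then keep dividing by 1024 while the
     * remainder is zero and a larger unit exists, as note_page() does. */
    static void print_size(unsigned long bytes)
    {
        static const char units[] = "KMGTPE";
        const char *unit = units;
        unsigned long delta = bytes >> 10;

        while (!(delta & 1023) && unit[1]) {
            delta >>= 10;
            unit++;
        }
        printf("%9lu%c\n", delta, *unit);
    }

    int main(void)
    {
        print_size(4096);       /* "        4K" */
        print_size(2UL << 20);  /* "        2M" */
        print_size(3UL << 30);  /* "        3G" */
        return 0;
    }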
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 41cb6d3d6075..c11cd27ca8f5 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -380,7 +380,7 @@ static struct fault_info { | |||
380 | { do_bad, SIGBUS, 0, "level 1 address size fault" }, | 380 | { do_bad, SIGBUS, 0, "level 1 address size fault" }, |
381 | { do_bad, SIGBUS, 0, "level 2 address size fault" }, | 381 | { do_bad, SIGBUS, 0, "level 2 address size fault" }, |
382 | { do_bad, SIGBUS, 0, "level 3 address size fault" }, | 382 | { do_bad, SIGBUS, 0, "level 3 address size fault" }, |
383 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" }, | 383 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, |
384 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | 384 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, |
385 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | 385 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, |
386 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | 386 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 494297c698ca..bac492c12fcc 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/setup.h> | 39 | #include <asm/setup.h> |
40 | #include <asm/sizes.h> | 40 | #include <asm/sizes.h> |
41 | #include <asm/tlb.h> | 41 | #include <asm/tlb.h> |
42 | #include <asm/alternative.h> | ||
42 | 43 | ||
43 | #include "mm.h" | 44 | #include "mm.h" |
44 | 45 | ||
@@ -325,6 +326,7 @@ void __init mem_init(void) | |||
325 | void free_initmem(void) | 326 | void free_initmem(void) |
326 | { | 327 | { |
327 | free_initmem_default(0); | 328 | free_initmem_default(0); |
329 | free_alternatives_memory(); | ||
328 | } | 330 | } |
329 | 331 | ||
330 | #ifdef CONFIG_BLK_DEV_INITRD | 332 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 4a07630a6616..cbb99c8f1e04 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c | |||
@@ -103,97 +103,10 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) | |||
103 | } | 103 | } |
104 | EXPORT_SYMBOL(ioremap_cache); | 104 | EXPORT_SYMBOL(ioremap_cache); |
105 | 105 | ||
106 | static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; | 106 | /* |
107 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | 107 | * Must be called after early_fixmap_init |
108 | static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; | 108 | */ |
109 | #endif | ||
110 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
111 | static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; | ||
112 | #endif | ||
113 | |||
114 | static inline pud_t * __init early_ioremap_pud(unsigned long addr) | ||
115 | { | ||
116 | pgd_t *pgd; | ||
117 | |||
118 | pgd = pgd_offset_k(addr); | ||
119 | BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); | ||
120 | |||
121 | return pud_offset(pgd, addr); | ||
122 | } | ||
123 | |||
124 | static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) | ||
125 | { | ||
126 | pud_t *pud = early_ioremap_pud(addr); | ||
127 | |||
128 | BUG_ON(pud_none(*pud) || pud_bad(*pud)); | ||
129 | |||
130 | return pmd_offset(pud, addr); | ||
131 | } | ||
132 | |||
133 | static inline pte_t * __init early_ioremap_pte(unsigned long addr) | ||
134 | { | ||
135 | pmd_t *pmd = early_ioremap_pmd(addr); | ||
136 | |||
137 | BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); | ||
138 | |||
139 | return pte_offset_kernel(pmd, addr); | ||
140 | } | ||
141 | |||
142 | void __init early_ioremap_init(void) | 109 | void __init early_ioremap_init(void) |
143 | { | 110 | { |
144 | pgd_t *pgd; | ||
145 | pud_t *pud; | ||
146 | pmd_t *pmd; | ||
147 | unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN); | ||
148 | |||
149 | pgd = pgd_offset_k(addr); | ||
150 | pgd_populate(&init_mm, pgd, bm_pud); | ||
151 | pud = pud_offset(pgd, addr); | ||
152 | pud_populate(&init_mm, pud, bm_pmd); | ||
153 | pmd = pmd_offset(pud, addr); | ||
154 | pmd_populate_kernel(&init_mm, pmd, bm_pte); | ||
155 | |||
156 | /* | ||
157 | * The boot-ioremap range spans multiple pmds, for which | ||
158 | * we are not prepared: | ||
159 | */ | ||
160 | BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) | ||
161 | != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); | ||
162 | |||
163 | if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { | ||
164 | WARN_ON(1); | ||
165 | pr_warn("pmd %p != %p\n", | ||
166 | pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); | ||
167 | pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", | ||
168 | fix_to_virt(FIX_BTMAP_BEGIN)); | ||
169 | pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", | ||
170 | fix_to_virt(FIX_BTMAP_END)); | ||
171 | |||
172 | pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); | ||
173 | pr_warn("FIX_BTMAP_BEGIN: %d\n", | ||
174 | FIX_BTMAP_BEGIN); | ||
175 | } | ||
176 | |||
177 | early_ioremap_setup(); | 111 | early_ioremap_setup(); |
178 | } | 112 | } |
179 | |||
180 | void __init __early_set_fixmap(enum fixed_addresses idx, | ||
181 | phys_addr_t phys, pgprot_t flags) | ||
182 | { | ||
183 | unsigned long addr = __fix_to_virt(idx); | ||
184 | pte_t *pte; | ||
185 | |||
186 | if (idx >= __end_of_fixed_addresses) { | ||
187 | BUG(); | ||
188 | return; | ||
189 | } | ||
190 | |||
191 | pte = early_ioremap_pte(addr); | ||
192 | |||
193 | if (pgprot_val(flags)) | ||
194 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); | ||
195 | else { | ||
196 | pte_clear(&init_mm, addr, pte); | ||
197 | flush_tlb_kernel_range(addr, addr+PAGE_SIZE); | ||
198 | } | ||
199 | } | ||
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h index d519f4f50c8c..50c3351df9c7 100644 --- a/arch/arm64/mm/mm.h +++ b/arch/arm64/mm/mm.h | |||
@@ -1,2 +1 @@ | |||
1 | extern void __init bootmem_init(void); | extern void __init bootmem_init(void); | |
2 | extern void __init arm64_swiotlb_init(void); | ||
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 1d73662f00ff..54922d1275b8 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c | |||
@@ -47,22 +47,14 @@ static int mmap_is_legacy(void) | |||
47 | return sysctl_legacy_va_layout; | 47 | return sysctl_legacy_va_layout; |
48 | } | 48 | } |
49 | 49 | ||
50 | /* | ||
51 | * Since get_random_int() returns the same value within a 1 jiffy window, we | ||
52 | * will almost always get the same randomisation for the stack and mmap | ||
53 | * region. This will mean the relative distance between stack and mmap will be | ||
54 | * the same. | ||
55 | * | ||
56 | * To avoid this we can shift the randomness by 1 bit. | ||
57 | */ | ||
58 | static unsigned long mmap_rnd(void) | 50 | static unsigned long mmap_rnd(void) |
59 | { | 51 | { |
60 | unsigned long rnd = 0; | 52 | unsigned long rnd = 0; |
61 | 53 | ||
62 | if (current->flags & PF_RANDOMIZE) | 54 | if (current->flags & PF_RANDOMIZE) |
63 | rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); | 55 | rnd = (long)get_random_int() & STACK_RND_MASK; |
64 | 56 | ||
65 | return rnd << (PAGE_SHIFT + 1); | 57 | return rnd << PAGE_SHIFT; |
66 | } | 58 | } |
67 | 59 | ||
68 | static unsigned long mmap_base(void) | 60 | static unsigned long mmap_base(void) |
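Editor's note: the old code halved the random bits and shifted by PAGE_SHIFT + 1 to decorrelate stack and mmap placement; the new code uses the full mask and the plain page shift. A standalone comparison of the two, assuming 18 random bits (0x3ffff), which matches arm64's 64-bit STACK_RND_MASK with 4K pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define STACK_RND_MASK 0x3ffffUL /* assumption: arm64 64-bit, 4K pages */

    int main(void)
    {
        uint64_t raw = 0x2a5a5; /* stand-in for get_random_int() */

        /* before: 17 bits of entropy, lowest randomised page bit always 0 */
        uint64_t old_rnd = (raw & (STACK_RND_MASK >> 1)) << (PAGE_SHIFT + 1);
        /* after: the full 18 bits, every page position reachable */
        uint64_t new_rnd = (raw & STACK_RND_MASK) << PAGE_SHIFT;

        printf("old=%#llx new=%#llx\n",
               (unsigned long long)old_rnd, (unsigned long long)new_rnd);
        return 0;
    }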
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f4f8b500f74c..6032f3e3056a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | 29 | ||
30 | #include <asm/cputype.h> | 30 | #include <asm/cputype.h> |
31 | #include <asm/fixmap.h> | ||
31 | #include <asm/sections.h> | 32 | #include <asm/sections.h> |
32 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
33 | #include <asm/sizes.h> | 34 | #include <asm/sizes.h> |
@@ -463,3 +464,96 @@ void vmemmap_free(unsigned long start, unsigned long end) | |||
463 | { | 464 | { |
464 | } | 465 | } |
465 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 466 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
467 | |||
468 | static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; | ||
469 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | ||
470 | static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; | ||
471 | #endif | ||
472 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
473 | static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; | ||
474 | #endif | ||
475 | |||
476 | static inline pud_t * fixmap_pud(unsigned long addr) | ||
477 | { | ||
478 | pgd_t *pgd = pgd_offset_k(addr); | ||
479 | |||
480 | BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); | ||
481 | |||
482 | return pud_offset(pgd, addr); | ||
483 | } | ||
484 | |||
485 | static inline pmd_t * fixmap_pmd(unsigned long addr) | ||
486 | { | ||
487 | pud_t *pud = fixmap_pud(addr); | ||
488 | |||
489 | BUG_ON(pud_none(*pud) || pud_bad(*pud)); | ||
490 | |||
491 | return pmd_offset(pud, addr); | ||
492 | } | ||
493 | |||
494 | static inline pte_t * fixmap_pte(unsigned long addr) | ||
495 | { | ||
496 | pmd_t *pmd = fixmap_pmd(addr); | ||
497 | |||
498 | BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); | ||
499 | |||
500 | return pte_offset_kernel(pmd, addr); | ||
501 | } | ||
502 | |||
503 | void __init early_fixmap_init(void) | ||
504 | { | ||
505 | pgd_t *pgd; | ||
506 | pud_t *pud; | ||
507 | pmd_t *pmd; | ||
508 | unsigned long addr = FIXADDR_START; | ||
509 | |||
510 | pgd = pgd_offset_k(addr); | ||
511 | pgd_populate(&init_mm, pgd, bm_pud); | ||
512 | pud = pud_offset(pgd, addr); | ||
513 | pud_populate(&init_mm, pud, bm_pmd); | ||
514 | pmd = pmd_offset(pud, addr); | ||
515 | pmd_populate_kernel(&init_mm, pmd, bm_pte); | ||
516 | |||
517 | /* | ||
518 | * The boot-ioremap range spans multiple pmds, for which | ||
519 | * we are not prepared: | ||
520 | */ | ||
521 | BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) | ||
522 | != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); | ||
523 | |||
524 | if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) | ||
525 | || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { | ||
526 | WARN_ON(1); | ||
527 | pr_warn("pmd %p != %p, %p\n", | ||
528 | pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), | ||
529 | fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); | ||
530 | pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", | ||
531 | fix_to_virt(FIX_BTMAP_BEGIN)); | ||
532 | pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", | ||
533 | fix_to_virt(FIX_BTMAP_END)); | ||
534 | |||
535 | pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); | ||
536 | pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); | ||
537 | } | ||
538 | } | ||
539 | |||
540 | void __set_fixmap(enum fixed_addresses idx, | ||
541 | phys_addr_t phys, pgprot_t flags) | ||
542 | { | ||
543 | unsigned long addr = __fix_to_virt(idx); | ||
544 | pte_t *pte; | ||
545 | |||
546 | if (idx >= __end_of_fixed_addresses) { | ||
547 | BUG(); | ||
548 | return; | ||
549 | } | ||
550 | |||
551 | pte = fixmap_pte(addr); | ||
552 | |||
553 | if (pgprot_val(flags)) { | ||
554 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); | ||
555 | } else { | ||
556 | pte_clear(&init_mm, addr, pte); | ||
557 | flush_tlb_kernel_range(addr, addr+PAGE_SIZE); | ||
558 | } | ||
559 | } | ||
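Editor's note: __set_fixmap() pairs a compile-time fixed virtual slot with a runtime physical address. A hedged sketch of a caller; FIX_EXAMPLE and FIXMAP_PAGE_IO are placeholder names, not necessarily present in this tree:

    /* Map a device page at a fixed virtual slot, then unmap it again. */
    static void __iomem *map_fixed_io(phys_addr_t phys)
    {
        __set_fixmap(FIX_EXAMPLE, phys, FIXMAP_PAGE_IO);
        return (void __iomem *)fix_to_virt(FIX_EXAMPLE);
    }

    static void unmap_fixed_io(void)
    {
        /* a zero pgprot clears the PTE and flushes the TLB, per the code above */
        __set_fixmap(FIX_EXAMPLE, 0, __pgprot(0));
    }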
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 6682b361d3ac..71ca104f97bd 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c | |||
@@ -35,9 +35,9 @@ static struct kmem_cache *pgd_cache; | |||
35 | pgd_t *pgd_alloc(struct mm_struct *mm) | 35 | pgd_t *pgd_alloc(struct mm_struct *mm) |
36 | { | 36 | { |
37 | if (PGD_SIZE == PAGE_SIZE) | 37 | if (PGD_SIZE == PAGE_SIZE) |
38 | return (pgd_t *)get_zeroed_page(GFP_KERNEL); | 38 | return (pgd_t *)__get_free_page(PGALLOC_GFP); |
39 | else | 39 | else |
40 | return kmem_cache_zalloc(pgd_cache, GFP_KERNEL); | 40 | return kmem_cache_alloc(pgd_cache, PGALLOC_GFP); |
41 | } | 41 | } |
42 | 42 | ||
43 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 43 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 41f1e3e2ea24..edba042b2325 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
@@ -60,7 +60,7 @@ struct jit_ctx { | |||
60 | const struct bpf_prog *prog; | 60 | const struct bpf_prog *prog; |
61 | int idx; | 61 | int idx; |
62 | int tmp_used; | 62 | int tmp_used; |
63 | int body_offset; | 63 | int epilogue_offset; |
64 | int *offset; | 64 | int *offset; |
65 | u32 *image; | 65 | u32 *image; |
66 | }; | 66 | }; |
@@ -130,8 +130,8 @@ static void jit_fill_hole(void *area, unsigned int size) | |||
130 | 130 | ||
131 | static inline int epilogue_offset(const struct jit_ctx *ctx) | 131 | static inline int epilogue_offset(const struct jit_ctx *ctx) |
132 | { | 132 | { |
133 | int to = ctx->offset[ctx->prog->len - 1]; | 133 | int to = ctx->epilogue_offset; |
134 | int from = ctx->idx - ctx->body_offset; | 134 | int from = ctx->idx; |
135 | 135 | ||
136 | return to - from; | 136 | return to - from; |
137 | } | 137 | } |
@@ -463,6 +463,8 @@ emit_cond_jmp: | |||
463 | } | 463 | } |
464 | /* function return */ | 464 | /* function return */ |
465 | case BPF_JMP | BPF_EXIT: | 465 | case BPF_JMP | BPF_EXIT: |
466 | /* Optimization: when last instruction is EXIT, | ||
467 | simply fallthrough to epilogue. */ | ||
466 | if (i == ctx->prog->len - 1) | 468 | if (i == ctx->prog->len - 1) |
467 | break; | 469 | break; |
468 | jmp_offset = epilogue_offset(ctx); | 470 | jmp_offset = epilogue_offset(ctx); |
@@ -685,11 +687,13 @@ void bpf_int_jit_compile(struct bpf_prog *prog) | |||
685 | 687 | ||
686 | /* 1. Initial fake pass to compute ctx->idx. */ | 688 | /* 1. Initial fake pass to compute ctx->idx. */ |
687 | 689 | ||
688 | /* Fake pass to fill in ctx->offset. */ | 690 | /* Fake pass to fill in ctx->offset and ctx->tmp_used. */ |
689 | if (build_body(&ctx)) | 691 | if (build_body(&ctx)) |
690 | goto out; | 692 | goto out; |
691 | 693 | ||
692 | build_prologue(&ctx); | 694 | build_prologue(&ctx); |
695 | |||
696 | ctx.epilogue_offset = ctx.idx; | ||
693 | build_epilogue(&ctx); | 697 | build_epilogue(&ctx); |
694 | 698 | ||
695 | /* Now we know the actual image size. */ | 699 | /* Now we know the actual image size. */ |
@@ -706,7 +710,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog) | |||
706 | 710 | ||
707 | build_prologue(&ctx); | 711 | build_prologue(&ctx); |
708 | 712 | ||
709 | ctx.body_offset = ctx.idx; | ||
710 | if (build_body(&ctx)) { | 713 | if (build_body(&ctx)) { |
711 | bpf_jit_binary_free(header); | 714 | bpf_jit_binary_free(header); |
712 | goto out; | 715 | goto out; |
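Editor's note: recording ctx.epilogue_offset at the epilogue boundary makes epilogue_offset() valid in both the sizing pass and the emission pass, instead of depending on the last instruction's offset plus a body_offset correction. A simplified toy model of why a recorded index carries across passes (names mirror jit_ctx, but the code is illustrative):

    /* Both passes emit the same instruction stream, so an index recorded
     * at the epilogue boundary in pass 1 is valid in pass 2 as well. */
    struct ctx { int idx; int epilogue_offset; };

    static void emit(struct ctx *c)           { c->idx++; }
    static void build_prologue(struct ctx *c) { emit(c); emit(c); }
    static void build_body(struct ctx *c)     { emit(c); emit(c); emit(c); }

    static int epilogue_offset(const struct ctx *c)
    {
        /* branch displacement, in instructions, from the current insn */
        return c->epilogue_offset - c->idx;
    }

    static void two_pass(struct ctx *c)
    {
        c->idx = 0;                   /* pass 1: sizing */
        build_prologue(c);
        build_body(c);
        c->epilogue_offset = c->idx;  /* epilogue starts here in both passes */

        c->idx = 0;                   /* pass 2: emission reuses the value; */
        build_prologue(c);            /* EXIT insns call epilogue_offset() */
        build_body(c);
    }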
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h index 8aa97817cc8c..99b6ded54849 100644 --- a/arch/microblaze/include/asm/tlb.h +++ b/arch/microblaze/include/asm/tlb.h | |||
@@ -14,7 +14,6 @@ | |||
14 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | 14 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) |
15 | 15 | ||
16 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
17 | #include <asm-generic/tlb.h> | ||
18 | 17 | ||
19 | #ifdef CONFIG_MMU | 18 | #ifdef CONFIG_MMU |
20 | #define tlb_start_vma(tlb, vma) do { } while (0) | 19 | #define tlb_start_vma(tlb, vma) do { } while (0) |
@@ -22,4 +21,6 @@ | |||
22 | #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) | 21 | #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) |
23 | #endif | 22 | #endif |
24 | 23 | ||
24 | #include <asm-generic/tlb.h> | ||
25 | |||
25 | #endif /* _ASM_MICROBLAZE_TLB_H */ | 26 | #endif /* _ASM_MICROBLAZE_TLB_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index e9a9f60e596d..fc3ee06eab87 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <asm-generic/tlb.h> | ||
7 | 6 | ||
8 | #ifdef CONFIG_PPC_BOOK3E | 7 | #ifdef CONFIG_PPC_BOOK3E |
9 | extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address); | 8 | extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address); |
@@ -14,6 +13,8 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, | |||
14 | } | 13 | } |
15 | #endif /* !CONFIG_PPC_BOOK3E */ | 14 | #endif /* !CONFIG_PPC_BOOK3E */ |
16 | 15 | ||
16 | extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | ||
17 | |||
17 | #ifdef CONFIG_PPC64 | 18 | #ifdef CONFIG_PPC64 |
18 | #include <asm/pgalloc-64.h> | 19 | #include <asm/pgalloc-64.h> |
19 | #else | 20 | #else |
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index e2b428b0f7ba..20733fa518ae 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #define tlb_start_vma(tlb, vma) do { } while (0) | 28 | #define tlb_start_vma(tlb, vma) do { } while (0) |
29 | #define tlb_end_vma(tlb, vma) do { } while (0) | 29 | #define tlb_end_vma(tlb, vma) do { } while (0) |
30 | #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry | ||
30 | 31 | ||
31 | extern void tlb_flush(struct mmu_gather *tlb); | 32 | extern void tlb_flush(struct mmu_gather *tlb); |
32 | 33 | ||
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 7e70ae968e5f..6a4a5fcb9730 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -517,8 +517,6 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
517 | for (i = 0; i < num_hugepd; i++, hpdp++) | 517 | for (i = 0; i < num_hugepd; i++, hpdp++) |
518 | hpdp->pd = 0; | 518 | hpdp->pd = 0; |
519 | 519 | ||
520 | tlb->need_flush = 1; | ||
521 | |||
522 | #ifdef CONFIG_PPC_FSL_BOOK3E | 520 | #ifdef CONFIG_PPC_FSL_BOOK3E |
523 | hugepd_free(tlb, hugepte); | 521 | hugepd_free(tlb, hugepte); |
524 | #else | 522 | #else |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 17afc51f3054..c5f7b4e9eb6c 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -93,6 +93,12 @@ static void dmi_table(u8 *buf, int len, int num, | |||
93 | const struct dmi_header *dm = (const struct dmi_header *)data; | 93 | const struct dmi_header *dm = (const struct dmi_header *)data; |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] | ||
97 | */ | ||
98 | if (dm->type == DMI_ENTRY_END_OF_TABLE) | ||
99 | break; | ||
100 | |||
101 | /* | ||
96 | * We want to know the total length (formatted area and | 102 | * We want to know the total length (formatted area and |
97 | * strings) before decoding to make sure we won't run off the | 103 | * strings) before decoding to make sure we won't run off the |
98 | * table in dmi_decode or dmi_string | 104 | * table in dmi_decode or dmi_string |
@@ -107,7 +113,7 @@ static void dmi_table(u8 *buf, int len, int num, | |||
107 | } | 113 | } |
108 | } | 114 | } |
109 | 115 | ||
110 | static u32 dmi_base; | 116 | static phys_addr_t dmi_base; |
111 | static u16 dmi_len; | 117 | static u16 dmi_len; |
112 | static u16 dmi_num; | 118 | static u16 dmi_num; |
113 | 119 | ||
@@ -467,7 +473,7 @@ static int __init dmi_present(const u8 *buf) | |||
467 | 473 | ||
468 | if (memcmp(buf, "_SM_", 4) == 0 && | 474 | if (memcmp(buf, "_SM_", 4) == 0 && |
469 | buf[5] < 32 && dmi_checksum(buf, buf[5])) { | 475 | buf[5] < 32 && dmi_checksum(buf, buf[5])) { |
470 | smbios_ver = (buf[6] << 8) + buf[7]; | 476 | smbios_ver = get_unaligned_be16(buf + 6); |
471 | 477 | ||
472 | /* Some BIOS report weird SMBIOS version, fix that up */ | 478 | /* Some BIOS report weird SMBIOS version, fix that up */ |
473 | switch (smbios_ver) { | 479 | switch (smbios_ver) { |
@@ -489,10 +495,9 @@ static int __init dmi_present(const u8 *buf) | |||
489 | buf += 16; | 495 | buf += 16; |
490 | 496 | ||
491 | if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) { | 497 | if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) { |
492 | dmi_num = (buf[13] << 8) | buf[12]; | 498 | dmi_num = get_unaligned_le16(buf + 12); |
493 | dmi_len = (buf[7] << 8) | buf[6]; | 499 | dmi_len = get_unaligned_le16(buf + 6); |
494 | dmi_base = (buf[11] << 24) | (buf[10] << 16) | | 500 | dmi_base = get_unaligned_le32(buf + 8); |
495 | (buf[9] << 8) | buf[8]; | ||
496 | 501 | ||
497 | if (dmi_walk_early(dmi_decode) == 0) { | 502 | if (dmi_walk_early(dmi_decode) == 0) { |
498 | if (smbios_ver) { | 503 | if (smbios_ver) { |
@@ -514,12 +519,72 @@ static int __init dmi_present(const u8 *buf) | |||
514 | return 1; | 519 | return 1; |
515 | } | 520 | } |
516 | 521 | ||
522 | /* | ||
523 | * Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy | ||
524 | * 32-bit entry point, there is no embedded DMI header (_DMI_) in here. | ||
525 | */ | ||
526 | static int __init dmi_smbios3_present(const u8 *buf) | ||
527 | { | ||
528 | if (memcmp(buf, "_SM3_", 5) == 0 && | ||
529 | buf[6] < 32 && dmi_checksum(buf, buf[6])) { | ||
530 | dmi_ver = get_unaligned_be16(buf + 7); | ||
531 | dmi_len = get_unaligned_le32(buf + 12); | ||
532 | dmi_base = get_unaligned_le64(buf + 16); | ||
533 | |||
534 | /* | ||
535 | * The 64-bit SMBIOS 3.0 entry point no longer has a field | ||
536 | * containing the number of structures present in the table. | ||
537 | * Instead, it defines the table size as a maximum size, and | ||
538 | * relies on the end-of-table structure type (#127) to be used | ||
539 | * to signal the end of the table. | ||
540 | * So let's define dmi_num as an upper bound as well: each | ||
541 | * structure has a 4 byte header, so dmi_len / 4 is an upper | ||
542 | * bound for the number of structures in the table. | ||
543 | */ | ||
544 | dmi_num = dmi_len / 4; | ||
545 | |||
546 | if (dmi_walk_early(dmi_decode) == 0) { | ||
547 | pr_info("SMBIOS %d.%d present.\n", | ||
548 | dmi_ver >> 8, dmi_ver & 0xFF); | ||
549 | dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string)); | ||
550 | pr_debug("DMI: %s\n", dmi_ids_string); | ||
551 | return 0; | ||
552 | } | ||
553 | } | ||
554 | return 1; | ||
555 | } | ||
556 | |||
517 | void __init dmi_scan_machine(void) | 557 | void __init dmi_scan_machine(void) |
518 | { | 558 | { |
519 | char __iomem *p, *q; | 559 | char __iomem *p, *q; |
520 | char buf[32]; | 560 | char buf[32]; |
521 | 561 | ||
522 | if (efi_enabled(EFI_CONFIG_TABLES)) { | 562 | if (efi_enabled(EFI_CONFIG_TABLES)) { |
563 | /* | ||
564 | * According to the DMTF SMBIOS reference spec v3.0.0, it is | ||
565 | * allowed to define both the 64-bit entry point (smbios3) and | ||
566 | * the 32-bit entry point (smbios), in which case they should | ||
567 | * either both point to the same SMBIOS structure table, or the | ||
568 | * table pointed to by the 64-bit entry point should contain a | ||
569 | * superset of the table contents pointed to by the 32-bit entry | ||
570 | * point (section 5.2). | ||
571 | * This implies that the 64-bit entry point should have | ||
572 | * precedence if it is defined and supported by the OS. If we | ||
573 | * have the 64-bit entry point, but fail to decode it, fall | ||
574 | * back to the legacy one (if available) | ||
575 | */ | ||
576 | if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) { | ||
577 | p = dmi_early_remap(efi.smbios3, 32); | ||
578 | if (p == NULL) | ||
579 | goto error; | ||
580 | memcpy_fromio(buf, p, 32); | ||
581 | dmi_early_unmap(p, 32); | ||
582 | |||
583 | if (!dmi_smbios3_present(buf)) { | ||
584 | dmi_available = 1; | ||
585 | goto out; | ||
586 | } | ||
587 | } | ||
523 | if (efi.smbios == EFI_INVALID_TABLE_ADDR) | 588 | if (efi.smbios == EFI_INVALID_TABLE_ADDR) |
524 | goto error; | 589 | goto error; |
525 | 590 | ||
@@ -552,7 +617,7 @@ void __init dmi_scan_machine(void) | |||
552 | memset(buf, 0, 16); | 617 | memset(buf, 0, 16); |
553 | for (q = p; q < p + 0x10000; q += 16) { | 618 | for (q = p; q < p + 0x10000; q += 16) { |
554 | memcpy_fromio(buf + 16, q, 16); | 619 | memcpy_fromio(buf + 16, q, 16); |
555 | if (!dmi_present(buf)) { | 620 | if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { |
556 | dmi_available = 1; | 621 | dmi_available = 1; |
557 | dmi_early_unmap(p, 0x10000); | 622 | dmi_early_unmap(p, 0x10000); |
558 | goto out; | 623 | goto out; |
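Editor's note: the 64-bit entry point drops the structure-count field, so dmi_num becomes the dmi_len / 4 upper bound described in the comment. A standalone model of the "_SM3_" field extraction, following the offsets used by dmi_smbios3_present(); the helpers assume a little-endian host where the kernel would use get_unaligned_le*():

    #include <stdint.h>
    #include <string.h>

    /* Little-endian-host stand-ins for get_unaligned_le32/le64. */
    static uint32_t le32_at(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
    static uint64_t le64_at(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; }

    struct smbios3 { uint16_t ver; uint32_t len, num; uint64_t base; };

    static int parse_smbios3(const uint8_t *buf, struct smbios3 *out)
    {
        if (memcmp(buf, "_SM3_", 5))
            return -1;
        out->ver  = buf[7] << 8 | buf[8]; /* big-endian major.minor */
        out->len  = le32_at(buf + 12);    /* maximum table size */
        out->base = le64_at(buf + 16);
        out->num  = out->len / 4;         /* upper bound: 4-byte header each */
        return 0;
    }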
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 8590099ac148..9035c1b74d58 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -30,6 +30,7 @@ struct efi __read_mostly efi = { | |||
30 | .acpi = EFI_INVALID_TABLE_ADDR, | 30 | .acpi = EFI_INVALID_TABLE_ADDR, |
31 | .acpi20 = EFI_INVALID_TABLE_ADDR, | 31 | .acpi20 = EFI_INVALID_TABLE_ADDR, |
32 | .smbios = EFI_INVALID_TABLE_ADDR, | 32 | .smbios = EFI_INVALID_TABLE_ADDR, |
33 | .smbios3 = EFI_INVALID_TABLE_ADDR, | ||
33 | .sal_systab = EFI_INVALID_TABLE_ADDR, | 34 | .sal_systab = EFI_INVALID_TABLE_ADDR, |
34 | .boot_info = EFI_INVALID_TABLE_ADDR, | 35 | .boot_info = EFI_INVALID_TABLE_ADDR, |
35 | .hcdp = EFI_INVALID_TABLE_ADDR, | 36 | .hcdp = EFI_INVALID_TABLE_ADDR, |
@@ -86,6 +87,8 @@ static ssize_t systab_show(struct kobject *kobj, | |||
86 | str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); | 87 | str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); |
87 | if (efi.smbios != EFI_INVALID_TABLE_ADDR) | 88 | if (efi.smbios != EFI_INVALID_TABLE_ADDR) |
88 | str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); | 89 | str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); |
90 | if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) | ||
91 | str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); | ||
89 | if (efi.hcdp != EFI_INVALID_TABLE_ADDR) | 92 | if (efi.hcdp != EFI_INVALID_TABLE_ADDR) |
90 | str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); | 93 | str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); |
91 | if (efi.boot_info != EFI_INVALID_TABLE_ADDR) | 94 | if (efi.boot_info != EFI_INVALID_TABLE_ADDR) |
@@ -260,6 +263,7 @@ static __initdata efi_config_table_type_t common_tables[] = { | |||
260 | {MPS_TABLE_GUID, "MPS", &efi.mps}, | 263 | {MPS_TABLE_GUID, "MPS", &efi.mps}, |
261 | {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab}, | 264 | {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab}, |
262 | {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, | 265 | {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, |
266 | {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, | ||
263 | {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, | 267 | {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, |
264 | {NULL_GUID, NULL, NULL}, | 268 | {NULL_GUID, NULL, NULL}, |
265 | }; | 269 | }; |
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 75ee05964cbc..eb48a1a1a576 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -247,9 +247,18 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table, | |||
247 | goto fail_free_cmdline; | 247 | goto fail_free_cmdline; |
248 | } | 248 | } |
249 | } | 249 | } |
250 | if (!fdt_addr) | 250 | |
251 | if (fdt_addr) { | ||
252 | pr_efi(sys_table, "Using DTB from command line\n"); | ||
253 | } else { | ||
251 | /* Look for a device tree configuration table entry. */ | 254 | /* Look for a device tree configuration table entry. */ |
252 | fdt_addr = (uintptr_t)get_fdt(sys_table); | 255 | fdt_addr = (uintptr_t)get_fdt(sys_table); |
256 | if (fdt_addr) | ||
257 | pr_efi(sys_table, "Using DTB from configuration table\n"); | ||
258 | } | ||
259 | |||
260 | if (!fdt_addr) | ||
261 | pr_efi(sys_table, "Generating empty DTB\n"); | ||
253 | 262 | ||
254 | status = handle_cmdline_files(sys_table, image, cmdline_ptr, | 263 | status = handle_cmdline_files(sys_table, image, cmdline_ptr, |
255 | "initrd=", dram_base + SZ_512M, | 264 | "initrd=", dram_base + SZ_512M, |
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c index 1f850c97482f..f745db270171 100644 --- a/drivers/xen/efi.c +++ b/drivers/xen/efi.c | |||
@@ -294,6 +294,7 @@ static const struct efi efi_xen __initconst = { | |||
294 | .acpi = EFI_INVALID_TABLE_ADDR, | 294 | .acpi = EFI_INVALID_TABLE_ADDR, |
295 | .acpi20 = EFI_INVALID_TABLE_ADDR, | 295 | .acpi20 = EFI_INVALID_TABLE_ADDR, |
296 | .smbios = EFI_INVALID_TABLE_ADDR, | 296 | .smbios = EFI_INVALID_TABLE_ADDR, |
297 | .smbios3 = EFI_INVALID_TABLE_ADDR, | ||
297 | .sal_systab = EFI_INVALID_TABLE_ADDR, | 298 | .sal_systab = EFI_INVALID_TABLE_ADDR, |
298 | .boot_info = EFI_INVALID_TABLE_ADDR, | 299 | .boot_info = EFI_INVALID_TABLE_ADDR, |
299 | .hcdp = EFI_INVALID_TABLE_ADDR, | 300 | .hcdp = EFI_INVALID_TABLE_ADDR, |
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h new file mode 100644 index 000000000000..9fa1f653ed3b --- /dev/null +++ b/include/asm-generic/seccomp.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * include/asm-generic/seccomp.h | ||
3 | * | ||
4 | * Copyright (C) 2014 Linaro Limited | ||
5 | * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef _ASM_GENERIC_SECCOMP_H | ||
12 | #define _ASM_GENERIC_SECCOMP_H | ||
13 | |||
14 | #include <linux/unistd.h> | ||
15 | |||
16 | #if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) | ||
17 | #define __NR_seccomp_read_32 __NR_read | ||
18 | #define __NR_seccomp_write_32 __NR_write | ||
19 | #define __NR_seccomp_exit_32 __NR_exit | ||
20 | #define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn | ||
21 | #endif /* CONFIG_COMPAT && ! already defined */ | ||
22 | |||
23 | #define __NR_seccomp_read __NR_read | ||
24 | #define __NR_seccomp_write __NR_write | ||
25 | #define __NR_seccomp_exit __NR_exit | ||
26 | #ifndef __NR_seccomp_sigreturn | ||
27 | #define __NR_seccomp_sigreturn __NR_rt_sigreturn | ||
28 | #endif | ||
29 | |||
30 | #endif /* _ASM_GENERIC_SECCOMP_H */ | ||
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 5672d7ea1fa0..08848050922e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -96,10 +96,9 @@ struct mmu_gather { | |||
96 | #endif | 96 | #endif |
97 | unsigned long start; | 97 | unsigned long start; |
98 | unsigned long end; | 98 | unsigned long end; |
99 | unsigned int need_flush : 1, /* Did free PTEs */ | ||
100 | /* we are in the middle of an operation to clear | 99 | /* we are in the middle of an operation to clear |
101 | * a full mm and can make some optimizations */ | 100 | * a full mm and can make some optimizations */ |
102 | fullmm : 1, | 101 | unsigned int fullmm : 1, |
103 | /* we have performed an operation which | 102 | /* we have performed an operation which |
104 | * requires a complete flush of the tlb */ | 103 | * requires a complete flush of the tlb */ |
105 | need_flush_all : 1; | 104 | need_flush_all : 1; |
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
128 | tlb_flush_mmu(tlb); | 127 | tlb_flush_mmu(tlb); |
129 | } | 128 | } |
130 | 129 | ||
130 | static inline void __tlb_adjust_range(struct mmu_gather *tlb, | ||
131 | unsigned long address) | ||
132 | { | ||
133 | tlb->start = min(tlb->start, address); | ||
134 | tlb->end = max(tlb->end, address + PAGE_SIZE); | ||
135 | } | ||
136 | |||
137 | static inline void __tlb_reset_range(struct mmu_gather *tlb) | ||
138 | { | ||
139 | tlb->start = TASK_SIZE; | ||
140 | tlb->end = 0; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * In the case of tlb vma handling, we can optimise these away in the | ||
145 | * case where we're doing a full MM flush. When we're doing a munmap, | ||
146 | * the vmas are adjusted to only cover the region to be torn down. | ||
147 | */ | ||
148 | #ifndef tlb_start_vma | ||
149 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
150 | #endif | ||
151 | |||
152 | #define __tlb_end_vma(tlb, vma) \ | ||
153 | do { \ | ||
154 | if (!tlb->fullmm && tlb->end) { \ | ||
155 | tlb_flush(tlb); \ | ||
156 | __tlb_reset_range(tlb); \ | ||
157 | } \ | ||
158 | } while (0) | ||
159 | |||
160 | #ifndef tlb_end_vma | ||
161 | #define tlb_end_vma __tlb_end_vma | ||
162 | #endif | ||
163 | |||
164 | #ifndef __tlb_remove_tlb_entry | ||
165 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
166 | #endif | ||
167 | |||
131 | /** | 168 | /** |
132 | * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. | 169 | * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. |
133 | * | 170 | * |
134 | * Record the fact that pte's were really umapped in ->need_flush, so we can | 171 | * Record the fact that pte's were really unmapped by updating the range, |
135 | * later optimise away the tlb invalidate. This helps when userspace is | 172 | * so we can later optimise away the tlb invalidate. This helps when |
136 | * unmapping already-unmapped pages, which happens quite a lot. | 173 | * userspace is unmapping already-unmapped pages, which happens quite a lot. |
137 | */ | 174 | */ |
138 | #define tlb_remove_tlb_entry(tlb, ptep, address) \ | 175 | #define tlb_remove_tlb_entry(tlb, ptep, address) \ |
139 | do { \ | 176 | do { \ |
140 | tlb->need_flush = 1; \ | 177 | __tlb_adjust_range(tlb, address); \ |
141 | __tlb_remove_tlb_entry(tlb, ptep, address); \ | 178 | __tlb_remove_tlb_entry(tlb, ptep, address); \ |
142 | } while (0) | 179 | } while (0) |
143 | 180 | ||
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
151 | 188 | ||
152 | #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ | 189 | #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ |
153 | do { \ | 190 | do { \ |
154 | tlb->need_flush = 1; \ | 191 | __tlb_adjust_range(tlb, address); \ |
155 | __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ | 192 | __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ |
156 | } while (0) | 193 | } while (0) |
157 | 194 | ||
158 | #define pte_free_tlb(tlb, ptep, address) \ | 195 | #define pte_free_tlb(tlb, ptep, address) \ |
159 | do { \ | 196 | do { \ |
160 | tlb->need_flush = 1; \ | 197 | __tlb_adjust_range(tlb, address); \ |
161 | __pte_free_tlb(tlb, ptep, address); \ | 198 | __pte_free_tlb(tlb, ptep, address); \ |
162 | } while (0) | 199 | } while (0) |
163 | 200 | ||
164 | #ifndef __ARCH_HAS_4LEVEL_HACK | 201 | #ifndef __ARCH_HAS_4LEVEL_HACK |
165 | #define pud_free_tlb(tlb, pudp, address) \ | 202 | #define pud_free_tlb(tlb, pudp, address) \ |
166 | do { \ | 203 | do { \ |
167 | tlb->need_flush = 1; \ | 204 | __tlb_adjust_range(tlb, address); \ |
168 | __pud_free_tlb(tlb, pudp, address); \ | 205 | __pud_free_tlb(tlb, pudp, address); \ |
169 | } while (0) | 206 | } while (0) |
170 | #endif | 207 | #endif |
171 | 208 | ||
172 | #define pmd_free_tlb(tlb, pmdp, address) \ | 209 | #define pmd_free_tlb(tlb, pmdp, address) \ |
173 | do { \ | 210 | do { \ |
174 | tlb->need_flush = 1; \ | 211 | __tlb_adjust_range(tlb, address); \ |
175 | __pmd_free_tlb(tlb, pmdp, address); \ | 212 | __pmd_free_tlb(tlb, pmdp, address); \ |
176 | } while (0) | 213 | } while (0) |
177 | 214 | ||
diff --git a/include/linux/efi.h b/include/linux/efi.h index 0949f9c7e872..0238d612750e 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -547,6 +547,9 @@ void efi_native_runtime_setup(void); | |||
547 | #define SMBIOS_TABLE_GUID \ | 547 | #define SMBIOS_TABLE_GUID \ |
548 | EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) | 548 | EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) |
549 | 549 | ||
550 | #define SMBIOS3_TABLE_GUID \ | ||
551 | EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 ) | ||
552 | |||
550 | #define SAL_SYSTEM_TABLE_GUID \ | 553 | #define SAL_SYSTEM_TABLE_GUID \ |
551 | EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) | 554 | EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) |
552 | 555 | ||
@@ -810,7 +813,8 @@ extern struct efi { | |||
810 | unsigned long mps; /* MPS table */ | 813 | unsigned long mps; /* MPS table */ |
811 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ | 814 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ |
812 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ | 815 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ |
813 | unsigned long smbios; /* SM BIOS table */ | 816 | unsigned long smbios; /* SMBIOS table (32 bit entry point) */ |
817 | unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ | ||
814 | unsigned long sal_systab; /* SAL system table */ | 818 | unsigned long sal_systab; /* SAL system table */ |
815 | unsigned long boot_info; /* boot info table */ | 819 | unsigned long boot_info; /* boot info table */ |
816 | unsigned long hcdp; /* HCDP table */ | 820 | unsigned long hcdp; /* HCDP table */ |
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index ea9bf2561b9e..71e1d0ed92f7 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h | |||
@@ -397,6 +397,7 @@ typedef struct elf64_shdr { | |||
397 | #define NT_ARM_TLS 0x401 /* ARM TLS register */ | 397 | #define NT_ARM_TLS 0x401 /* ARM TLS register */ |
398 | #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ | 398 | #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ |
399 | #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ | 399 | #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ |
400 | #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ | ||
400 | #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ | 401 | #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ |
401 | #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ | 402 | #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ |
402 | #define NT_METAG_TLS 0x502 /* Metag TLS pointer */ | 403 | #define NT_METAG_TLS 0x502 /* Metag TLS pointer */ |
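
NT_ARM_SYSTEM_CALL exposes the arm64 syscall number as a ptrace regset, so a tracer can inspect or rewrite it at syscall entry (writing -1 skips the call). A minimal user-space sketch, assuming pid names a tracee already stopped at a syscall-entry stop:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <linux/elf.h>

    /* Sketch: cancel the pending system call of a stopped tracee. */
    static void cancel_syscall(pid_t pid)
    {
            int scno;
            struct iovec iov = {
                    .iov_base = &scno,
                    .iov_len  = sizeof(scno),
            };

            /* Read the pending syscall number through the new regset. */
            ptrace(PTRACE_GETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);

            /* Rewrite it to -1 so the kernel skips the call. */
            scno = -1;
            ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
    }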
diff --git a/mm/memory.c b/mm/memory.c index d5f2ae9c4a23..0b3f6c71620d 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long | |||
220 | /* Is it from 0 to ~0? */ | 220 | /* Is it from 0 to ~0? */ |
221 | tlb->fullmm = !(start | (end+1)); | 221 | tlb->fullmm = !(start | (end+1)); |
222 | tlb->need_flush_all = 0; | 222 | tlb->need_flush_all = 0; |
223 | tlb->start = start; | ||
224 | tlb->end = end; | ||
225 | tlb->need_flush = 0; | ||
226 | tlb->local.next = NULL; | 223 | tlb->local.next = NULL; |
227 | tlb->local.nr = 0; | 224 | tlb->local.nr = 0; |
228 | tlb->local.max = ARRAY_SIZE(tlb->__pages); | 225 | tlb->local.max = ARRAY_SIZE(tlb->__pages); |
@@ -232,15 +229,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long | |||
232 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | 229 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE |
233 | tlb->batch = NULL; | 230 | tlb->batch = NULL; |
234 | #endif | 231 | #endif |
232 | |||
233 | __tlb_reset_range(tlb); | ||
235 | } | 234 | } |
236 | 235 | ||
237 | static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) | 236 | static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) |
238 | { | 237 | { |
239 | tlb->need_flush = 0; | 238 | if (!tlb->end) |
239 | return; | ||
240 | |||
240 | tlb_flush(tlb); | 241 | tlb_flush(tlb); |
241 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | 242 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE |
242 | tlb_table_flush(tlb); | 243 | tlb_table_flush(tlb); |
243 | #endif | 244 | #endif |
245 | __tlb_reset_range(tlb); | ||
244 | } | 246 | } |
245 | 247 | ||
246 | static void tlb_flush_mmu_free(struct mmu_gather *tlb) | 248 | static void tlb_flush_mmu_free(struct mmu_gather *tlb) |
@@ -256,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) | |||
256 | 258 | ||
257 | void tlb_flush_mmu(struct mmu_gather *tlb) | 259 | void tlb_flush_mmu(struct mmu_gather *tlb) |
258 | { | 260 | { |
259 | if (!tlb->need_flush) | ||
260 | return; | ||
261 | tlb_flush_mmu_tlbonly(tlb); | 261 | tlb_flush_mmu_tlbonly(tlb); |
262 | tlb_flush_mmu_free(tlb); | 262 | tlb_flush_mmu_free(tlb); |
263 | } | 263 | } |
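
With the window reset after every flush, an empty range (tlb->end == 0) now encodes exactly what need_flush used to: tlb_flush_mmu_tlbonly() bails out early on an empty window, and the VM_BUG_ON in __tlb_remove_page (next hunk) asserts on tlb->end instead of the deleted flag. As a sketch, with a hypothetical helper name not present in the patch:

    /* Hypothetical helper: after this series, "is a flush pending?"
     * is simply "is the gather window non-empty?". */
    static inline bool tlb_flush_pending(struct mmu_gather *tlb)
    {
            return tlb->end != 0;
    }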
@@ -292,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
292 | { | 292 | { |
293 | struct mmu_gather_batch *batch; | 293 | struct mmu_gather_batch *batch; |
294 | 294 | ||
295 | VM_BUG_ON(!tlb->need_flush); | 295 | VM_BUG_ON(!tlb->end); |
296 | 296 | ||
297 | batch = tlb->active; | 297 | batch = tlb->active; |
298 | batch->pages[batch->nr++] = page; | 298 | batch->pages[batch->nr++] = page; |
@@ -359,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) | |||
359 | { | 359 | { |
360 | struct mmu_table_batch **batch = &tlb->batch; | 360 | struct mmu_table_batch **batch = &tlb->batch; |
361 | 361 | ||
362 | tlb->need_flush = 1; | ||
363 | |||
364 | /* | 362 | /* |
365 | * When there's less than two users of this mm there cannot be a | 363 | * When there's less than two users of this mm there cannot be a
366 | * concurrent page-table walk. | 364 | * concurrent page-table walk. |
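
Dropping the explicit need_flush store here is safe because tlb_remove_table() is only reached from the page-table freeing paths, and the p*_free_tlb macros in the tlb.h hunk above have already widened the range by then. A sketch of the assumed call chain, using the pmd case as the example:

    /* Assumed chain; names are from the tlb.h hunk above:
     *
     *   pmd_free_tlb(tlb, pmdp, addr)
     *     -> __tlb_adjust_range(tlb, addr)    window now non-empty
     *     -> __pmd_free_tlb(tlb, pmdp, addr)
     *          -> tlb_remove_table(tlb, ...)  no flag needed here
     */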
@@ -1186,20 +1184,8 @@ again: | |||
1186 | arch_leave_lazy_mmu_mode(); | 1184 | arch_leave_lazy_mmu_mode(); |
1187 | 1185 | ||
1188 | /* Do the actual TLB flush before dropping ptl */ | 1186 | /* Do the actual TLB flush before dropping ptl */ |
1189 | if (force_flush) { | 1187 | if (force_flush) |
1190 | unsigned long old_end; | ||
1191 | |||
1192 | /* | ||
1193 | * Flush the TLB just for the previous segment, | ||
1194 | * then update the range to be the remaining | ||
1195 | * TLB range. | ||
1196 | */ | ||
1197 | old_end = tlb->end; | ||
1198 | tlb->end = addr; | ||
1199 | tlb_flush_mmu_tlbonly(tlb); | 1188 | tlb_flush_mmu_tlbonly(tlb); |
1200 | tlb->start = addr; | ||
1201 | tlb->end = old_end; | ||
1202 | } | ||
1203 | pte_unmap_unlock(start_pte, ptl); | 1189 | pte_unmap_unlock(start_pte, ptl); |
1204 | 1190 | ||
1205 | /* | 1191 | /* |
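
The deleted block used to trim tlb->end down to addr, flush, and then restore the saved end so the remainder stayed pending. With range tracking in the gather itself, that bookkeeping is automatic: tlb_flush_mmu_tlbonly() flushes whatever window has accumulated and resets it to empty, and the next zapped PTE regrows the window from its own address. A sketch of the resulting flow under that assumption:

    /* Sketch of the simplified force_flush path:
     *
     *   force_flush:         flush [tlb->start, tlb->end), then
     *                        __tlb_reset_range() empties the window
     *   next pte at addr2:   __tlb_adjust_range(tlb, addr2) regrows
     *                        the window, covering the old remainder
     */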