path: root/arch/arm64/kernel
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 16:12:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 16:12:47 -0500
commit	b64bb1d758163814687eb3b84d74e56f04d0c9d1 (patch)
tree	59f1db8b718e98d13c6cf9d3486221cfff6e7eef /arch/arm64/kernel
parent	50569687e9c688a8688982805be6d8e3c8879042 (diff)
parent	eb8a653137b7e74f7cdc01f814eb9d094a65aed9 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Here's the usual mixed bag of arm64 updates, also including some
  related EFI changes (Acked by Matt) and the MMU gather range cleanup
  (Acked by you).

  Changes include:

   - support for alternative instruction patching from Andre

   - seccomp from Akashi

   - some AArch32 instruction emulation, required by the Android folks

   - optimisations for exception entry/exit code, cmpxchg, pcpu atomics

   - mmu_gather range calculations moved into core code

   - EFI updates from Ard, including long-awaited SMBIOS support

   - /proc/cpuinfo fixes to align with the format used by arch/arm/

   - a few non-critical fixes across the architecture"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: remove the unnecessary arm64_swiotlb_init()
  arm64: add module support for alternatives fixups
  arm64: perf: Prevent wraparound during overflow
  arm64/include/asm: Fixed a warning about 'struct pt_regs'
  arm64: Provide a namespace to NCAPS
  arm64: bpf: lift restriction on last instruction
  arm64: Implement support for read-mostly sections
  arm64: compat: align cacheflush syscall with arch/arm
  arm64: add seccomp support
  arm64: add SIGSYS siginfo for compat task
  arm64: add seccomp syscall for compat task
  asm-generic: add generic seccomp.h for secure computing mode 1
  arm64: ptrace: allow tracer to skip a system call
  arm64: ptrace: add NT_ARM_SYSTEM_CALL regset
  arm64: Move some head.text functions to executable section
  arm64: jump labels: NOP out NOP -> NOP replacement
  arm64: add support to dump the kernel page tables
  arm64: Add FIX_HOLE to permanent fixed addresses
  arm64: alternatives: fix pr_fmt string for consistency
  arm64: vmlinux.lds.S: don't discard .exit.* sections at link-time
  ...
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--	arch/arm64/kernel/Makefile			|   7
-rw-r--r--	arch/arm64/kernel/alternative.c			|  85
-rw-r--r--	arch/arm64/kernel/armv8_deprecated.c		| 553
-rw-r--r--	arch/arm64/kernel/cpu_errata.c			| 111
-rw-r--r--	arch/arm64/kernel/cpuinfo.c			|  23
-rw-r--r--	arch/arm64/kernel/efi-entry.S			|   3
-rw-r--r--	arch/arm64/kernel/efi.c				|  37
-rw-r--r--	arch/arm64/kernel/entry-ftrace.S		|  21
-rw-r--r--	arch/arm64/kernel/entry.S			| 138
-rw-r--r--	arch/arm64/kernel/head.S			| 434
-rw-r--r--	arch/arm64/kernel/insn.c			|  26
-rw-r--r--	arch/arm64/kernel/io.c				|  66
-rw-r--r--	arch/arm64/kernel/irq.c				|   2
-rw-r--r--	arch/arm64/kernel/jump_label.c			|  23
-rw-r--r--	arch/arm64/kernel/module.c			|  18
-rw-r--r--	arch/arm64/kernel/perf_event.c			|  10
-rw-r--r--	arch/arm64/kernel/ptrace.c			|  40
-rw-r--r--	arch/arm64/kernel/setup.c			| 108
-rw-r--r--	arch/arm64/kernel/signal32.c			|   6
-rw-r--r--	arch/arm64/kernel/sleep.S			|  36
-rw-r--r--	arch/arm64/kernel/smp.c				|   2
-rw-r--r--	arch/arm64/kernel/suspend.c			|   4
-rw-r--r--	arch/arm64/kernel/sys_compat.c			|  49
-rw-r--r--	arch/arm64/kernel/topology.c			|   7
-rw-r--r--	arch/arm64/kernel/trace-events-emulation.h	|  35
-rw-r--r--	arch/arm64/kernel/traps.c			|  66
-rw-r--r--	arch/arm64/kernel/vmlinux.lds.S			|  33
27 files changed, 1539 insertions, 404 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5bd029b43644..eaa77ed7766a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,7 @@
 CPPFLAGS_vmlinux.lds	:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_efi-stub.o	:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_armv8_deprecated.o := -I$(src)
 
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
@@ -15,10 +16,11 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
-			   cpuinfo.o
+			   cpuinfo.o cpu_errata.o alternative.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
-					   sys_compat.o
+					   sys_compat.o				\
+					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
@@ -31,6 +33,7 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
 arm64-obj-$(CONFIG_EFI)			+= efi.o efi-stub.o efi-entry.o
 arm64-obj-$(CONFIG_PCI)			+= pci.o
+arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
new file mode 100644
index 000000000000..ad7821d64a1d
--- /dev/null
+++ b/arch/arm64/kernel/alternative.c
@@ -0,0 +1,85 @@
1/*
2 * alternative runtime patching
3 * inspired by the x86 version
4 *
5 * Copyright (C) 2014 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define pr_fmt(fmt) "alternatives: " fmt
21
22#include <linux/init.h>
23#include <linux/cpu.h>
24#include <asm/cacheflush.h>
25#include <asm/alternative.h>
26#include <asm/cpufeature.h>
27#include <linux/stop_machine.h>
28
29extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
30
31struct alt_region {
32 struct alt_instr *begin;
33 struct alt_instr *end;
34};
35
36static int __apply_alternatives(void *alt_region)
37{
38 struct alt_instr *alt;
39 struct alt_region *region = alt_region;
40 u8 *origptr, *replptr;
41
42 for (alt = region->begin; alt < region->end; alt++) {
43 if (!cpus_have_cap(alt->cpufeature))
44 continue;
45
46 BUG_ON(alt->alt_len > alt->orig_len);
47
48 pr_info_once("patching kernel code\n");
49
50 origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
51 replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
52 memcpy(origptr, replptr, alt->alt_len);
53 flush_icache_range((uintptr_t)origptr,
54 (uintptr_t)(origptr + alt->alt_len));
55 }
56
57 return 0;
58}
59
60void apply_alternatives_all(void)
61{
62 struct alt_region region = {
63 .begin = __alt_instructions,
64 .end = __alt_instructions_end,
65 };
66
67 /* better not try code patching on a live SMP system */
68 stop_machine(__apply_alternatives, &region, NULL);
69}
70
71void apply_alternatives(void *start, size_t length)
72{
73 struct alt_region region = {
74 .begin = start,
75 .end = start + length,
76 };
77
78 __apply_alternatives(&region);
79}
80
81void free_alternatives_memory(void)
82{
83 free_reserved_area(__alt_instructions, __alt_instructions_end,
84 0, "alternatives");
85}
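
A note on the offset arithmetic in __apply_alternatives() above: alt->orig_offset and alt->alt_offset are stored as self-relative deltas, so each pointer is recovered by adding the value held in a field to the address of that same field. The minimal userspace C sketch below shows only that idiom; the struct and symbol names are illustrative and merely mirror (not reuse) the kernel's struct alt_instr, and it assumes the deltas fit in 32 bits, which holds for the adjacent statics used here.

	#include <stdint.h>
	#include <stdio.h>

	/* Offsets are relative to the address of the field itself. */
	struct alt_example {
		int32_t orig_offset;	/* -> original code block */
		int32_t alt_offset;	/* -> replacement code block */
	};

	static uint8_t orig_code[4];
	static uint8_t alt_code[4];

	int main(void)
	{
		struct alt_example alt;

		/* Store each target as "target address minus field address". */
		alt.orig_offset = (int32_t)((intptr_t)orig_code - (intptr_t)&alt.orig_offset);
		alt.alt_offset  = (int32_t)((intptr_t)alt_code  - (intptr_t)&alt.alt_offset);

		/* Recovery step, in the same form as __apply_alternatives(). */
		uint8_t *origptr = (uint8_t *)&alt.orig_offset + alt.orig_offset;
		uint8_t *replptr = (uint8_t *)&alt.alt_offset + alt.alt_offset;

		printf("orig ok: %d, repl ok: %d\n",
		       origptr == orig_code, replptr == alt_code);
		return 0;
	}
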
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 000000000000..c363671d7509
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,553 @@
1/*
2 * Copyright (C) 2014 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/cpu.h>
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/perf_event.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/sysctl.h>
16
17#include <asm/insn.h>
18#include <asm/opcodes.h>
19#include <asm/system_misc.h>
20#include <asm/traps.h>
21#include <asm/uaccess.h>
22
23#define CREATE_TRACE_POINTS
24#include "trace-events-emulation.h"
25
26/*
27 * The runtime support for deprecated instruction support can be in one of
28 * following three states -
29 *
30 * 0 = undef
31 * 1 = emulate (software emulation)
32 * 2 = hw (supported in hardware)
33 */
34enum insn_emulation_mode {
35 INSN_UNDEF,
36 INSN_EMULATE,
37 INSN_HW,
38};
39
40enum legacy_insn_status {
41 INSN_DEPRECATED,
42 INSN_OBSOLETE,
43};
44
45struct insn_emulation_ops {
46 const char *name;
47 enum legacy_insn_status status;
48 struct undef_hook *hooks;
49 int (*set_hw_mode)(bool enable);
50};
51
52struct insn_emulation {
53 struct list_head node;
54 struct insn_emulation_ops *ops;
55 int current_mode;
56 int min;
57 int max;
58};
59
60static LIST_HEAD(insn_emulation);
61static int nr_insn_emulated;
62static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
63
64static void register_emulation_hooks(struct insn_emulation_ops *ops)
65{
66 struct undef_hook *hook;
67
68 BUG_ON(!ops->hooks);
69
70 for (hook = ops->hooks; hook->instr_mask; hook++)
71 register_undef_hook(hook);
72
73 pr_notice("Registered %s emulation handler\n", ops->name);
74}
75
76static void remove_emulation_hooks(struct insn_emulation_ops *ops)
77{
78 struct undef_hook *hook;
79
80 BUG_ON(!ops->hooks);
81
82 for (hook = ops->hooks; hook->instr_mask; hook++)
83 unregister_undef_hook(hook);
84
85 pr_notice("Removed %s emulation handler\n", ops->name);
86}
87
88static int update_insn_emulation_mode(struct insn_emulation *insn,
89 enum insn_emulation_mode prev)
90{
91 int ret = 0;
92
93 switch (prev) {
94 case INSN_UNDEF: /* Nothing to be done */
95 break;
96 case INSN_EMULATE:
97 remove_emulation_hooks(insn->ops);
98 break;
99 case INSN_HW:
100 if (insn->ops->set_hw_mode) {
101 insn->ops->set_hw_mode(false);
102 pr_notice("Disabled %s support\n", insn->ops->name);
103 }
104 break;
105 }
106
107 switch (insn->current_mode) {
108 case INSN_UNDEF:
109 break;
110 case INSN_EMULATE:
111 register_emulation_hooks(insn->ops);
112 break;
113 case INSN_HW:
114 if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
115 pr_notice("Enabled %s support\n", insn->ops->name);
116 else
117 ret = -EINVAL;
118 break;
119 }
120
121 return ret;
122}
123
124static void register_insn_emulation(struct insn_emulation_ops *ops)
125{
126 unsigned long flags;
127 struct insn_emulation *insn;
128
129 insn = kzalloc(sizeof(*insn), GFP_KERNEL);
130 insn->ops = ops;
131 insn->min = INSN_UNDEF;
132
133 switch (ops->status) {
134 case INSN_DEPRECATED:
135 insn->current_mode = INSN_EMULATE;
136 insn->max = INSN_HW;
137 break;
138 case INSN_OBSOLETE:
139 insn->current_mode = INSN_UNDEF;
140 insn->max = INSN_EMULATE;
141 break;
142 }
143
144 raw_spin_lock_irqsave(&insn_emulation_lock, flags);
145 list_add(&insn->node, &insn_emulation);
146 nr_insn_emulated++;
147 raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
148
149 /* Register any handlers if required */
150 update_insn_emulation_mode(insn, INSN_UNDEF);
151}
152
153static int emulation_proc_handler(struct ctl_table *table, int write,
154 void __user *buffer, size_t *lenp,
155 loff_t *ppos)
156{
157 int ret = 0;
158 struct insn_emulation *insn = (struct insn_emulation *) table->data;
159 enum insn_emulation_mode prev_mode = insn->current_mode;
160
161 table->data = &insn->current_mode;
162 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163
164 if (ret || !write || prev_mode == insn->current_mode)
165 goto ret;
166
167 ret = update_insn_emulation_mode(insn, prev_mode);
168 if (ret) {
169 /* Mode change failed, revert to previous mode. */
170 insn->current_mode = prev_mode;
171 update_insn_emulation_mode(insn, INSN_UNDEF);
172 }
173ret:
174 table->data = insn;
175 return ret;
176}
177
178static struct ctl_table ctl_abi[] = {
179 {
180 .procname = "abi",
181 .mode = 0555,
182 },
183 { }
184};
185
186static void register_insn_emulation_sysctl(struct ctl_table *table)
187{
188 unsigned long flags;
189 int i = 0;
190 struct insn_emulation *insn;
191 struct ctl_table *insns_sysctl, *sysctl;
192
193 insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
194 GFP_KERNEL);
195
196 raw_spin_lock_irqsave(&insn_emulation_lock, flags);
197 list_for_each_entry(insn, &insn_emulation, node) {
198 sysctl = &insns_sysctl[i];
199
200 sysctl->mode = 0644;
201 sysctl->maxlen = sizeof(int);
202
203 sysctl->procname = insn->ops->name;
204 sysctl->data = insn;
205 sysctl->extra1 = &insn->min;
206 sysctl->extra2 = &insn->max;
207 sysctl->proc_handler = emulation_proc_handler;
208 i++;
209 }
210 raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
211
212 table->child = insns_sysctl;
213 register_sysctl_table(table);
214}
215
216/*
217 * Implement emulation of the SWP/SWPB instructions using load-exclusive and
218 * store-exclusive.
219 *
220 * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
221 * Where: Rt = destination
222 * Rt2 = source
223 * Rn = address
224 */
225
226/*
227 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
228 */
229#define __user_swpX_asm(data, addr, res, temp, B) \
230 __asm__ __volatile__( \
231 " mov %w2, %w1\n" \
232 "0: ldxr"B" %w1, [%3]\n" \
233 "1: stxr"B" %w0, %w2, [%3]\n" \
234 " cbz %w0, 2f\n" \
235 " mov %w0, %w4\n" \
236 "2:\n" \
237 " .pushsection .fixup,\"ax\"\n" \
238 " .align 2\n" \
239 "3: mov %w0, %w5\n" \
240 " b 2b\n" \
241 " .popsection" \
242 " .pushsection __ex_table,\"a\"\n" \
243 " .align 3\n" \
244 " .quad 0b, 3b\n" \
245 " .quad 1b, 3b\n" \
246 " .popsection" \
247 : "=&r" (res), "+r" (data), "=&r" (temp) \
248 : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
249 : "memory")
250
251#define __user_swp_asm(data, addr, res, temp) \
252 __user_swpX_asm(data, addr, res, temp, "")
253#define __user_swpb_asm(data, addr, res, temp) \
254 __user_swpX_asm(data, addr, res, temp, "b")
255
256/*
257 * Bit 22 of the instruction encoding distinguishes between
258 * the SWP and SWPB variants (bit set means SWPB).
259 */
260#define TYPE_SWPB (1 << 22)
261
262/*
263 * Set up process info to signal segmentation fault - called on access error.
264 */
265static void set_segfault(struct pt_regs *regs, unsigned long addr)
266{
267 siginfo_t info;
268
269 down_read(&current->mm->mmap_sem);
270 if (find_vma(current->mm, addr) == NULL)
271 info.si_code = SEGV_MAPERR;
272 else
273 info.si_code = SEGV_ACCERR;
274 up_read(&current->mm->mmap_sem);
275
276 info.si_signo = SIGSEGV;
277 info.si_errno = 0;
278 info.si_addr = (void *) instruction_pointer(regs);
279
280 pr_debug("SWP{B} emulation: access caused memory abort!\n");
281 arm64_notify_die("Illegal memory access", regs, &info, 0);
282}
283
284static int emulate_swpX(unsigned int address, unsigned int *data,
285 unsigned int type)
286{
287 unsigned int res = 0;
288
289 if ((type != TYPE_SWPB) && (address & 0x3)) {
290 /* SWP to unaligned address not permitted */
291 pr_debug("SWP instruction on unaligned pointer!\n");
292 return -EFAULT;
293 }
294
295 while (1) {
296 unsigned long temp;
297
298 if (type == TYPE_SWPB)
299 __user_swpb_asm(*data, address, res, temp);
300 else
301 __user_swp_asm(*data, address, res, temp);
302
303 if (likely(res != -EAGAIN) || signal_pending(current))
304 break;
305
306 cond_resched();
307 }
308
309 return res;
310}
311
312/*
313 * swp_handler logs the id of calling process, dissects the instruction, sanity
314 * checks the memory location, calls emulate_swpX for the actual operation and
315 * deals with fixup/error handling before returning
316 */
317static int swp_handler(struct pt_regs *regs, u32 instr)
318{
319 u32 destreg, data, type, address = 0;
320 int rn, rt2, res = 0;
321
322 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
323
324 type = instr & TYPE_SWPB;
325
326 switch (arm_check_condition(instr, regs->pstate)) {
327 case ARM_OPCODE_CONDTEST_PASS:
328 break;
329 case ARM_OPCODE_CONDTEST_FAIL:
330 /* Condition failed - return to next instruction */
331 goto ret;
332 case ARM_OPCODE_CONDTEST_UNCOND:
333 /* If unconditional encoding - not a SWP, undef */
334 return -EFAULT;
335 default:
336 return -EINVAL;
337 }
338
339 rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
340 rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);
341
342 address = (u32)regs->user_regs.regs[rn];
343 data = (u32)regs->user_regs.regs[rt2];
344 destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);
345
346 pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
347 rn, address, destreg,
348 aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
349
350 /* Check access in reasonable access range for both SWP and SWPB */
351 if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
352 pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
353 address);
354 goto fault;
355 }
356
357 res = emulate_swpX(address, &data, type);
358 if (res == -EFAULT)
359 goto fault;
360 else if (res == 0)
361 regs->user_regs.regs[destreg] = data;
362
363ret:
364 if (type == TYPE_SWPB)
365 trace_instruction_emulation("swpb", regs->pc);
366 else
367 trace_instruction_emulation("swp", regs->pc);
368
369 pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
370 current->comm, (unsigned long)current->pid, regs->pc);
371
372 regs->pc += 4;
373 return 0;
374
375fault:
376 set_segfault(regs, address);
377
378 return 0;
379}
380
381/*
382 * Only emulate SWP/SWPB executed in ARM state/User mode.
383 * The kernel must be SWP free and SWP{B} does not exist in Thumb.
384 */
385static struct undef_hook swp_hooks[] = {
386 {
387 .instr_mask = 0x0fb00ff0,
388 .instr_val = 0x01000090,
389 .pstate_mask = COMPAT_PSR_MODE_MASK,
390 .pstate_val = COMPAT_PSR_MODE_USR,
391 .fn = swp_handler
392 },
393 { }
394};
395
396static struct insn_emulation_ops swp_ops = {
397 .name = "swp",
398 .status = INSN_OBSOLETE,
399 .hooks = swp_hooks,
400 .set_hw_mode = NULL,
401};
402
403static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
404{
405 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
406
407 switch (arm_check_condition(instr, regs->pstate)) {
408 case ARM_OPCODE_CONDTEST_PASS:
409 break;
410 case ARM_OPCODE_CONDTEST_FAIL:
411 /* Condition failed - return to next instruction */
412 goto ret;
413 case ARM_OPCODE_CONDTEST_UNCOND:
414 /* If unconditional encoding - not a barrier instruction */
415 return -EFAULT;
416 default:
417 return -EINVAL;
418 }
419
420 switch (aarch32_insn_mcr_extract_crm(instr)) {
421 case 10:
422 /*
423 * dmb - mcr p15, 0, Rt, c7, c10, 5
424 * dsb - mcr p15, 0, Rt, c7, c10, 4
425 */
426 if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
427 dmb(sy);
428 trace_instruction_emulation(
429 "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
430 } else {
431 dsb(sy);
432 trace_instruction_emulation(
433 "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
434 }
435 break;
436 case 5:
437 /*
438 * isb - mcr p15, 0, Rt, c7, c5, 4
439 *
440 * Taking an exception or returning from one acts as an
441 * instruction barrier. So no explicit barrier needed here.
442 */
443 trace_instruction_emulation(
444 "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
445 break;
446 }
447
448ret:
449 pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
450 current->comm, (unsigned long)current->pid, regs->pc);
451
452 regs->pc += 4;
453 return 0;
454}
455
456#define SCTLR_EL1_CP15BEN (1 << 5)
457
458static inline void config_sctlr_el1(u32 clear, u32 set)
459{
460 u32 val;
461
462 asm volatile("mrs %0, sctlr_el1" : "=r" (val));
463 val &= ~clear;
464 val |= set;
465 asm volatile("msr sctlr_el1, %0" : : "r" (val));
466}
467
468static void enable_cp15_ben(void *info)
469{
470 config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
471}
472
473static void disable_cp15_ben(void *info)
474{
475 config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
476}
477
478static int cpu_hotplug_notify(struct notifier_block *b,
479 unsigned long action, void *hcpu)
480{
481 switch (action) {
482 case CPU_STARTING:
483 case CPU_STARTING_FROZEN:
484 enable_cp15_ben(NULL);
485 return NOTIFY_DONE;
486 case CPU_DYING:
487 case CPU_DYING_FROZEN:
488 disable_cp15_ben(NULL);
489 return NOTIFY_DONE;
490 }
491
492 return NOTIFY_OK;
493}
494
495static struct notifier_block cpu_hotplug_notifier = {
496 .notifier_call = cpu_hotplug_notify,
497};
498
499static int cp15_barrier_set_hw_mode(bool enable)
500{
501 if (enable) {
502 register_cpu_notifier(&cpu_hotplug_notifier);
503 on_each_cpu(enable_cp15_ben, NULL, true);
504 } else {
505 unregister_cpu_notifier(&cpu_hotplug_notifier);
506 on_each_cpu(disable_cp15_ben, NULL, true);
507 }
508
509 return true;
510}
511
512static struct undef_hook cp15_barrier_hooks[] = {
513 {
514 .instr_mask = 0x0fff0fdf,
515 .instr_val = 0x0e070f9a,
516 .pstate_mask = COMPAT_PSR_MODE_MASK,
517 .pstate_val = COMPAT_PSR_MODE_USR,
518 .fn = cp15barrier_handler,
519 },
520 {
521 .instr_mask = 0x0fff0fff,
522 .instr_val = 0x0e070f95,
523 .pstate_mask = COMPAT_PSR_MODE_MASK,
524 .pstate_val = COMPAT_PSR_MODE_USR,
525 .fn = cp15barrier_handler,
526 },
527 { }
528};
529
530static struct insn_emulation_ops cp15_barrier_ops = {
531 .name = "cp15_barrier",
532 .status = INSN_DEPRECATED,
533 .hooks = cp15_barrier_hooks,
534 .set_hw_mode = cp15_barrier_set_hw_mode,
535};
536
537/*
538 * Invoked as late_initcall, since not needed before init spawned.
539 */
540static int __init armv8_deprecated_init(void)
541{
542 if (IS_ENABLED(CONFIG_SWP_EMULATION))
543 register_insn_emulation(&swp_ops);
544
545 if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
546 register_insn_emulation(&cp15_barrier_ops);
547
548 register_insn_emulation_sysctl(ctl_abi);
549
550 return 0;
551}
552
553late_initcall(armv8_deprecated_init);
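
The sysctl plumbing above (register_insn_emulation_sysctl() plus emulation_proc_handler()) exposes one integer control per emulated instruction under the "abi" table, taking the values of enum insn_emulation_mode: 0 = undef, 1 = emulate, 2 = hardware where a set_hw_mode callback exists. Assuming the usual procfs mount, that surfaces as /proc/sys/abi/swp and /proc/sys/abi/cp15_barrier. The short C sketch below is illustrative rather than part of the patch; it switches CP15 barrier handling to hardware mode.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path assumed from the "abi" ctl_table and ops->name above. */
		int fd = open("/proc/sys/abi/cp15_barrier", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* 0 = undef, 1 = emulate, 2 = hardware (INSN_HW) */
		if (write(fd, "2", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
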
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
new file mode 100644
index 000000000000..fa62637e63a8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -0,0 +1,111 @@
1/*
2 * Contains CPU specific errata definitions
3 *
4 * Copyright (C) 2014 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#define pr_fmt(fmt) "alternatives: " fmt
20
21#include <linux/types.h>
22#include <asm/cpu.h>
23#include <asm/cputype.h>
24#include <asm/cpufeature.h>
25
26#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
27#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
28
29/*
30 * Add a struct or another datatype to the union below if you need
31 * different means to detect an affected CPU.
32 */
33struct arm64_cpu_capabilities {
34 const char *desc;
35 u16 capability;
36 bool (*is_affected)(struct arm64_cpu_capabilities *);
37 union {
38 struct {
39 u32 midr_model;
40 u32 midr_range_min, midr_range_max;
41 };
42 };
43};
44
45#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
46 MIDR_ARCHITECTURE_MASK)
47
48static bool __maybe_unused
49is_affected_midr_range(struct arm64_cpu_capabilities *entry)
50{
51 u32 midr = read_cpuid_id();
52
53 if ((midr & CPU_MODEL_MASK) != entry->midr_model)
54 return false;
55
56 midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
57
58 return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
59}
60
61#define MIDR_RANGE(model, min, max) \
62 .is_affected = is_affected_midr_range, \
63 .midr_model = model, \
64 .midr_range_min = min, \
65 .midr_range_max = max
66
67struct arm64_cpu_capabilities arm64_errata[] = {
68#if defined(CONFIG_ARM64_ERRATUM_826319) || \
69 defined(CONFIG_ARM64_ERRATUM_827319) || \
70 defined(CONFIG_ARM64_ERRATUM_824069)
71 {
72 /* Cortex-A53 r0p[012] */
73 .desc = "ARM errata 826319, 827319, 824069",
74 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
75 MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
76 },
77#endif
78#ifdef CONFIG_ARM64_ERRATUM_819472
79 {
80 /* Cortex-A53 r0p[01] */
81 .desc = "ARM errata 819472",
82 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
83 MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
84 },
85#endif
86#ifdef CONFIG_ARM64_ERRATUM_832075
87 {
88 /* Cortex-A57 r0p0 - r1p2 */
89 .desc = "ARM erratum 832075",
90 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
91 MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
92 },
93#endif
94 {
95 }
96};
97
98void check_local_cpu_errata(void)
99{
100 struct arm64_cpu_capabilities *cpus = arm64_errata;
101 int i;
102
103 for (i = 0; cpus[i].desc; i++) {
104 if (!cpus[i].is_affected(&cpus[i]))
105 continue;
106
107 if (!cpus_have_cap(cpus[i].capability))
108 pr_info("enabling workaround for %s\n", cpus[i].desc);
109 cpus_set_cap(cpus[i].capability);
110 }
111}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 504fdaa8367e..57b641747534 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -18,6 +18,7 @@
 #include <asm/cachetype.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/cpufeature.h>
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
@@ -111,6 +112,15 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(cntfrq, boot, cur, cpu);
 
 	/*
+	 * The kernel uses self-hosted debug features and expects CPUs to
+	 * support identical debug features. We presently need CTX_CMPs, WRPs,
+	 * and BRPs to be identical.
+	 * ID_AA64DFR1 is currently RES0.
+	 */
+	diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
+	diff |= CHECK(id_aa64dfr1, boot, cur, cpu);
+
+	/*
 	 * Even in big.LITTLE, processors should be identical instruction-set
 	 * wise.
 	 */
@@ -143,7 +153,12 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(id_isar3, boot, cur, cpu);
 	diff |= CHECK(id_isar4, boot, cur, cpu);
 	diff |= CHECK(id_isar5, boot, cur, cpu);
-	diff |= CHECK(id_mmfr0, boot, cur, cpu);
+	/*
+	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+	 * ACTLR formats could differ across CPUs and therefore would have to
+	 * be trapped for virtualization anyway.
+	 */
+	diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
 	diff |= CHECK(id_mmfr1, boot, cur, cpu);
 	diff |= CHECK(id_mmfr2, boot, cur, cpu);
 	diff |= CHECK(id_mmfr3, boot, cur, cpu);
@@ -155,7 +170,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	 * pretend to support them.
 	 */
 	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
-			"Unsupported CPU feature variation.");
+			"Unsupported CPU feature variation.\n");
 }
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -165,6 +180,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_dczid = read_cpuid(DCZID_EL0);
 	info->reg_midr = read_cpuid_id();
 
+	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
 	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
 	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
 	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
@@ -186,6 +203,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
 	cpuinfo_detect_icache_policy(info);
+
+	check_local_cpu_errata();
 }
 
 void cpuinfo_store_cpu(void)
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index d18a44940968..8ce9b0577442 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,8 @@ ENTRY(efi_stub_entry)
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
-	mov	x21, x0
+	ldr	x21, =stext_offset
+	add	x21, x0, x21
 
 	/*
 	 * Calculate size of the kernel Image (same for original and copy).
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 95c49ebc660d..6fac253bc783 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
@@ -112,8 +113,6 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision & 0xffff, vendor);
 
 	retval = efi_config_init(NULL);
-	if (retval == 0)
-		set_bit(EFI_CONFIG_TABLES, &efi.flags);
 
 out:
 	early_memunmap(efi.systab, sizeof(efi_system_table_t));
@@ -125,17 +124,17 @@ out:
  */
 static __init int is_reserve_region(efi_memory_desc_t *md)
 {
-	if (!is_normal_ram(md))
+	switch (md->type) {
+	case EFI_LOADER_CODE:
+	case EFI_LOADER_DATA:
+	case EFI_BOOT_SERVICES_CODE:
+	case EFI_BOOT_SERVICES_DATA:
+	case EFI_CONVENTIONAL_MEMORY:
 		return 0;
-
-	if (md->attribute & EFI_MEMORY_RUNTIME)
-		return 1;
-
-	if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
-	    md->type == EFI_RESERVED_TYPE)
-		return 1;
-
-	return 0;
+	default:
+		break;
+	}
+	return is_normal_ram(md);
 }
 
 static __init void reserve_regions(void)
@@ -471,3 +470,17 @@ err_unmap:
 	return -1;
 }
 early_initcall(arm64_enter_virtual_mode);
+
+static int __init arm64_dmi_init(void)
+{
+	/*
+	 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
+	 * be called early because dmi_id_init(), which is an arch_initcall
+	 * itself, depends on dmi_scan_machine() having been called already.
+	 */
+	dmi_scan_machine();
+	if (dmi_available)
+		dmi_set_dump_stack_arch_desc();
+	return 0;
+}
+core_initcall(arm64_dmi_init);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 38e704e597f7..08cafc518b9a 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -98,8 +98,8 @@
 ENTRY(_mcount)
 	mcount_enter
 
-	ldr	x0, =ftrace_trace_function
-	ldr	x2, [x0]
+	adrp	x0, ftrace_trace_function
+	ldr	x2, [x0, #:lo12:ftrace_trace_function]
 	adr	x0, ftrace_stub
 	cmp	x0, x2			// if (ftrace_trace_function
 	b.eq	skip_ftrace_call	//     != ftrace_stub) {
@@ -115,14 +115,15 @@ skip_ftrace_call: // return;
 	mcount_exit			//   return;
 					// }
 skip_ftrace_call:
-	ldr	x1, =ftrace_graph_return
-	ldr	x2, [x1]		// if ((ftrace_graph_return
-	cmp	x0, x2			//      != ftrace_stub)
-	b.ne	ftrace_graph_caller
+	adrp	x1, ftrace_graph_return
+	ldr	x2, [x1, #:lo12:ftrace_graph_return]
+	cmp	x0, x2			// if ((ftrace_graph_return
+	b.ne	ftrace_graph_caller	//     != ftrace_stub)
 
-	ldr	x1, =ftrace_graph_entry	// || (ftrace_graph_entry
-	ldr	x2, [x1]		//     != ftrace_graph_entry_stub))
-	ldr	x0, =ftrace_graph_entry_stub
+	adrp	x1, ftrace_graph_entry	// || (ftrace_graph_entry
+	adrp	x0, ftrace_graph_entry_stub //  != ftrace_graph_entry_stub))
+	ldr	x2, [x1, #:lo12:ftrace_graph_entry]
+	add	x0, x0, #:lo12:ftrace_graph_entry_stub
 	cmp	x0, x2
 	b.ne	ftrace_graph_caller	// ftrace_graph_caller();
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 726b910fe6ec..fd4fa374e5d2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -64,25 +64,26 @@
64#define BAD_ERROR 3 64#define BAD_ERROR 3
65 65
66 .macro kernel_entry, el, regsize = 64 66 .macro kernel_entry, el, regsize = 64
67 sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR 67 sub sp, sp, #S_FRAME_SIZE
68 .if \regsize == 32 68 .if \regsize == 32
69 mov w0, w0 // zero upper 32 bits of x0 69 mov w0, w0 // zero upper 32 bits of x0
70 .endif 70 .endif
71 push x28, x29 71 stp x0, x1, [sp, #16 * 0]
72 push x26, x27 72 stp x2, x3, [sp, #16 * 1]
73 push x24, x25 73 stp x4, x5, [sp, #16 * 2]
74 push x22, x23 74 stp x6, x7, [sp, #16 * 3]
75 push x20, x21 75 stp x8, x9, [sp, #16 * 4]
76 push x18, x19 76 stp x10, x11, [sp, #16 * 5]
77 push x16, x17 77 stp x12, x13, [sp, #16 * 6]
78 push x14, x15 78 stp x14, x15, [sp, #16 * 7]
79 push x12, x13 79 stp x16, x17, [sp, #16 * 8]
80 push x10, x11 80 stp x18, x19, [sp, #16 * 9]
81 push x8, x9 81 stp x20, x21, [sp, #16 * 10]
82 push x6, x7 82 stp x22, x23, [sp, #16 * 11]
83 push x4, x5 83 stp x24, x25, [sp, #16 * 12]
84 push x2, x3 84 stp x26, x27, [sp, #16 * 13]
85 push x0, x1 85 stp x28, x29, [sp, #16 * 14]
86
86 .if \el == 0 87 .if \el == 0
87 mrs x21, sp_el0 88 mrs x21, sp_el0
88 get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, 89 get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
@@ -118,33 +119,31 @@
118 .if \el == 0 119 .if \el == 0
119 ct_user_enter 120 ct_user_enter
120 ldr x23, [sp, #S_SP] // load return stack pointer 121 ldr x23, [sp, #S_SP] // load return stack pointer
122 msr sp_el0, x23
121 .endif 123 .endif
124 msr elr_el1, x21 // set up the return data
125 msr spsr_el1, x22
122 .if \ret 126 .if \ret
123 ldr x1, [sp, #S_X1] // preserve x0 (syscall return) 127 ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
124 add sp, sp, S_X2
125 .else 128 .else
126 pop x0, x1 129 ldp x0, x1, [sp, #16 * 0]
127 .endif 130 .endif
128 pop x2, x3 // load the rest of the registers 131 ldp x2, x3, [sp, #16 * 1]
129 pop x4, x5 132 ldp x4, x5, [sp, #16 * 2]
130 pop x6, x7 133 ldp x6, x7, [sp, #16 * 3]
131 pop x8, x9 134 ldp x8, x9, [sp, #16 * 4]
132 msr elr_el1, x21 // set up the return data 135 ldp x10, x11, [sp, #16 * 5]
133 msr spsr_el1, x22 136 ldp x12, x13, [sp, #16 * 6]
134 .if \el == 0 137 ldp x14, x15, [sp, #16 * 7]
135 msr sp_el0, x23 138 ldp x16, x17, [sp, #16 * 8]
136 .endif 139 ldp x18, x19, [sp, #16 * 9]
137 pop x10, x11 140 ldp x20, x21, [sp, #16 * 10]
138 pop x12, x13 141 ldp x22, x23, [sp, #16 * 11]
139 pop x14, x15 142 ldp x24, x25, [sp, #16 * 12]
140 pop x16, x17 143 ldp x26, x27, [sp, #16 * 13]
141 pop x18, x19 144 ldp x28, x29, [sp, #16 * 14]
142 pop x20, x21 145 ldr lr, [sp, #S_LR]
143 pop x22, x23 146 add sp, sp, #S_FRAME_SIZE // restore sp
144 pop x24, x25
145 pop x26, x27
146 pop x28, x29
147 ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP
148 eret // return to kernel 147 eret // return to kernel
149 .endm 148 .endm
150 149
@@ -168,7 +167,8 @@ tsk .req x28 // current thread_info
168 * Interrupt handling. 167 * Interrupt handling.
169 */ 168 */
170 .macro irq_handler 169 .macro irq_handler
171 ldr x1, handle_arch_irq 170 adrp x1, handle_arch_irq
171 ldr x1, [x1, #:lo12:handle_arch_irq]
172 mov x0, sp 172 mov x0, sp
173 blr x1 173 blr x1
174 .endm 174 .endm
@@ -455,8 +455,8 @@ el0_da:
455 bic x0, x26, #(0xff << 56) 455 bic x0, x26, #(0xff << 56)
456 mov x1, x25 456 mov x1, x25
457 mov x2, sp 457 mov x2, sp
458 adr lr, ret_to_user 458 bl do_mem_abort
459 b do_mem_abort 459 b ret_to_user
460el0_ia: 460el0_ia:
461 /* 461 /*
462 * Instruction abort handling 462 * Instruction abort handling
@@ -468,8 +468,8 @@ el0_ia:
468 mov x0, x26 468 mov x0, x26
469 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts 469 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
470 mov x2, sp 470 mov x2, sp
471 adr lr, ret_to_user 471 bl do_mem_abort
472 b do_mem_abort 472 b ret_to_user
473el0_fpsimd_acc: 473el0_fpsimd_acc:
474 /* 474 /*
475 * Floating Point or Advanced SIMD access 475 * Floating Point or Advanced SIMD access
@@ -478,8 +478,8 @@ el0_fpsimd_acc:
478 ct_user_exit 478 ct_user_exit
479 mov x0, x25 479 mov x0, x25
480 mov x1, sp 480 mov x1, sp
481 adr lr, ret_to_user 481 bl do_fpsimd_acc
482 b do_fpsimd_acc 482 b ret_to_user
483el0_fpsimd_exc: 483el0_fpsimd_exc:
484 /* 484 /*
485 * Floating Point or Advanced SIMD exception 485 * Floating Point or Advanced SIMD exception
@@ -488,8 +488,8 @@ el0_fpsimd_exc:
488 ct_user_exit 488 ct_user_exit
489 mov x0, x25 489 mov x0, x25
490 mov x1, sp 490 mov x1, sp
491 adr lr, ret_to_user 491 bl do_fpsimd_exc
492 b do_fpsimd_exc 492 b ret_to_user
493el0_sp_pc: 493el0_sp_pc:
494 /* 494 /*
495 * Stack or PC alignment exception handling 495 * Stack or PC alignment exception handling
@@ -500,8 +500,8 @@ el0_sp_pc:
500 mov x0, x26 500 mov x0, x26
501 mov x1, x25 501 mov x1, x25
502 mov x2, sp 502 mov x2, sp
503 adr lr, ret_to_user 503 bl do_sp_pc_abort
504 b do_sp_pc_abort 504 b ret_to_user
505el0_undef: 505el0_undef:
506 /* 506 /*
507 * Undefined instruction 507 * Undefined instruction
@@ -510,8 +510,8 @@ el0_undef:
510 enable_dbg_and_irq 510 enable_dbg_and_irq
511 ct_user_exit 511 ct_user_exit
512 mov x0, sp 512 mov x0, sp
513 adr lr, ret_to_user 513 bl do_undefinstr
514 b do_undefinstr 514 b ret_to_user
515el0_dbg: 515el0_dbg:
516 /* 516 /*
517 * Debug exception handling 517 * Debug exception handling
@@ -530,8 +530,8 @@ el0_inv:
530 mov x0, sp 530 mov x0, sp
531 mov x1, #BAD_SYNC 531 mov x1, #BAD_SYNC
532 mrs x2, esr_el1 532 mrs x2, esr_el1
533 adr lr, ret_to_user 533 bl bad_mode
534 b bad_mode 534 b ret_to_user
535ENDPROC(el0_sync) 535ENDPROC(el0_sync)
536 536
537 .align 6 537 .align 6
@@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point
653 ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks 653 ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
654 tst x16, #_TIF_SYSCALL_WORK 654 tst x16, #_TIF_SYSCALL_WORK
655 b.ne __sys_trace 655 b.ne __sys_trace
656 adr lr, ret_fast_syscall // return address
657 cmp scno, sc_nr // check upper syscall limit 656 cmp scno, sc_nr // check upper syscall limit
658 b.hs ni_sys 657 b.hs ni_sys
659 ldr x16, [stbl, scno, lsl #3] // address in the syscall table 658 ldr x16, [stbl, scno, lsl #3] // address in the syscall table
660 br x16 // call sys_* routine 659 blr x16 // call sys_* routine
660 b ret_fast_syscall
661ni_sys: 661ni_sys:
662 mov x0, sp 662 mov x0, sp
663 b do_ni_syscall 663 bl do_ni_syscall
664 b ret_fast_syscall
664ENDPROC(el0_svc) 665ENDPROC(el0_svc)
665 666
666 /* 667 /*
@@ -668,26 +669,38 @@ ENDPROC(el0_svc)
668 * switches, and waiting for our parent to respond. 669 * switches, and waiting for our parent to respond.
669 */ 670 */
670__sys_trace: 671__sys_trace:
671 mov x0, sp 672 mov w0, #-1 // set default errno for
673 cmp scno, x0 // user-issued syscall(-1)
674 b.ne 1f
675 mov x0, #-ENOSYS
676 str x0, [sp, #S_X0]
6771: mov x0, sp
672 bl syscall_trace_enter 678 bl syscall_trace_enter
673 adr lr, __sys_trace_return // return address 679 cmp w0, #-1 // skip the syscall?
680 b.eq __sys_trace_return_skipped
674 uxtw scno, w0 // syscall number (possibly new) 681 uxtw scno, w0 // syscall number (possibly new)
675 mov x1, sp // pointer to regs 682 mov x1, sp // pointer to regs
676 cmp scno, sc_nr // check upper syscall limit 683 cmp scno, sc_nr // check upper syscall limit
677 b.hs ni_sys 684 b.hs __ni_sys_trace
678 ldp x0, x1, [sp] // restore the syscall args 685 ldp x0, x1, [sp] // restore the syscall args
679 ldp x2, x3, [sp, #S_X2] 686 ldp x2, x3, [sp, #S_X2]
680 ldp x4, x5, [sp, #S_X4] 687 ldp x4, x5, [sp, #S_X4]
681 ldp x6, x7, [sp, #S_X6] 688 ldp x6, x7, [sp, #S_X6]
682 ldr x16, [stbl, scno, lsl #3] // address in the syscall table 689 ldr x16, [stbl, scno, lsl #3] // address in the syscall table
683 br x16 // call sys_* routine 690 blr x16 // call sys_* routine
684 691
685__sys_trace_return: 692__sys_trace_return:
686 str x0, [sp] // save returned x0 693 str x0, [sp, #S_X0] // save returned x0
694__sys_trace_return_skipped:
687 mov x0, sp 695 mov x0, sp
688 bl syscall_trace_exit 696 bl syscall_trace_exit
689 b ret_to_user 697 b ret_to_user
690 698
699__ni_sys_trace:
700 mov x0, sp
701 bl do_ni_syscall
702 b __sys_trace_return
703
691/* 704/*
692 * Special system call wrappers. 705 * Special system call wrappers.
693 */ 706 */
@@ -695,6 +708,3 @@ ENTRY(sys_rt_sigreturn_wrapper)
695 mov x0, sp 708 mov x0, sp
696 b sys_rt_sigreturn 709 b sys_rt_sigreturn
697ENDPROC(sys_rt_sigreturn_wrapper) 710ENDPROC(sys_rt_sigreturn_wrapper)
698
699ENTRY(handle_arch_irq)
700 .quad 0
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0a6e4f924df8..8ce88e08c030 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -132,6 +132,8 @@ efi_head:
132#endif 132#endif
133 133
134#ifdef CONFIG_EFI 134#ifdef CONFIG_EFI
135 .globl stext_offset
136 .set stext_offset, stext - efi_head
135 .align 3 137 .align 3
136pe_header: 138pe_header:
137 .ascii "PE" 139 .ascii "PE"
@@ -155,12 +157,12 @@ optional_header:
155 .long 0 // SizeOfInitializedData 157 .long 0 // SizeOfInitializedData
156 .long 0 // SizeOfUninitializedData 158 .long 0 // SizeOfUninitializedData
157 .long efi_stub_entry - efi_head // AddressOfEntryPoint 159 .long efi_stub_entry - efi_head // AddressOfEntryPoint
158 .long stext - efi_head // BaseOfCode 160 .long stext_offset // BaseOfCode
159 161
160extra_header_fields: 162extra_header_fields:
161 .quad 0 // ImageBase 163 .quad 0 // ImageBase
162 .long 0x20 // SectionAlignment 164 .long 0x1000 // SectionAlignment
163 .long 0x8 // FileAlignment 165 .long PECOFF_FILE_ALIGNMENT // FileAlignment
164 .short 0 // MajorOperatingSystemVersion 166 .short 0 // MajorOperatingSystemVersion
165 .short 0 // MinorOperatingSystemVersion 167 .short 0 // MinorOperatingSystemVersion
166 .short 0 // MajorImageVersion 168 .short 0 // MajorImageVersion
@@ -172,7 +174,7 @@ extra_header_fields:
172 .long _end - efi_head // SizeOfImage 174 .long _end - efi_head // SizeOfImage
173 175
174 // Everything before the kernel image is considered part of the header 176 // Everything before the kernel image is considered part of the header
175 .long stext - efi_head // SizeOfHeaders 177 .long stext_offset // SizeOfHeaders
176 .long 0 // CheckSum 178 .long 0 // CheckSum
177 .short 0xa // Subsystem (EFI application) 179 .short 0xa // Subsystem (EFI application)
178 .short 0 // DllCharacteristics 180 .short 0 // DllCharacteristics
@@ -217,16 +219,24 @@ section_table:
217 .byte 0 219 .byte 0
218 .byte 0 // end of 0 padding of section name 220 .byte 0 // end of 0 padding of section name
219 .long _end - stext // VirtualSize 221 .long _end - stext // VirtualSize
220 .long stext - efi_head // VirtualAddress 222 .long stext_offset // VirtualAddress
221 .long _edata - stext // SizeOfRawData 223 .long _edata - stext // SizeOfRawData
222 .long stext - efi_head // PointerToRawData 224 .long stext_offset // PointerToRawData
223 225
224 .long 0 // PointerToRelocations (0 for executables) 226 .long 0 // PointerToRelocations (0 for executables)
225 .long 0 // PointerToLineNumbers (0 for executables) 227 .long 0 // PointerToLineNumbers (0 for executables)
226 .short 0 // NumberOfRelocations (0 for executables) 228 .short 0 // NumberOfRelocations (0 for executables)
227 .short 0 // NumberOfLineNumbers (0 for executables) 229 .short 0 // NumberOfLineNumbers (0 for executables)
228 .long 0xe0500020 // Characteristics (section flags) 230 .long 0xe0500020 // Characteristics (section flags)
229 .align 5 231
232 /*
233 * EFI will load stext onwards at the 4k section alignment
234 * described in the PE/COFF header. To ensure that instruction
235 * sequences using an adrp and a :lo12: immediate will function
236 * correctly at this alignment, we must ensure that stext is
237 * placed at a 4k boundary in the Image to begin with.
238 */
239 .align 12
230#endif 240#endif
231 241
232ENTRY(stext) 242ENTRY(stext)
@@ -238,7 +248,13 @@ ENTRY(stext)
238 mov x0, x22 248 mov x0, x22
239 bl lookup_processor_type 249 bl lookup_processor_type
240 mov x23, x0 // x23=current cpu_table 250 mov x23, x0 // x23=current cpu_table
241 cbz x23, __error_p // invalid processor (x23=0)? 251 /*
252 * __error_p may end up out of range for cbz if text areas are
253 * aligned up to section sizes.
254 */
255 cbnz x23, 1f // invalid processor (x23=0)?
256 b __error_p
2571:
242 bl __vet_fdt 258 bl __vet_fdt
243 bl __create_page_tables // x25=TTBR0, x26=TTBR1 259 bl __create_page_tables // x25=TTBR0, x26=TTBR1
244 /* 260 /*
@@ -250,13 +266,214 @@ ENTRY(stext)
250 */ 266 */
251 ldr x27, __switch_data // address to jump to after 267 ldr x27, __switch_data // address to jump to after
252 // MMU has been enabled 268 // MMU has been enabled
253 adr lr, __enable_mmu // return (PIC) address 269 adrp lr, __enable_mmu // return (PIC) address
270 add lr, lr, #:lo12:__enable_mmu
254 ldr x12, [x23, #CPU_INFO_SETUP] 271 ldr x12, [x23, #CPU_INFO_SETUP]
255 add x12, x12, x28 // __virt_to_phys 272 add x12, x12, x28 // __virt_to_phys
256 br x12 // initialise processor 273 br x12 // initialise processor
257ENDPROC(stext) 274ENDPROC(stext)
258 275
259/* 276/*
277 * Determine validity of the x21 FDT pointer.
278 * The dtb must be 8-byte aligned and live in the first 512M of memory.
279 */
280__vet_fdt:
281 tst x21, #0x7
282 b.ne 1f
283 cmp x21, x24
284 b.lt 1f
285 mov x0, #(1 << 29)
286 add x0, x0, x24
287 cmp x21, x0
288 b.ge 1f
289 ret
2901:
291 mov x21, #0
292 ret
293ENDPROC(__vet_fdt)
294/*
295 * Macro to create a table entry to the next page.
296 *
297 * tbl: page table address
298 * virt: virtual address
299 * shift: #imm page table shift
300 * ptrs: #imm pointers per table page
301 *
302 * Preserves: virt
303 * Corrupts: tmp1, tmp2
304 * Returns: tbl -> next level table page address
305 */
306 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
307 lsr \tmp1, \virt, #\shift
308 and \tmp1, \tmp1, #\ptrs - 1 // table index
309 add \tmp2, \tbl, #PAGE_SIZE
310 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
311 str \tmp2, [\tbl, \tmp1, lsl #3]
312 add \tbl, \tbl, #PAGE_SIZE // next level table page
313 .endm
314
315/*
316 * Macro to populate the PGD (and possibily PUD) for the corresponding
317 * block entry in the next level (tbl) for the given virtual address.
318 *
319 * Preserves: tbl, next, virt
320 * Corrupts: tmp1, tmp2
321 */
322 .macro create_pgd_entry, tbl, virt, tmp1, tmp2
323 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
324#if SWAPPER_PGTABLE_LEVELS == 3
325 create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
326#endif
327 .endm
328
329/*
330 * Macro to populate block entries in the page table for the start..end
331 * virtual range (inclusive).
332 *
333 * Preserves: tbl, flags
334 * Corrupts: phys, start, end, pstate
335 */
336 .macro create_block_map, tbl, flags, phys, start, end
337 lsr \phys, \phys, #BLOCK_SHIFT
338 lsr \start, \start, #BLOCK_SHIFT
339 and \start, \start, #PTRS_PER_PTE - 1 // table index
340 orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
341 lsr \end, \end, #BLOCK_SHIFT
342 and \end, \end, #PTRS_PER_PTE - 1 // table end index
3439999: str \phys, [\tbl, \start, lsl #3] // store the entry
344 add \start, \start, #1 // next entry
345 add \phys, \phys, #BLOCK_SIZE // next block
346 cmp \start, \end
347 b.ls 9999b
348 .endm
349
350/*
351 * Setup the initial page tables. We only setup the barest amount which is
352 * required to get the kernel running. The following sections are required:
353 * - identity mapping to enable the MMU (low address, TTBR0)
354 * - first few MB of the kernel linear mapping to jump to once the MMU has
355 * been enabled, including the FDT blob (TTBR1)
356 * - pgd entry for fixed mappings (TTBR1)
357 */
358__create_page_tables:
359 pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
360 mov x27, lr
361
362 /*
363 * Invalidate the idmap and swapper page tables to avoid potential
364 * dirty cache lines being evicted.
365 */
366 mov x0, x25
367 add x1, x26, #SWAPPER_DIR_SIZE
368 bl __inval_cache_range
369
370 /*
371 * Clear the idmap and swapper page tables.
372 */
373 mov x0, x25
374 add x6, x26, #SWAPPER_DIR_SIZE
3751: stp xzr, xzr, [x0], #16
376 stp xzr, xzr, [x0], #16
377 stp xzr, xzr, [x0], #16
378 stp xzr, xzr, [x0], #16
379 cmp x0, x6
380 b.lo 1b
381
382 ldr x7, =MM_MMUFLAGS
383
384 /*
385 * Create the identity mapping.
386 */
387 mov x0, x25 // idmap_pg_dir
388 ldr x3, =KERNEL_START
389 add x3, x3, x28 // __pa(KERNEL_START)
390 create_pgd_entry x0, x3, x5, x6
391 ldr x6, =KERNEL_END
392 mov x5, x3 // __pa(KERNEL_START)
393 add x6, x6, x28 // __pa(KERNEL_END)
394 create_block_map x0, x7, x3, x5, x6
395
396 /*
397 * Map the kernel image (starting with PHYS_OFFSET).
398 */
399 mov x0, x26 // swapper_pg_dir
400 mov x5, #PAGE_OFFSET
401 create_pgd_entry x0, x5, x3, x6
402 ldr x6, =KERNEL_END
403 mov x3, x24 // phys offset
404 create_block_map x0, x7, x3, x5, x6
405
406 /*
407 * Map the FDT blob (maximum 2MB; must be within 512MB of
408 * PHYS_OFFSET).
409 */
410 mov x3, x21 // FDT phys address
411 and x3, x3, #~((1 << 21) - 1) // 2MB aligned
412 mov x6, #PAGE_OFFSET
413 sub x5, x3, x24 // subtract PHYS_OFFSET
414 tst x5, #~((1 << 29) - 1) // within 512MB?
415 csel x21, xzr, x21, ne // zero the FDT pointer
416 b.ne 1f
417 add x5, x5, x6 // __va(FDT blob)
418 add x6, x5, #1 << 21 // 2MB for the FDT blob
419 sub x6, x6, #1 // inclusive range
420 create_block_map x0, x7, x3, x5, x6
4211:
422 /*
423 * Since the page tables have been populated with non-cacheable
424 * accesses (MMU disabled), invalidate the idmap and swapper page
425 * tables again to remove any speculatively loaded cache lines.
426 */
427 mov x0, x25
428 add x1, x26, #SWAPPER_DIR_SIZE
429 bl __inval_cache_range
430
431 mov lr, x27
432 ret
433ENDPROC(__create_page_tables)
434 .ltorg
435
436 .align 3
437 .type __switch_data, %object
438__switch_data:
439 .quad __mmap_switched
440 .quad __bss_start // x6
441 .quad __bss_stop // x7
442 .quad processor_id // x4
443 .quad __fdt_pointer // x5
444 .quad memstart_addr // x6
445 .quad init_thread_union + THREAD_START_SP // sp
446
447/*
448 * The following fragment of code is executed with the MMU on in MMU mode, and
449 * uses absolute addresses; this is not position independent.
450 */
451__mmap_switched:
452 adr x3, __switch_data + 8
453
454 ldp x6, x7, [x3], #16
4551: cmp x6, x7
456 b.hs 2f
457 str xzr, [x6], #8 // Clear BSS
458 b 1b
4592:
460 ldp x4, x5, [x3], #16
461 ldr x6, [x3], #8
462 ldr x16, [x3]
463 mov sp, x16
464 str x22, [x4] // Save processor ID
465 str x21, [x5] // Save FDT pointer
466 str x24, [x6] // Save PHYS_OFFSET
467 mov x29, #0
468 b start_kernel
469ENDPROC(__mmap_switched)
470
471/*
472 * end early head section, begin head code that is also used for
473 * hotplug and needs to have the same protections as the text region
474 */
475 .section ".text","ax"
476/*
260 * If we're fortunate enough to boot at EL2, ensure that the world is 477 * If we're fortunate enough to boot at EL2, ensure that the world is
261 * sane before dropping to EL1. 478 * sane before dropping to EL1.
262 * 479 *
@@ -331,7 +548,8 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
331 msr vttbr_el2, xzr 548 msr vttbr_el2, xzr
332 549
333 /* Hypervisor stub */ 550 /* Hypervisor stub */
334 adr x0, __hyp_stub_vectors 551 adrp x0, __hyp_stub_vectors
552 add x0, x0, #:lo12:__hyp_stub_vectors
335 msr vbar_el2, x0 553 msr vbar_el2, x0
336 554
337 /* spsr */ 555 /* spsr */
@@ -492,183 +710,6 @@ ENDPROC(__calc_phys_offset)
492 .quad PAGE_OFFSET 710 .quad PAGE_OFFSET
493 711
494/* 712/*
495 * Macro to create a table entry to the next page.
496 *
497 * tbl: page table address
498 * virt: virtual address
499 * shift: #imm page table shift
500 * ptrs: #imm pointers per table page
501 *
502 * Preserves: virt
503 * Corrupts: tmp1, tmp2
504 * Returns: tbl -> next level table page address
505 */
506 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
507 lsr \tmp1, \virt, #\shift
508 and \tmp1, \tmp1, #\ptrs - 1 // table index
509 add \tmp2, \tbl, #PAGE_SIZE
510 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
511 str \tmp2, [\tbl, \tmp1, lsl #3]
512 add \tbl, \tbl, #PAGE_SIZE // next level table page
513 .endm
514
515/*
516 * Macro to populate the PGD (and possibily PUD) for the corresponding
517 * block entry in the next level (tbl) for the given virtual address.
518 *
519 * Preserves: tbl, next, virt
520 * Corrupts: tmp1, tmp2
521 */
522 .macro create_pgd_entry, tbl, virt, tmp1, tmp2
523 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
524#if SWAPPER_PGTABLE_LEVELS == 3
525 create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
526#endif
527 .endm
528
529/*
530 * Macro to populate block entries in the page table for the start..end
531 * virtual range (inclusive).
532 *
533 * Preserves: tbl, flags
534 * Corrupts: phys, start, end, pstate
535 */
536 .macro create_block_map, tbl, flags, phys, start, end
537 lsr \phys, \phys, #BLOCK_SHIFT
538 lsr \start, \start, #BLOCK_SHIFT
539 and \start, \start, #PTRS_PER_PTE - 1 // table index
540 orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
541 lsr \end, \end, #BLOCK_SHIFT
542 and \end, \end, #PTRS_PER_PTE - 1 // table end index
5439999: str \phys, [\tbl, \start, lsl #3] // store the entry
544 add \start, \start, #1 // next entry
545 add \phys, \phys, #BLOCK_SIZE // next block
546 cmp \start, \end
547 b.ls 9999b
548 .endm
549
550/*
551 * Setup the initial page tables. We only setup the barest amount which is
552 * required to get the kernel running. The following sections are required:
553 * - identity mapping to enable the MMU (low address, TTBR0)
554 * - first few MB of the kernel linear mapping to jump to once the MMU has
555 * been enabled, including the FDT blob (TTBR1)
556 * - pgd entry for fixed mappings (TTBR1)
557 */
558__create_page_tables:
559 pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
560 mov x27, lr
561
562 /*
563 * Invalidate the idmap and swapper page tables to avoid potential
564 * dirty cache lines being evicted.
565 */
566 mov x0, x25
567 add x1, x26, #SWAPPER_DIR_SIZE
568 bl __inval_cache_range
569
570 /*
571 * Clear the idmap and swapper page tables.
572 */
573 mov x0, x25
574 add x6, x26, #SWAPPER_DIR_SIZE
5751: stp xzr, xzr, [x0], #16
576 stp xzr, xzr, [x0], #16
577 stp xzr, xzr, [x0], #16
578 stp xzr, xzr, [x0], #16
579 cmp x0, x6
580 b.lo 1b
581
582 ldr x7, =MM_MMUFLAGS
583
584 /*
585 * Create the identity mapping.
586 */
587 mov x0, x25 // idmap_pg_dir
588 ldr x3, =KERNEL_START
589 add x3, x3, x28 // __pa(KERNEL_START)
590 create_pgd_entry x0, x3, x5, x6
591 ldr x6, =KERNEL_END
592 mov x5, x3 // __pa(KERNEL_START)
593 add x6, x6, x28 // __pa(KERNEL_END)
594 create_block_map x0, x7, x3, x5, x6
595
596 /*
597 * Map the kernel image (starting with PHYS_OFFSET).
598 */
599 mov x0, x26 // swapper_pg_dir
600 mov x5, #PAGE_OFFSET
601 create_pgd_entry x0, x5, x3, x6
602 ldr x6, =KERNEL_END
603 mov x3, x24 // phys offset
604 create_block_map x0, x7, x3, x5, x6
605
606 /*
607 * Map the FDT blob (maximum 2MB; must be within 512MB of
608 * PHYS_OFFSET).
609 */
610 mov x3, x21 // FDT phys address
611 and x3, x3, #~((1 << 21) - 1) // 2MB aligned
612 mov x6, #PAGE_OFFSET
613 sub x5, x3, x24 // subtract PHYS_OFFSET
614 tst x5, #~((1 << 29) - 1) // within 512MB?
615 csel x21, xzr, x21, ne // zero the FDT pointer
616 b.ne 1f
617 add x5, x5, x6 // __va(FDT blob)
618 add x6, x5, #1 << 21 // 2MB for the FDT blob
619 sub x6, x6, #1 // inclusive range
620 create_block_map x0, x7, x3, x5, x6
6211:
622 /*
623 * Since the page tables have been populated with non-cacheable
624 * accesses (MMU disabled), invalidate the idmap and swapper page
625 * tables again to remove any speculatively loaded cache lines.
626 */
627 mov x0, x25
628 add x1, x26, #SWAPPER_DIR_SIZE
629 bl __inval_cache_range
630
631 mov lr, x27
632 ret
633ENDPROC(__create_page_tables)
634 .ltorg
635
636 .align 3
637 .type __switch_data, %object
638__switch_data:
639 .quad __mmap_switched
640 .quad __bss_start // x6
641 .quad __bss_stop // x7
642 .quad processor_id // x4
643 .quad __fdt_pointer // x5
644 .quad memstart_addr // x6
645 .quad init_thread_union + THREAD_START_SP // sp
646
647/*
648 * The following fragment of code is executed with the MMU on, and
649 * uses absolute addresses; this is not position independent.
650 */
651__mmap_switched:
652 adr x3, __switch_data + 8
653
654 ldp x6, x7, [x3], #16
6551: cmp x6, x7
656 b.hs 2f
657 str xzr, [x6], #8 // Clear BSS
658 b 1b
6592:
660 ldp x4, x5, [x3], #16
661 ldr x6, [x3], #8
662 ldr x16, [x3]
663 mov sp, x16
664 str x22, [x4] // Save processor ID
665 str x21, [x5] // Save FDT pointer
666 str x24, [x6] // Save PHYS_OFFSET
667 mov x29, #0
668 b start_kernel
669ENDPROC(__mmap_switched)
670
671/*
672 * Exception handling. Something went wrong and we can't proceed. We ought to 713 * Exception handling. Something went wrong and we can't proceed. We ought to
673 * tell the user, but since we don't have any guarantee that we're even 714 * tell the user, but since we don't have any guarantee that we're even
674 * running on the right architecture, we do virtually nothing. 715 * running on the right architecture, we do virtually nothing.
@@ -715,22 +756,3 @@ __lookup_processor_type_data:
715 .quad . 756 .quad .
716 .quad cpu_table 757 .quad cpu_table
717 .size __lookup_processor_type_data, . - __lookup_processor_type_data 758 .size __lookup_processor_type_data, . - __lookup_processor_type_data
718
719/*
720 * Determine validity of the x21 FDT pointer.
721 * The dtb must be 8-byte aligned and live in the first 512M of memory.
722 */
723__vet_fdt:
724 tst x21, #0x7
725 b.ne 1f
726 cmp x21, x24
727 b.lt 1f
728 mov x0, #(1 << 29)
729 add x0, x0, x24
730 cmp x21, x0
731 b.ge 1f
732 ret
7331:
734 mov x21, #0
735 ret
736ENDPROC(__vet_fdt)
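The __vet_fdt helper removed above encodes a simple rule: the DTB pointer is only trusted if it is 8-byte aligned and lies within the first 512 MiB above PHYS_OFFSET, otherwise it is zeroed. A minimal userspace C sketch of that rule, with made-up addresses, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Return the FDT pointer if it passes the checks, 0 otherwise. */
static uint64_t vet_fdt(uint64_t fdt, uint64_t phys_offset)
{
        if (fdt & 0x7)                          /* must be 8-byte aligned */
                return 0;
        if (fdt < phys_offset)                  /* below the start of RAM */
                return 0;
        if (fdt >= phys_offset + (1ULL << 29))  /* outside the 512 MiB window */
                return 0;
        return fdt;
}

int main(void)
{
        uint64_t phys = 0x80000000ULL;          /* hypothetical PHYS_OFFSET */

        printf("%#llx\n", (unsigned long long)vet_fdt(0x81f00000ULL, phys)); /* accepted */
        printf("%#llx\n", (unsigned long long)vet_fdt(0x81f00004ULL, phys)); /* rejected: misaligned */
        return 0;
}
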
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 8cd27fedc8b6..7e9327a0986d 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -960,3 +960,29 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
960 960
961 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); 961 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
962} 962}
963
964bool aarch32_insn_is_wide(u32 insn)
965{
966 return insn >= 0xe800;
967}
968
969/*
970 * Macros/defines for extracting register numbers from instruction.
971 */
972u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
973{
974 return (insn & (0xf << offset)) >> offset;
975}
976
977#define OPC2_MASK 0x7
978#define OPC2_OFFSET 5
979u32 aarch32_insn_mcr_extract_opc2(u32 insn)
980{
981 return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
982}
983
984#define CRM_MASK 0xf
985u32 aarch32_insn_mcr_extract_crm(u32 insn)
986{
987 return insn & CRM_MASK;
988}
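The new aarch32_insn_mcr_extract_* helpers above are plain bitfield extraction. A standalone sketch of the same masks applied to a sample instruction word; the word below and the assumption that Rt sits at bits 15:12 of an MCR encoding are illustrative, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t extract_reg_num(uint32_t insn, int offset)
{
        return (insn >> offset) & 0xf;          /* 4-bit register field */
}

static uint32_t mcr_extract_opc2(uint32_t insn)
{
        return (insn >> 5) & 0x7;               /* opc2: bits 7:5 */
}

static uint32_t mcr_extract_crm(uint32_t insn)
{
        return insn & 0xf;                      /* CRm: bits 3:0 */
}

int main(void)
{
        uint32_t insn = 0xee070fba;             /* sample CP15 MCR word */

        printf("Rt=%u opc2=%u CRm=%u\n",
               extract_reg_num(insn, 12),       /* Rt assumed at bits 15:12 */
               mcr_extract_opc2(insn),
               mcr_extract_crm(insn));
        return 0;
}
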
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 7d37ead4d199..354be2a872ae 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -25,12 +25,26 @@
25 */ 25 */
26void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) 26void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
27{ 27{
28 unsigned char *t = to; 28 while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
29 while (count) { 29 !IS_ALIGNED((unsigned long)to, 8))) {
30 *(u8 *)to = __raw_readb(from);
31 from++;
32 to++;
30 count--; 33 count--;
31 *t = readb(from); 34 }
32 t++; 35
36 while (count >= 8) {
37 *(u64 *)to = __raw_readq(from);
38 from += 8;
39 to += 8;
40 count -= 8;
41 }
42
43 while (count) {
44 *(u8 *)to = __raw_readb(from);
33 from++; 45 from++;
46 to++;
47 count--;
34 } 48 }
35} 49}
36EXPORT_SYMBOL(__memcpy_fromio); 50EXPORT_SYMBOL(__memcpy_fromio);
@@ -40,12 +54,26 @@ EXPORT_SYMBOL(__memcpy_fromio);
40 */ 54 */
41void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) 55void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
42{ 56{
43 const unsigned char *f = from; 57 while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
44 while (count) { 58 !IS_ALIGNED((unsigned long)from, 8))) {
59 __raw_writeb(*(volatile u8 *)from, to);
60 from++;
61 to++;
45 count--; 62 count--;
46 writeb(*f, to); 63 }
47 f++; 64
65 while (count >= 8) {
66 __raw_writeq(*(volatile u64 *)from, to);
67 from += 8;
68 to += 8;
69 count -= 8;
70 }
71
72 while (count) {
73 __raw_writeb(*(volatile u8 *)from, to);
74 from++;
48 to++; 75 to++;
76 count--;
49 } 77 }
50} 78}
51EXPORT_SYMBOL(__memcpy_toio); 79EXPORT_SYMBOL(__memcpy_toio);
@@ -55,10 +83,28 @@ EXPORT_SYMBOL(__memcpy_toio);
55 */ 83 */
56void __memset_io(volatile void __iomem *dst, int c, size_t count) 84void __memset_io(volatile void __iomem *dst, int c, size_t count)
57{ 85{
58 while (count) { 86 u64 qc = (u8)c;
87
88 qc |= qc << 8;
89 qc |= qc << 16;
90 qc |= qc << 32;
91
92 while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
93 __raw_writeb(c, dst);
94 dst++;
59 count--; 95 count--;
60 writeb(c, dst); 96 }
97
98 while (count >= 8) {
99 __raw_writeq(qc, dst);
100 dst += 8;
101 count -= 8;
102 }
103
104 while (count) {
105 __raw_writeb(c, dst);
61 dst++; 106 dst++;
107 count--;
62 } 108 }
63} 109}
64EXPORT_SYMBOL(__memset_io); 110EXPORT_SYMBOL(__memset_io);
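The reworked I/O routines above all follow the same shape: byte accesses until the pointer is 8-byte aligned, 64-bit accesses for the bulk, byte accesses for the tail, with __memset_io additionally replicating the fill byte across a 64-bit word. A userspace sketch of that pattern (memcpy stands in for writeq, so this only illustrates the arithmetic, not MMIO ordering):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static void memset_by_qwords(void *dst, int c, size_t count)
{
        uint8_t *d = dst;
        uint64_t qc = (uint8_t)c;

        qc |= qc << 8;                          /* replicate byte into all 8 lanes */
        qc |= qc << 16;
        qc |= qc << 32;

        while (count && ((uintptr_t)d & 7)) {   /* unaligned head, byte at a time */
                *d++ = (uint8_t)c;
                count--;
        }
        while (count >= 8) {                    /* aligned bulk, 8 bytes at a time */
                memcpy(d, &qc, 8);
                d += 8;
                count -= 8;
        }
        while (count--)                         /* tail */
                *d++ = (uint8_t)c;
}

int main(void)
{
        unsigned char buf[37];

        memset_by_qwords(buf, 0xab, sizeof(buf));
        printf("%02x %02x\n", buf[0], buf[36]);
        return 0;
}
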
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 071a6ec13bd8..240b75c0e94f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -40,6 +40,8 @@ int arch_show_interrupts(struct seq_file *p, int prec)
40 return 0; 40 return 0;
41} 41}
42 42
43void (*handle_arch_irq)(struct pt_regs *) = NULL;
44
43void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) 45void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
44{ 46{
45 if (handle_arch_irq) 47 if (handle_arch_irq)
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 263a166291fb..4f1fec7a46db 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -22,9 +22,8 @@
22 22
23#ifdef HAVE_JUMP_LABEL 23#ifdef HAVE_JUMP_LABEL
24 24
25static void __arch_jump_label_transform(struct jump_entry *entry, 25void arch_jump_label_transform(struct jump_entry *entry,
26 enum jump_label_type type, 26 enum jump_label_type type)
27 bool is_static)
28{ 27{
29 void *addr = (void *)entry->code; 28 void *addr = (void *)entry->code;
30 u32 insn; 29 u32 insn;
@@ -37,22 +36,18 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
37 insn = aarch64_insn_gen_nop(); 36 insn = aarch64_insn_gen_nop();
38 } 37 }
39 38
40 if (is_static) 39 aarch64_insn_patch_text(&addr, &insn, 1);
41 aarch64_insn_patch_text_nosync(addr, insn);
42 else
43 aarch64_insn_patch_text(&addr, &insn, 1);
44}
45
46void arch_jump_label_transform(struct jump_entry *entry,
47 enum jump_label_type type)
48{
49 __arch_jump_label_transform(entry, type, false);
50} 40}
51 41
52void arch_jump_label_transform_static(struct jump_entry *entry, 42void arch_jump_label_transform_static(struct jump_entry *entry,
53 enum jump_label_type type) 43 enum jump_label_type type)
54{ 44{
55 __arch_jump_label_transform(entry, type, true); 45 /*
46 * We use the architected A64 NOP in arch_static_branch, so there's no
47 * need to patch an identical A64 NOP over the top of it here. The core
48 * will call arch_jump_label_transform from a module notifier if the
49 * NOP needs to be replaced by a branch.
50 */
56} 51}
57 52
58#endif /* HAVE_JUMP_LABEL */ 53#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1eb1cc955139..fd027b101de5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,6 +26,7 @@
26#include <linux/moduleloader.h> 26#include <linux/moduleloader.h>
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <asm/insn.h> 28#include <asm/insn.h>
29#include <asm/sections.h>
29 30
30#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX 31#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
31#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 32#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
@@ -394,3 +395,20 @@ overflow:
394 me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); 395 me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
395 return -ENOEXEC; 396 return -ENOEXEC;
396} 397}
398
399int module_finalize(const Elf_Ehdr *hdr,
400 const Elf_Shdr *sechdrs,
401 struct module *me)
402{
403 const Elf_Shdr *s, *se;
404 const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
405
406 for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
407 if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
408 apply_alternatives((void *)s->sh_addr, s->sh_size);
409 return 0;
410 }
411 }
412
413 return 0;
414}
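module_finalize() above is a straight scan of the module's section headers for one named ".altinstructions". The same walk as a small userspace program (Linux <elf.h>, 64-bit objects, minimal error handling), illustrating the lookup rather than the alternative patching itself:

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf64-file>\n", argv[0]);
                return 1;
        }

        FILE *f = fopen(argv[1], "rb");
        if (!f)
                return 1;
        fseek(f, 0, SEEK_END);
        long size = ftell(f);
        rewind(f);

        char *image = malloc(size);
        if (!image || fread(image, 1, size, f) != (size_t)size)
                return 1;

        Elf64_Ehdr *hdr = (Elf64_Ehdr *)image;
        Elf64_Shdr *sechdrs = (Elf64_Shdr *)(image + hdr->e_shoff);
        const char *secstrs = image + sechdrs[hdr->e_shstrndx].sh_offset;

        for (int i = 0; i < hdr->e_shnum; i++)
                if (strcmp(".altinstructions", secstrs + sechdrs[i].sh_name) == 0)
                        printf("found .altinstructions: %lu bytes\n",
                               (unsigned long)sechdrs[i].sh_size);

        free(image);
        fclose(f);
        return 0;
}
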
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index aa29ecb4f800..25a5308744b1 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -169,8 +169,14 @@ armpmu_event_set_period(struct perf_event *event,
169 ret = 1; 169 ret = 1;
170 } 170 }
171 171
172 if (left > (s64)armpmu->max_period) 172 /*
173 left = armpmu->max_period; 173 * Limit the maximum period to prevent the counter value
174 * from overtaking the one we are about to program. In
175 * effect we are reducing max_period to account for
176 * interrupt latency (and we are being very conservative).
177 */
178 if (left > (armpmu->max_period >> 1))
179 left = armpmu->max_period >> 1;
174 180
175 local64_set(&hwc->prev_count, (u64)-left); 181 local64_set(&hwc->prev_count, (u64)-left);
176 182
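The comment added above explains the clamp: the counter is programmed with -left, so limiting left to half of max_period leaves the upper half of the counter range as headroom against wraparound while the overflow interrupt is serviced. A toy calculation, assuming a 32-bit counter purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_period = 0xffffffffULL;    /* assume a 32-bit event counter */
        int64_t left = 0x180000000LL;           /* requested period, far too large */

        if (left > (int64_t)(max_period >> 1))  /* clamp to half the range */
                left = max_period >> 1;

        printf("left=%#llx, counter programmed to %#x\n",
               (unsigned long long)left, (uint32_t)-left);
        return 0;
}
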
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 8a4ae8e73213..d882b833dbdb 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -27,6 +27,7 @@
27#include <linux/smp.h> 27#include <linux/smp.h>
28#include <linux/ptrace.h> 28#include <linux/ptrace.h>
29#include <linux/user.h> 29#include <linux/user.h>
30#include <linux/seccomp.h>
30#include <linux/security.h> 31#include <linux/security.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/signal.h> 33#include <linux/signal.h>
@@ -551,6 +552,32 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
551 return ret; 552 return ret;
552} 553}
553 554
555static int system_call_get(struct task_struct *target,
556 const struct user_regset *regset,
557 unsigned int pos, unsigned int count,
558 void *kbuf, void __user *ubuf)
559{
560 int syscallno = task_pt_regs(target)->syscallno;
561
562 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
563 &syscallno, 0, -1);
564}
565
566static int system_call_set(struct task_struct *target,
567 const struct user_regset *regset,
568 unsigned int pos, unsigned int count,
569 const void *kbuf, const void __user *ubuf)
570{
571 int syscallno, ret;
572
573 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
574 if (ret)
575 return ret;
576
577 task_pt_regs(target)->syscallno = syscallno;
578 return ret;
579}
580
554enum aarch64_regset { 581enum aarch64_regset {
555 REGSET_GPR, 582 REGSET_GPR,
556 REGSET_FPR, 583 REGSET_FPR,
@@ -559,6 +586,7 @@ enum aarch64_regset {
559 REGSET_HW_BREAK, 586 REGSET_HW_BREAK,
560 REGSET_HW_WATCH, 587 REGSET_HW_WATCH,
561#endif 588#endif
589 REGSET_SYSTEM_CALL,
562}; 590};
563 591
564static const struct user_regset aarch64_regsets[] = { 592static const struct user_regset aarch64_regsets[] = {
@@ -608,6 +636,14 @@ static const struct user_regset aarch64_regsets[] = {
608 .set = hw_break_set, 636 .set = hw_break_set,
609 }, 637 },
610#endif 638#endif
639 [REGSET_SYSTEM_CALL] = {
640 .core_note_type = NT_ARM_SYSTEM_CALL,
641 .n = 1,
642 .size = sizeof(int),
643 .align = sizeof(int),
644 .get = system_call_get,
645 .set = system_call_set,
646 },
611}; 647};
612 648
613static const struct user_regset_view user_aarch64_view = { 649static const struct user_regset_view user_aarch64_view = {
@@ -1114,6 +1150,10 @@ static void tracehook_report_syscall(struct pt_regs *regs,
1114 1150
1115asmlinkage int syscall_trace_enter(struct pt_regs *regs) 1151asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1116{ 1152{
1153 /* Do the secure computing check first; failures should be fast. */
1154 if (secure_computing() == -1)
1155 return -1;
1156
1117 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1157 if (test_thread_flag(TIF_SYSCALL_TRACE))
1118 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); 1158 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1119 1159
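The new REGSET_SYSTEM_CALL regset lets a tracer read or overwrite the pending syscall number of a stopped tracee (writing -1 is the conventional way to prevent the call from running). A sketch of the tracer side, assuming the target is already ptrace-stopped at syscall entry; NT_ARM_SYSTEM_CALL is assumed to be 0x404 when the installed headers lack it:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/types.h>

#ifndef NT_ARM_SYSTEM_CALL
#define NT_ARM_SYSTEM_CALL 0x404               /* assumed value if not in headers */
#endif

/* Overwrite the pending syscall number of a stopped tracee. */
static long set_syscall_nr(pid_t pid, int nr)
{
        struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

        return ptrace(PTRACE_SETREGSET, pid,
                      (void *)(unsigned long)NT_ARM_SYSTEM_CALL, &iov);
}

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <pid> <syscall-nr>\n", argv[0]);
                return 1;
        }
        /* assumes <pid> is already a tracee stopped at syscall entry */
        return set_syscall_nr(atoi(argv[1]), atoi(argv[2])) ? 1 : 0;
}
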
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2437196cc5d4..b80991166754 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -43,12 +43,14 @@
43#include <linux/of_fdt.h> 43#include <linux/of_fdt.h>
44#include <linux/of_platform.h> 44#include <linux/of_platform.h>
45#include <linux/efi.h> 45#include <linux/efi.h>
46#include <linux/personality.h>
46 47
47#include <asm/fixmap.h> 48#include <asm/fixmap.h>
48#include <asm/cpu.h> 49#include <asm/cpu.h>
49#include <asm/cputype.h> 50#include <asm/cputype.h>
50#include <asm/elf.h> 51#include <asm/elf.h>
51#include <asm/cputable.h> 52#include <asm/cputable.h>
53#include <asm/cpufeature.h>
52#include <asm/cpu_ops.h> 54#include <asm/cpu_ops.h>
53#include <asm/sections.h> 55#include <asm/sections.h>
54#include <asm/setup.h> 56#include <asm/setup.h>
@@ -72,13 +74,15 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
72 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ 74 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
73 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ 75 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
74 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ 76 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
75 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) 77 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
78 COMPAT_HWCAP_LPAE)
76unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; 79unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
77unsigned int compat_elf_hwcap2 __read_mostly; 80unsigned int compat_elf_hwcap2 __read_mostly;
78#endif 81#endif
79 82
83DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
84
80static const char *cpu_name; 85static const char *cpu_name;
81static const char *machine_name;
82phys_addr_t __fdt_pointer __initdata; 86phys_addr_t __fdt_pointer __initdata;
83 87
84/* 88/*
@@ -116,12 +120,16 @@ void __init early_print(const char *str, ...)
116 120
117void __init smp_setup_processor_id(void) 121void __init smp_setup_processor_id(void)
118{ 122{
123 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
124 cpu_logical_map(0) = mpidr;
125
119 /* 126 /*
120 * clear __my_cpu_offset on boot CPU to avoid hang caused by 127 * clear __my_cpu_offset on boot CPU to avoid hang caused by
121 * using percpu variable early, for example, lockdep will 128 * using percpu variable early, for example, lockdep will
122 * access percpu variable inside lock_release 129 * access percpu variable inside lock_release
123 */ 130 */
124 set_my_cpu_offset(0); 131 set_my_cpu_offset(0);
132 pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
125} 133}
126 134
127bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 135bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
@@ -311,7 +319,7 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
311 cpu_relax(); 319 cpu_relax();
312 } 320 }
313 321
314 machine_name = of_flat_dt_get_machine_name(); 322 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
315} 323}
316 324
317/* 325/*
@@ -376,6 +384,7 @@ void __init setup_arch(char **cmdline_p)
376 384
377 *cmdline_p = boot_command_line; 385 *cmdline_p = boot_command_line;
378 386
387 early_fixmap_init();
379 early_ioremap_init(); 388 early_ioremap_init();
380 389
381 parse_early_param(); 390 parse_early_param();
@@ -398,7 +407,6 @@ void __init setup_arch(char **cmdline_p)
398 407
399 psci_init(); 408 psci_init();
400 409
401 cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
402 cpu_read_bootcpu_ops(); 410 cpu_read_bootcpu_ops();
403#ifdef CONFIG_SMP 411#ifdef CONFIG_SMP
404 smp_init_cpus(); 412 smp_init_cpus();
@@ -447,14 +455,50 @@ static const char *hwcap_str[] = {
447 NULL 455 NULL
448}; 456};
449 457
458#ifdef CONFIG_COMPAT
459static const char *compat_hwcap_str[] = {
460 "swp",
461 "half",
462 "thumb",
463 "26bit",
464 "fastmult",
465 "fpa",
466 "vfp",
467 "edsp",
468 "java",
469 "iwmmxt",
470 "crunch",
471 "thumbee",
472 "neon",
473 "vfpv3",
474 "vfpv3d16",
475 "tls",
476 "vfpv4",
477 "idiva",
478 "idivt",
479 "vfpd32",
480 "lpae",
481 "evtstrm"
482};
483
484static const char *compat_hwcap2_str[] = {
485 "aes",
486 "pmull",
487 "sha1",
488 "sha2",
489 "crc32",
490 NULL
491};
492#endif /* CONFIG_COMPAT */
493
450static int c_show(struct seq_file *m, void *v) 494static int c_show(struct seq_file *m, void *v)
451{ 495{
452 int i; 496 int i, j;
453
454 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
455 cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
456 497
457 for_each_online_cpu(i) { 498 for_each_online_cpu(i) {
499 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
500 u32 midr = cpuinfo->reg_midr;
501
458 /* 502 /*
459 * glibc reads /proc/cpuinfo to determine the number of 503 * glibc reads /proc/cpuinfo to determine the number of
460 * online processors, looking for lines beginning with 504 * online processors, looking for lines beginning with
@@ -463,24 +507,38 @@ static int c_show(struct seq_file *m, void *v)
463#ifdef CONFIG_SMP 507#ifdef CONFIG_SMP
464 seq_printf(m, "processor\t: %d\n", i); 508 seq_printf(m, "processor\t: %d\n", i);
465#endif 509#endif
466 }
467
468 /* dump out the processor features */
469 seq_puts(m, "Features\t: ");
470
471 for (i = 0; hwcap_str[i]; i++)
472 if (elf_hwcap & (1 << i))
473 seq_printf(m, "%s ", hwcap_str[i]);
474
475 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
476 seq_printf(m, "CPU architecture: AArch64\n");
477 seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
478 seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
479 seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
480 510
481 seq_puts(m, "\n"); 511 /*
482 512 * Dump out the common processor features in a single line.
483 seq_printf(m, "Hardware\t: %s\n", machine_name); 513 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
514 * rather than attempting to parse this, but there's a body of
515 * software which does already (at least for 32-bit).
516 */
517 seq_puts(m, "Features\t:");
518 if (personality(current->personality) == PER_LINUX32) {
519#ifdef CONFIG_COMPAT
520 for (j = 0; compat_hwcap_str[j]; j++)
521 if (compat_elf_hwcap & (1 << j))
522 seq_printf(m, " %s", compat_hwcap_str[j]);
523
524 for (j = 0; compat_hwcap2_str[j]; j++)
525 if (compat_elf_hwcap2 & (1 << j))
526 seq_printf(m, " %s", compat_hwcap2_str[j]);
527#endif /* CONFIG_COMPAT */
528 } else {
529 for (j = 0; hwcap_str[j]; j++)
530 if (elf_hwcap & (1 << j))
531 seq_printf(m, " %s", hwcap_str[j]);
532 }
533 seq_puts(m, "\n");
534
535 seq_printf(m, "CPU implementer\t: 0x%02x\n",
536 MIDR_IMPLEMENTOR(midr));
537 seq_printf(m, "CPU architecture: 8\n");
538 seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
539 seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
540 seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
541 }
484 542
485 return 0; 543 return 0;
486} 544}
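The MIDR_* accessors used by the new c_show() correspond to the open-coded shifts the removed lines applied to read_cpuid_id(): implementer in bits 31:24, variant in 23:20, part number in 15:4 and revision in 3:0. A standalone rendering of that split, with an illustrative MIDR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t midr = 0x410fd030;             /* example MIDR_EL1 value */

        printf("CPU implementer\t: 0x%02x\n", midr >> 24);
        printf("CPU architecture: 8\n");
        printf("CPU variant\t: 0x%x\n", (midr >> 20) & 0xf);
        printf("CPU part\t: 0x%03x\n", (midr >> 4) & 0xfff);
        printf("CPU revision\t: %u\n", midr & 0xf);
        return 0;
}
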
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1b9ad02837cf..5a1ba6e80d4e 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -186,6 +186,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
186 err |= __put_user(from->si_uid, &to->si_uid); 186 err |= __put_user(from->si_uid, &to->si_uid);
187 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); 187 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
188 break; 188 break;
189 case __SI_SYS:
190 err |= __put_user((compat_uptr_t)(unsigned long)
191 from->si_call_addr, &to->si_call_addr);
192 err |= __put_user(from->si_syscall, &to->si_syscall);
193 err |= __put_user(from->si_arch, &to->si_arch);
194 break;
189 default: /* this is just in case for now ... */ 195 default: /* this is just in case for now ... */
190 err |= __put_user(from->si_pid, &to->si_pid); 196 err |= __put_user(from->si_pid, &to->si_pid);
191 err |= __put_user(from->si_uid, &to->si_uid); 197 err |= __put_user(from->si_uid, &to->si_uid);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index a564b440416a..ede186cdd452 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -147,14 +147,12 @@ cpu_resume_after_mmu:
147 ret 147 ret
148ENDPROC(cpu_resume_after_mmu) 148ENDPROC(cpu_resume_after_mmu)
149 149
150 .data
151ENTRY(cpu_resume) 150ENTRY(cpu_resume)
152 bl el2_setup // if in EL2 drop to EL1 cleanly 151 bl el2_setup // if in EL2 drop to EL1 cleanly
153#ifdef CONFIG_SMP 152#ifdef CONFIG_SMP
154 mrs x1, mpidr_el1 153 mrs x1, mpidr_el1
155 adr x4, mpidr_hash_ptr 154 adrp x8, mpidr_hash
156 ldr x5, [x4] 155 add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
157 add x8, x4, x5 // x8 = struct mpidr_hash phys address
158 /* retrieve mpidr_hash members to compute the hash */ 156 /* retrieve mpidr_hash members to compute the hash */
159 ldr x2, [x8, #MPIDR_HASH_MASK] 157 ldr x2, [x8, #MPIDR_HASH_MASK]
160 ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] 158 ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
@@ -164,14 +162,15 @@ ENTRY(cpu_resume)
164#else 162#else
165 mov x7, xzr 163 mov x7, xzr
166#endif 164#endif
167 adr x0, sleep_save_sp 165 adrp x0, sleep_save_sp
166 add x0, x0, #:lo12:sleep_save_sp
168 ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] 167 ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
169 ldr x0, [x0, x7, lsl #3] 168 ldr x0, [x0, x7, lsl #3]
170 /* load sp from context */ 169 /* load sp from context */
171 ldr x2, [x0, #CPU_CTX_SP] 170 ldr x2, [x0, #CPU_CTX_SP]
172 adr x1, sleep_idmap_phys 171 adrp x1, sleep_idmap_phys
173 /* load physical address of identity map page table in x1 */ 172 /* load physical address of identity map page table in x1 */
174 ldr x1, [x1] 173 ldr x1, [x1, #:lo12:sleep_idmap_phys]
175 mov sp, x2 174 mov sp, x2
176 /* 175 /*
177 * cpu_do_resume expects x0 to contain context physical address 176 * cpu_do_resume expects x0 to contain context physical address
@@ -180,26 +179,3 @@ ENTRY(cpu_resume)
180 bl cpu_do_resume // PC relative jump, MMU off 179 bl cpu_do_resume // PC relative jump, MMU off
181 b cpu_resume_mmu // Resume MMU, never returns 180 b cpu_resume_mmu // Resume MMU, never returns
182ENDPROC(cpu_resume) 181ENDPROC(cpu_resume)
183
184 .align 3
185mpidr_hash_ptr:
186 /*
187 * offset of mpidr_hash symbol from current location
188 * used to obtain run-time mpidr_hash address with MMU off
189 */
190 .quad mpidr_hash - .
191/*
192 * physical address of identity mapped page tables
193 */
194 .type sleep_idmap_phys, #object
195ENTRY(sleep_idmap_phys)
196 .quad 0
197/*
198 * struct sleep_save_sp {
199 * phys_addr_t *save_ptr_stash;
200 * phys_addr_t save_ptr_stash_phys;
201 * };
202 */
203 .type sleep_save_sp, #object
204ENTRY(sleep_save_sp)
205 .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b06d1d90ee8c..7ae6ee085261 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -37,6 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/irq_work.h> 38#include <linux/irq_work.h>
39 39
40#include <asm/alternative.h>
40#include <asm/atomic.h> 41#include <asm/atomic.h>
41#include <asm/cacheflush.h> 42#include <asm/cacheflush.h>
42#include <asm/cpu.h> 43#include <asm/cpu.h>
@@ -309,6 +310,7 @@ void cpu_die(void)
309void __init smp_cpus_done(unsigned int max_cpus) 310void __init smp_cpus_done(unsigned int max_cpus)
310{ 311{
311 pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); 312 pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
313 apply_alternatives_all();
312} 314}
313 315
314void __init smp_prepare_boot_cpu(void) 316void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 13ad4dbb1615..3771b72b6569 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -126,8 +126,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
126 return ret; 126 return ret;
127} 127}
128 128
129extern struct sleep_save_sp sleep_save_sp; 129struct sleep_save_sp sleep_save_sp;
130extern phys_addr_t sleep_idmap_phys; 130phys_addr_t sleep_idmap_phys;
131 131
132static int __init cpu_suspend_init(void) 132static int __init cpu_suspend_init(void)
133{ 133{
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index dc47e53e9e28..28c511b06edf 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,29 +28,39 @@
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/unistd.h> 29#include <asm/unistd.h>
30 30
31static inline void 31static long
32do_compat_cache_op(unsigned long start, unsigned long end, int flags) 32__do_compat_cache_op(unsigned long start, unsigned long end)
33{ 33{
34 struct mm_struct *mm = current->active_mm; 34 long ret;
35 struct vm_area_struct *vma;
36 35
37 if (end < start || flags) 36 do {
38 return; 37 unsigned long chunk = min(PAGE_SIZE, end - start);
39 38
40 down_read(&mm->mmap_sem); 39 if (fatal_signal_pending(current))
41 vma = find_vma(mm, start); 40 return 0;
42 if (vma && vma->vm_start < end) { 41
43 if (start < vma->vm_start) 42 ret = __flush_cache_user_range(start, start + chunk);
44 start = vma->vm_start; 43 if (ret)
45 if (end > vma->vm_end) 44 return ret;
46 end = vma->vm_end; 45
47 up_read(&mm->mmap_sem); 46 cond_resched();
48 __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end)); 47 start += chunk;
49 return; 48 } while (start < end);
50 } 49
51 up_read(&mm->mmap_sem); 50 return 0;
52} 51}
53 52
53static inline long
54do_compat_cache_op(unsigned long start, unsigned long end, int flags)
55{
56 if (end < start || flags)
57 return -EINVAL;
58
59 if (!access_ok(VERIFY_READ, start, end - start))
60 return -EFAULT;
61
62 return __do_compat_cache_op(start, end);
63}
54/* 64/*
55 * Handle all unrecognised system calls. 65 * Handle all unrecognised system calls.
56 */ 66 */
@@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs)
74 * the specified region). 84 * the specified region).
75 */ 85 */
76 case __ARM_NR_compat_cacheflush: 86 case __ARM_NR_compat_cacheflush:
77 do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); 87 return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
78 return 0;
79 88
80 case __ARM_NR_compat_set_tls: 89 case __ARM_NR_compat_set_tls:
81 current->thread.tp_value = regs->regs[0]; 90 current->thread.tp_value = regs->regs[0];
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b6ee26b0939a..fcb8f7b42271 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -255,12 +255,15 @@ void store_cpu_topology(unsigned int cpuid)
255 /* Multiprocessor system : Multi-threads per core */ 255 /* Multiprocessor system : Multi-threads per core */
256 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 256 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
257 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); 257 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
258 cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); 258 cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
259 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
259 } else { 260 } else {
260 /* Multiprocessor system : Single-thread per core */ 261 /* Multiprocessor system : Single-thread per core */
261 cpuid_topo->thread_id = -1; 262 cpuid_topo->thread_id = -1;
262 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 263 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
263 cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); 264 cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
265 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
266 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
264 } 267 }
265 268
266 pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", 269 pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
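The topology change above widens cluster_id so that the higher MPIDR affinity fields are not lost. MPIDR_AFFINITY_LEVEL() extracts 8-bit fields at bits 7:0, 15:8, 23:16 and (for level 3) 39:32; a standalone sketch of the single-thread-per-core packing, with an invented register value:

#include <stdint.h>
#include <stdio.h>

static unsigned int affinity_level(uint64_t mpidr, int level)
{
        int shift = (level == 3) ? 32 : level * 8;      /* Aff3 sits at bits 39:32 */

        return (mpidr >> shift) & 0xff;
}

int main(void)
{
        uint64_t mpidr = 0x0000000100020304ULL;         /* hypothetical MPIDR_EL1 */
        unsigned int core_id = affinity_level(mpidr, 0);
        unsigned int cluster_id = affinity_level(mpidr, 1) |
                                  affinity_level(mpidr, 2) << 8 |
                                  affinity_level(mpidr, 3) << 16;

        printf("core %u cluster %#x\n", core_id, cluster_id);
        return 0;
}
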
diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h
new file mode 100644
index 000000000000..ae1dd598ea65
--- /dev/null
+++ b/arch/arm64/kernel/trace-events-emulation.h
@@ -0,0 +1,35 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM emulation
3
4#if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EMULATION_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(instruction_emulation,
10
11 TP_PROTO(const char *instr, u64 addr),
12 TP_ARGS(instr, addr),
13
14 TP_STRUCT__entry(
15 __string(instr, instr)
16 __field(u64, addr)
17 ),
18
19 TP_fast_assign(
20 __assign_str(instr, instr);
21 __entry->addr = addr;
22 ),
23
24 TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr)
25);
26
27#endif /* _TRACE_EMULATION_H */
28
29/* This part must be outside protection */
30#undef TRACE_INCLUDE_PATH
31#undef TRACE_INCLUDE_FILE
32#define TRACE_INCLUDE_PATH .
33
34#define TRACE_INCLUDE_FILE trace-events-emulation
35#include <trace/define_trace.h>
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index de1b085e7963..0a801e3743d5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -259,6 +259,69 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
259 } 259 }
260} 260}
261 261
262static LIST_HEAD(undef_hook);
263static DEFINE_RAW_SPINLOCK(undef_lock);
264
265void register_undef_hook(struct undef_hook *hook)
266{
267 unsigned long flags;
268
269 raw_spin_lock_irqsave(&undef_lock, flags);
270 list_add(&hook->node, &undef_hook);
271 raw_spin_unlock_irqrestore(&undef_lock, flags);
272}
273
274void unregister_undef_hook(struct undef_hook *hook)
275{
276 unsigned long flags;
277
278 raw_spin_lock_irqsave(&undef_lock, flags);
279 list_del(&hook->node);
280 raw_spin_unlock_irqrestore(&undef_lock, flags);
281}
282
283static int call_undef_hook(struct pt_regs *regs)
284{
285 struct undef_hook *hook;
286 unsigned long flags;
287 u32 instr;
288 int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
289 void __user *pc = (void __user *)instruction_pointer(regs);
290
291 if (!user_mode(regs))
292 return 1;
293
294 if (compat_thumb_mode(regs)) {
295 /* 16-bit Thumb instruction */
296 if (get_user(instr, (u16 __user *)pc))
297 goto exit;
298 instr = le16_to_cpu(instr);
299 if (aarch32_insn_is_wide(instr)) {
300 u32 instr2;
301
302 if (get_user(instr2, (u16 __user *)(pc + 2)))
303 goto exit;
304 instr2 = le16_to_cpu(instr2);
305 instr = (instr << 16) | instr2;
306 }
307 } else {
308 /* 32-bit ARM instruction */
309 if (get_user(instr, (u32 __user *)pc))
310 goto exit;
311 instr = le32_to_cpu(instr);
312 }
313
314 raw_spin_lock_irqsave(&undef_lock, flags);
315 list_for_each_entry(hook, &undef_hook, node)
316 if ((instr & hook->instr_mask) == hook->instr_val &&
317 (regs->pstate & hook->pstate_mask) == hook->pstate_val)
318 fn = hook->fn;
319
320 raw_spin_unlock_irqrestore(&undef_lock, flags);
321exit:
322 return fn ? fn(regs, instr) : 1;
323}
324
262asmlinkage void __exception do_undefinstr(struct pt_regs *regs) 325asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
263{ 326{
264 siginfo_t info; 327 siginfo_t info;
@@ -268,6 +331,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
268 if (!aarch32_break_handler(regs)) 331 if (!aarch32_break_handler(regs))
269 return; 332 return;
270 333
334 if (call_undef_hook(regs) == 0)
335 return;
336
271 if (show_unhandled_signals && unhandled_signal(current, SIGILL) && 337 if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
272 printk_ratelimit()) { 338 printk_ratelimit()) {
273 pr_info("%s[%d]: undefined instruction: pc=%p\n", 339 pr_info("%s[%d]: undefined instruction: pc=%p\n",
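The undef_hook machinery added above dispatches an undefined-instruction trap to a registered hook whose instr_mask/instr_val and pstate_mask/pstate_val both match the faulting instruction and PSTATE. A userspace sketch of just that matching rule, with an invented hook table (the pattern below is not a real deprecated-instruction encoding):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct hook {
        uint32_t instr_mask;
        uint32_t instr_val;
        uint64_t pstate_mask;
        uint64_t pstate_val;
        const char *name;
};

/* invented patterns, purely for illustration */
static const struct hook hooks[] = {
        { 0x0ff000f0, 0x01000090, 0, 0, "swp-like" },
};

static const char *match(uint32_t instr, uint64_t pstate)
{
        for (size_t i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++)
                if ((instr & hooks[i].instr_mask) == hooks[i].instr_val &&
                    (pstate & hooks[i].pstate_mask) == hooks[i].pstate_val)
                        return hooks[i].name;
        return NULL;
}

int main(void)
{
        const char *h = match(0xe1012093, 0);

        printf("%s\n", h ? h : "unhandled");
        return 0;
}
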
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index edf8715ba39b..9965ec87cbec 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -11,8 +11,9 @@
11 11
12#include "image.h" 12#include "image.h"
13 13
14#define ARM_EXIT_KEEP(x) 14/* .exit.text needed in case of alternative patching */
15#define ARM_EXIT_DISCARD(x) x 15#define ARM_EXIT_KEEP(x) x
16#define ARM_EXIT_DISCARD(x)
16 17
17OUTPUT_ARCH(aarch64) 18OUTPUT_ARCH(aarch64)
18ENTRY(_text) 19ENTRY(_text)
@@ -32,6 +33,22 @@ jiffies = jiffies_64;
32 *(.hyp.text) \ 33 *(.hyp.text) \
33 VMLINUX_SYMBOL(__hyp_text_end) = .; 34 VMLINUX_SYMBOL(__hyp_text_end) = .;
34 35
36/*
37 * The size of the PE/COFF section that covers the kernel image, which
38 * runs from stext to _edata, must be a round multiple of the PE/COFF
39 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
40 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
41 * boundary should be sufficient.
42 */
43PECOFF_FILE_ALIGNMENT = 0x200;
44
45#ifdef CONFIG_EFI
46#define PECOFF_EDATA_PADDING \
47 .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
48#else
49#define PECOFF_EDATA_PADDING
50#endif
51
35SECTIONS 52SECTIONS
36{ 53{
37 /* 54 /*
@@ -100,9 +117,21 @@ SECTIONS
100 . = ALIGN(PAGE_SIZE); 117 . = ALIGN(PAGE_SIZE);
101 __init_end = .; 118 __init_end = .;
102 119
120 . = ALIGN(4);
121 .altinstructions : {
122 __alt_instructions = .;
123 *(.altinstructions)
124 __alt_instructions_end = .;
125 }
126 .altinstr_replacement : {
127 *(.altinstr_replacement)
128 }
129
130 . = ALIGN(PAGE_SIZE);
103 _data = .; 131 _data = .;
104 _sdata = .; 132 _sdata = .;
105 RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) 133 RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
134 PECOFF_EDATA_PADDING
106 _edata = .; 135 _edata = .;
107 136
108 BSS_SECTION(0, 0, 0) 137 BSS_SECTION(0, 0, 0)