author     Catalin Marinas <catalin.marinas@arm.com>  2017-08-09 10:37:49 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>  2017-08-09 10:37:49 -0400
commit     0553896787353e2526078064ff1cf21ff7bc34ce (patch)
tree       bf9f6490d4a74991653da4054cfc4c1b7c647074
parent     739586951b8abe381a98797a5e27a0a9336333d6 (diff)
parent     31e43ad3b74a5d7b282023b72f25fc677c14c727 (diff)
Merge branch 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux into for-next/core
* 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
arm64: unwind: remove sp from struct stackframe
arm64: unwind: reference pt_regs via embedded stack frame
arm64: unwind: disregard frame.sp when validating frame pointer
arm64: unwind: avoid percpu indirection for irq stack
arm64: move non-entry code out of .entry.text
arm64: consistently use bl for C exception entry
arm64: Add ASM_BUG()
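The common thread of this series is that every exception entry now lays down a fake frame record inside struct pt_regs (the new stackframe field), so the frame-pointer chain runs unbroken across exception boundaries and terminates at a { NULL, NULL } record written on entry from EL0 and in head.S. A minimal, stand-alone C sketch of the resulting walk (modelled on the new unwind_frame()/dump_backtrace() code in the diff below; the helper name and the simplified validation are illustrative only, not part of the patches, and the real code also checks the task and IRQ stack bounds):

	/* Sketch: walk a chain of AArch64 frame records { fp, lr }. */
	struct frame_record {
		unsigned long fp;	/* saved x29: address of the caller's record */
		unsigned long pc;	/* saved x30: return address */
	};

	static int walk_frame_records_sketch(unsigned long fp,
					     int (*consume)(unsigned long pc, void *arg),
					     void *arg)
	{
		while (fp && !(fp & 0xf)) {	/* records are 16-byte aligned */
			const struct frame_record *rec = (const struct frame_record *)fp;

			/* Final record laid down by kernel_entry/head.S: stop cleanly. */
			if (!rec->fp && !rec->pc)
				return 0;
			if (consume(rec->pc, arg))
				return 0;	/* consumer asked to stop */
			fp = rec->fp;		/* step to the caller's record */
		}
		return -1;			/* NULL or unaligned fp: corrupt chain */
	}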
-rw-r--r--  arch/arm64/include/asm/asm-bug.h    |  54
-rw-r--r--  arch/arm64/include/asm/assembler.h  |  11
-rw-r--r--  arch/arm64/include/asm/bug.h        |  35
-rw-r--r--  arch/arm64/include/asm/irq.h        |  39
-rw-r--r--  arch/arm64/include/asm/ptrace.h     |   1
-rw-r--r--  arch/arm64/include/asm/stacktrace.h |   1
-rw-r--r--  arch/arm64/include/asm/traps.h      |   5
-rw-r--r--  arch/arm64/kernel/asm-offsets.c     |   1
-rw-r--r--  arch/arm64/kernel/entry.S           | 122
-rw-r--r--  arch/arm64/kernel/head.S            |   4
-rw-r--r--  arch/arm64/kernel/perf_callchain.c  |   1
-rw-r--r--  arch/arm64/kernel/process.c         |   5
-rw-r--r--  arch/arm64/kernel/ptrace.c          |   2
-rw-r--r--  arch/arm64/kernel/return_address.c  |   1
-rw-r--r--  arch/arm64/kernel/stacktrace.c      |  57
-rw-r--r--  arch/arm64/kernel/time.c            |   1
-rw-r--r--  arch/arm64/kernel/traps.c           |  34
17 files changed, 176 insertions, 198 deletions
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
new file mode 100644
index 000000000000..636e755bcdca
--- /dev/null
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -0,0 +1,54 @@
+#ifndef __ASM_ASM_BUG_H
+/*
+ * Copyright (C) 2017 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define __ASM_ASM_BUG_H
+
+#include <asm/brk-imm.h>
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line)		\
+		.pushsection .rodata.str,"aMS",@progbits,1;	\
+	2:	.string file;				\
+		.popsection;				\
+							\
+		.long 2b - 0b;				\
+		.short line;
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+
+#define __BUG_ENTRY(flags)				\
+		.pushsection __bug_table,"aw";		\
+		.align 2;				\
+	0:	.long 1f - 0b;				\
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+		.short flags;				\
+		.popsection;				\
+	1:
+#else
+#define __BUG_ENTRY(flags)
+#endif
+
+#define ASM_BUG_FLAGS(flags)				\
+	__BUG_ENTRY(flags)				\
+	brk	BUG_BRK_IMM
+
+#define ASM_BUG()	ASM_BUG_FLAGS(0)
+
+#endif /* __ASM_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5d8903c45031..1ef56837cc6f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -409,6 +409,17 @@ alternative_endif
 	.size	__pi_##x, . - x;	\
 	ENDPROC(x)
 
+/*
+ * Annotate a function as being unsuitable for kprobes.
+ */
+#ifdef CONFIG_KPROBES
+#define NOKPROBE(x)				\
+	.pushsection "_kprobe_blacklist", "aw";	\
+	.quad	x;				\
+	.popsection;
+#else
+#define NOKPROBE(x)
+#endif
 /*
  * Emit a 64-bit absolute little endian symbol reference in a way that
  * ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index a02a57186f56..d7dc43752705 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,41 +18,12 @@
 #ifndef _ARCH_ARM64_ASM_BUG_H
 #define _ARCH_ARM64_ASM_BUG_H
 
-#include <asm/brk-imm.h>
+#include <linux/stringify.h>
 
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
-#define __BUGVERBOSE_LOCATION(file, line)		\
-		".pushsection .rodata.str,\"aMS\",@progbits,1\n"	\
-	"2:	.string \"" file "\"\n\t"		\
-		".popsection\n\t"			\
-							\
-		".long 2b - 0b\n\t"			\
-		".short " #line "\n\t"
-#else
-#define _BUGVERBOSE_LOCATION(file, line)
-#endif
-
-#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags)				\
-		".pushsection __bug_table,\"aw\"\n\t"	\
-		".align 2\n\t"				\
-	"0:	.long 1f - 0b\n\t"			\
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
-		".short " #flags "\n\t"			\
-		".popsection\n"				\
-	"1:	"
-#else
-#define __BUG_ENTRY(flags) ""
-#endif
+#include <asm/asm-bug.h>
 
 #define __BUG_FLAGS(flags)				\
-	asm volatile (					\
-		__BUG_ENTRY(flags)			\
-		"brk %[imm]" :: [imm] "i" (BUG_BRK_IMM)	\
-	);
-
+	asm volatile (__stringify(ASM_BUG_FLAGS(flags)));
 
 #define BUG() do {					\
 	__BUG_FLAGS(0);					\
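With <asm/asm-bug.h> providing the one canonical definition, the C-level BUG() is now just a stringified ASM_BUG_FLAGS(), so .S files and C files emit identical bug-table entries. A hand-expanded illustration of roughly what one BUG() site boils down to, assuming CONFIG_GENERIC_BUG and CONFIG_DEBUG_BUGVERBOSE (the file name and line number below are made-up placeholders; the real text is produced by the preprocessor, not written by hand):

	static inline void bug_expansion_sketch(void)
	{
		/* Roughly what __BUG_FLAGS(0) expands to after __stringify(). */
		asm volatile(
			"	.pushsection __bug_table,\"aw\"\n"
			"	.align 2\n"
			"0:	.long 1f - 0b\n"		/* bug_entry: offset of the brk */
			"	.pushsection .rodata.str,\"aMS\",@progbits,1\n"
			"2:	.string \"foo.c\"\n"		/* placeholder for __FILE__ */
			"	.popsection\n"
			"	.long 2b - 0b\n"		/* bug_entry: offset of the file name */
			"	.short 42\n"			/* placeholder for __LINE__ */
			"	.short 0\n"			/* flags */
			"	.popsection\n"
			"1:	brk #0x800\n"			/* BUG_BRK_IMM from <asm/brk-imm.h> */
		);
	}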
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d941fc..8ba89c4ca183 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,6 +7,7 @@
 #ifndef __ASSEMBLER__
 
 #include <linux/percpu.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm-generic/irq.h>
 #include <asm/thread_info.h>
@@ -15,31 +16,6 @@ struct pt_regs;
 
 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
 
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- *      ------------
- *      |           |  <- irq_stack_ptr
- *  top ------------
- *      |   x19     | <- irq_stack_ptr - 0x08
- *      ------------
- *      |   x29     | <- irq_stack_ptr - 0x10
- *      ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline int nr_legacy_irqs(void)
@@ -47,14 +23,21 @@ static inline int nr_legacy_irqs(void)
 	return 0;
 }
 
-static inline bool on_irq_stack(unsigned long sp, int cpu)
+static inline bool on_irq_stack(unsigned long sp)
 {
-	/* variable names the same as kernel/stacktrace.c */
-	unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+	unsigned long low = (unsigned long)raw_cpu_ptr(irq_stack);
 	unsigned long high = low + IRQ_STACK_START_SP;
 
 	return (low <= sp && sp <= high);
 }
 
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+{
+	unsigned long low = (unsigned long)task_stack_page(tsk);
+	unsigned long high = low + THREAD_SIZE;
+
+	return (low <= sp && sp < high);
+}
+
 #endif /* !__ASSEMBLER__ */
 #endif
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 4f64373b84fd..6069d66e0bc2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -137,6 +137,7 @@ struct pt_regs {
 
 	u64 orig_addr_limit;
 	u64 unused;	// maintain 16 byte alignment
+	u64 stackframe[2];
 };
 
 static inline bool in_syscall(struct pt_regs const *regs)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5b6eafccc5d8..3bebab378c72 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -20,7 +20,6 @@ struct task_struct;
 
 struct stackframe {
 	unsigned long fp;
-	unsigned long sp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	unsigned int graph;
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 02e9035b0685..41361684580d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -60,4 +60,9 @@ static inline int in_exception_text(unsigned long ptr)
 	return in ? : __in_irqentry_text(ptr);
 }
 
+static inline int in_entry_text(unsigned long ptr)
+{
+	return ptr >= (unsigned long)&__entry_text_start &&
+	       ptr < (unsigned long)&__entry_text_end;
+}
 #endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b3bb7ef97bc8..71bf088f1e4b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,6 +75,7 @@ int main(void)
   DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
   DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
+  DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,	offsetof(struct mm_struct, context.id.counter));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cace76d17535..0b8461158c56 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -111,6 +111,18 @@
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
 
+	/*
+	 * In order to be able to dump the contents of struct pt_regs at the
+	 * time the exception was taken (in case we attempt to walk the call
+	 * stack later), chain it together with the stack frames.
+	 */
+	.if \el == 0
+	stp	xzr, xzr, [sp, #S_STACKFRAME]
+	.else
+	stp	x29, x22, [sp, #S_STACKFRAME]
+	.endif
+	add	x29, sp, #S_STACKFRAME
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
@@ -263,14 +275,6 @@ alternative_else_nop_endif
 
 	/* switch to the irq stack */
 	mov	sp, x26
-
-	/*
-	 * Add a dummy stack frame, this non-standard format is fixed up
-	 * by unwind_frame()
-	 */
-	stp	x29, x19, [sp, #-16]!
-	mov	x29, sp
-
 9998:
 	.endm
 
@@ -350,7 +354,8 @@ END(vectors)
 	mov	x0, sp
 	mov	x1, #\reason
 	mrs	x2, esr_el1
-	b	bad_mode
+	bl	bad_mode
+	ASM_BUG()
 	.endm
 
 el0_sync_invalid:
@@ -447,14 +452,16 @@ el1_sp_pc:
 	mrs	x0, far_el1
 	enable_dbg
 	mov	x2, sp
-	b	do_sp_pc_abort
+	bl	do_sp_pc_abort
+	ASM_BUG()
 el1_undef:
 	/*
 	 * Undefined instruction
 	 */
 	enable_dbg
 	mov	x0, sp
-	b	do_undefinstr
+	bl	do_undefinstr
+	ASM_BUG()
 el1_dbg:
 	/*
 	 * Debug exception handling
@@ -472,7 +479,8 @@ el1_inv:
 	mov	x0, sp
 	mov	x2, x1
 	mov	x1, #BAD_SYNC
-	b	bad_mode
+	bl	bad_mode
+	ASM_BUG()
 ENDPROC(el1_sync)
 
 	.align	6
@@ -706,38 +714,6 @@ el0_irq_naked:
 ENDPROC(el0_irq)
 
 /*
- * Register switch for AArch64. The callee-saved registers need to be saved
- * and restored. On entry:
- * x0 = previous task_struct (must be preserved across the switch)
- * x1 = next task_struct
- * Previous and next are guaranteed not to be the same.
- *
- */
-ENTRY(cpu_switch_to)
-	mov	x10, #THREAD_CPU_CONTEXT
-	add	x8, x0, x10
-	mov	x9, sp
-	stp	x19, x20, [x8], #16		// store callee-saved registers
-	stp	x21, x22, [x8], #16
-	stp	x23, x24, [x8], #16
-	stp	x25, x26, [x8], #16
-	stp	x27, x28, [x8], #16
-	stp	x29, x9, [x8], #16
-	str	lr, [x8]
-	add	x8, x1, x10
-	ldp	x19, x20, [x8], #16		// restore callee-saved registers
-	ldp	x21, x22, [x8], #16
-	ldp	x23, x24, [x8], #16
-	ldp	x25, x26, [x8], #16
-	ldp	x27, x28, [x8], #16
-	ldp	x29, x9, [x8], #16
-	ldr	lr, [x8]
-	mov	sp, x9
-	msr	sp_el0, x1
-	ret
-ENDPROC(cpu_switch_to)
-
-/*
  * This is the fast syscall return path. We do as little as possible here,
  * and this includes saving x0 back into the kernel stack.
  */
@@ -780,18 +756,6 @@ finish_ret_to_user:
 ENDPROC(ret_to_user)
 
 /*
- * This is how we return from a fork.
- */
-ENTRY(ret_from_fork)
-	bl	schedule_tail
-	cbz	x19, 1f				// not a kernel thread
-	mov	x0, x20
-	blr	x19
-1:	get_thread_info tsk
-	b	ret_to_user
-ENDPROC(ret_from_fork)
-
-/*
  * SVC handler.
  */
 	.align	6
@@ -863,3 +827,49 @@ ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ * x0 = previous task_struct (must be preserved across the switch)
+ * x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
+	mov	x9, sp
+	stp	x19, x20, [x8], #16		// store callee-saved registers
+	stp	x21, x22, [x8], #16
+	stp	x23, x24, [x8], #16
+	stp	x25, x26, [x8], #16
+	stp	x27, x28, [x8], #16
+	stp	x29, x9, [x8], #16
+	str	lr, [x8]
+	add	x8, x1, x10
+	ldp	x19, x20, [x8], #16		// restore callee-saved registers
+	ldp	x21, x22, [x8], #16
+	ldp	x23, x24, [x8], #16
+	ldp	x25, x26, [x8], #16
+	ldp	x27, x28, [x8], #16
+	ldp	x29, x9, [x8], #16
+	ldr	lr, [x8]
+	mov	sp, x9
+	msr	sp_el0, x1
+	ret
+ENDPROC(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bl	schedule_tail
+	cbz	x19, 1f				// not a kernel thread
+	mov	x0, x20
+	blr	x19
+1:	get_thread_info tsk
+	b	ret_to_user
+ENDPROC(ret_from_fork)
+NOKPROBE(ret_from_fork)
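Since x29 now points at pt_regs::stackframe for as long as an EL1 exception handler runs, C code can get back to the saved register file from nothing more than a frame pointer whose unwound PC lies in .entry.text. A small sketch of that recovery, mirroring the dump_backtrace() change further down (kernel headers assumed; the helper name is made up for illustration):

	#include <linux/stddef.h>	/* offsetof() */
	#include <asm/ptrace.h>		/* struct pt_regs, including the new stackframe[2] */

	/*
	 * Sketch: the frame record linked in by kernel_entry is the stackframe[]
	 * member at the tail of struct pt_regs, so the saved registers sit
	 * immediately below the frame pointer.
	 */
	static inline struct pt_regs *frame_to_pt_regs(unsigned long fp)
	{
		return (struct pt_regs *)(fp - offsetof(struct pt_regs, stackframe));
	}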
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 73a0531e0187..d3015172c136 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -362,6 +362,9 @@ __primary_switched:
 	ret					// to __primary_switch()
 0:
 #endif
+	add	sp, sp, #16
+	mov	x29, #0
+	mov	x30, #0
 	b	start_kernel
 ENDPROC(__primary_switched)
 
@@ -617,6 +620,7 @@ __secondary_switched:
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	msr	sp_el0, x2
 	mov	x29, #0
+	mov	x30, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
 
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 713ca824f266..bcafd7dcfe8b 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -162,7 +162,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 	}
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 659ae8094ed5..85b953dd023a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -382,15 +382,12 @@ unsigned long get_wchan(struct task_struct *p)
 		return 0;
 
 	frame.fp = thread_saved_fp(p);
-	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = p->curr_ret_stack;
 #endif
 	do {
-		if (frame.sp < stack_page ||
-		    frame.sp >= stack_page + THREAD_SIZE ||
-		    unwind_frame(p, &frame))
+		if (unwind_frame(p, &frame))
 			goto out;
 		if (!in_sched_functions(frame.pc)) {
 			ret = frame.pc;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 28619b5b6746..320df70c11e6 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -127,7 +127,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
 	return ((addr & ~(THREAD_SIZE - 1)) ==
 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-		on_irq_stack(addr, raw_smp_processor_id());
+		on_irq_stack(addr);
 }
 
 /**
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 12a87f2600f2..933adbc0f654 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -42,7 +42,6 @@ void *return_address(unsigned int level)
 	data.addr = NULL;
 
 	frame.fp = (unsigned long)__builtin_frame_address(0);
-	frame.sp = current_stack_pointer;
 	frame.pc = (unsigned long)return_address; /* dummy */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 09d37d66b630..35588caad9d0 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -42,9 +42,10 @@
  */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
-	unsigned long high, low;
 	unsigned long fp = frame->fp;
-	unsigned long irq_stack_ptr;
+
+	if (fp & 0xf)
+		return -EINVAL;
 
 	if (!tsk)
 		tsk = current;
@@ -53,22 +54,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
 	 */
-	if (tsk == current && !preemptible())
-		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
-	else
-		irq_stack_ptr = 0;
-
-	low  = frame->sp;
-	/* irq stacks are not THREAD_SIZE aligned */
-	if (on_irq_stack(frame->sp, raw_smp_processor_id()))
-		high = irq_stack_ptr;
-	else
-		high = ALIGN(low, THREAD_SIZE) - 0x20;
-
-	if (fp < low || fp > high || fp & 0xf)
+	if (!(tsk == current && !preemptible() && on_irq_stack(fp)) &&
+	    !on_task_stack(tsk, fp))
 		return -EINVAL;
 
-	frame->sp = fp + 0x10;
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
 
@@ -86,34 +75,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 	/*
-	 * Check whether we are going to walk through from interrupt stack
-	 * to task stack.
-	 * If we reach the end of the stack - and its an interrupt stack,
-	 * unpack the dummy frame to find the original elr.
-	 *
-	 * Check the frame->fp we read from the bottom of the irq_stack,
-	 * and the original task stack pointer are both in current->stack.
+	 * Frames created upon entry from EL0 have NULL FP and PC values, so
+	 * don't bother reporting these. Frames created by __noreturn functions
+	 * might have a valid FP even if PC is bogus, so only terminate where
+	 * both are NULL.
 	 */
-	if (frame->sp == irq_stack_ptr) {
-		struct pt_regs *irq_args;
-		unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
-		if (object_is_on_stack((void *)orig_sp) &&
-		    object_is_on_stack((void *)frame->fp)) {
-			frame->sp = orig_sp;
-
-			/* orig_sp is the saved pt_regs, find the elr */
-			irq_args = (struct pt_regs *)orig_sp;
-			frame->pc = irq_args->pc;
-		} else {
-			/*
-			 * This frame has a non-standard format, and we
-			 * didn't fix it, because the data looked wrong.
-			 * Refuse to output this frame.
-			 */
-			return -EINVAL;
-		}
-	}
+	if (!frame->fp && !frame->pc)
+		return -EINVAL;
 
 	return 0;
 }
@@ -167,7 +135,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	data.no_sched_functions = 0;
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;
@@ -192,12 +159,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	if (tsk != current) {
 		data.no_sched_functions = 1;
 		frame.fp = thread_saved_fp(tsk);
-		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	} else {
 		data.no_sched_functions = 0;
 		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.sp = current_stack_pointer;
 		frame.pc = (unsigned long)save_stack_trace_tsk;
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index da33c90248e9..a4391280fba9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -50,7 +50,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 		return regs->pc;
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = -1; /* no task info */
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ccb9727d67b2..9f023d128c8c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -143,7 +143,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long irq_stack_ptr;
 	int skip;
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -154,25 +153,14 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	if (!try_get_task_stack(tsk))
 		return;
 
-	/*
-	 * Switching between stacks is valid when tracing current and in
-	 * non-preemptible context.
-	 */
-	if (tsk == current && !preemptible())
-		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
-	else
-		irq_stack_ptr = 0;
-
 	if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.sp = current_stack_pointer;
 		frame.pc = (unsigned long)dump_backtrace;
 	} else {
 		/*
 		 * task blocked in __switch_to
 		 */
 		frame.fp = thread_saved_fp(tsk);
-		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -182,13 +170,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	skip = !!regs;
 	printk("Call trace:\n");
 	while (1) {
-		unsigned long where = frame.pc;
 		unsigned long stack;
 		int ret;
 
 		/* skip until specified stack frame */
 		if (!skip) {
-			dump_backtrace_entry(where);
+			dump_backtrace_entry(frame.pc);
 		} else if (frame.fp == regs->regs[29]) {
 			skip = 0;
 			/*
@@ -203,20 +190,13 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		ret = unwind_frame(tsk, &frame);
 		if (ret < 0)
 			break;
-		stack = frame.sp;
-		if (in_exception_text(where)) {
-			/*
-			 * If we switched to the irq_stack before calling this
-			 * exception handler, then the pt_regs will be on the
-			 * task stack. The easiest way to tell is if the large
-			 * pt_regs would overlap with the end of the irq_stack.
-			 */
-			if (stack < irq_stack_ptr &&
-			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
-				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+		if (in_entry_text(frame.pc)) {
+			stack = frame.fp - offsetof(struct pt_regs, stackframe);
 
-			dump_mem("", "Exception stack", stack,
-				 stack + sizeof(struct pt_regs));
+			if (on_task_stack(tsk, stack) ||
+			    (tsk == current && !preemptible() && on_irq_stack(stack)))
+				dump_mem("", "Exception stack", stack,
+					 stack + sizeof(struct pt_regs));
 		}
 	}
 