aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2019-01-28 02:33:08 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2019-05-02 07:54:11 -0400
commit78c98f9074135d3dab4e39544e0a537f92388fce (patch)
tree6f6d96e8e0911fe2aa27ec89cca52d56d3ed6f7e
parent1c705ad5efae9c712e763a47fbcc95b87b7347d2 (diff)
s390/unwind: introduce stack unwind API
Rework the dump_trace() stack unwinder interface to support different unwinding algorithms. The new interface looks like this: struct unwind_state state; unwind_for_each_frame(&state, task, regs, start_stack) do_something(state.sp, state.ip, state.reliable); The unwind_bc.c file contains the implementation for the classic back-chain unwinder. One positive side effect of the new code is it now handles ftraced functions gracefully. It prints the real name of the return function instead of 'return_to_handler'. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/include/asm/processor.h72
-rw-r--r--arch/s390/include/asm/stacktrace.h114
-rw-r--r--arch/s390/include/asm/unwind.h101
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/dumpstack.c167
-rw-r--r--arch/s390/kernel/irq.c1
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/perf_event.c16
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/stacktrace.c69
-rw-r--r--arch/s390/kernel/unwind_bc.c155
-rw-r--r--arch/s390/mm/maccess.c1
-rw-r--r--arch/s390/oprofile/init.c22
16 files changed, 521 insertions, 205 deletions
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 8aa85e39f50b..9f2ff4a54aff 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -156,25 +156,6 @@ struct thread_struct {
156 156
157typedef struct thread_struct thread_struct; 157typedef struct thread_struct thread_struct;
158 158
159/*
160 * Stack layout of a C stack frame.
161 */
162#ifndef __PACK_STACK
163struct stack_frame {
164 unsigned long back_chain;
165 unsigned long empty1[5];
166 unsigned long gprs[10];
167 unsigned int empty2[8];
168};
169#else
170struct stack_frame {
171 unsigned long empty1[5];
172 unsigned int empty2[8];
173 unsigned long gprs[10];
174 unsigned long back_chain;
175};
176#endif
177
178#define ARCH_MIN_TASKALIGN 8 159#define ARCH_MIN_TASKALIGN 8
179 160
180#define INIT_THREAD { \ 161#define INIT_THREAD { \
@@ -206,11 +187,7 @@ struct mm_struct;
206struct seq_file; 187struct seq_file;
207struct pt_regs; 188struct pt_regs;
208 189
209typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
210void dump_trace(dump_trace_func_t func, void *data,
211 struct task_struct *task, unsigned long sp);
212void show_registers(struct pt_regs *regs); 190void show_registers(struct pt_regs *regs);
213
214void show_cacheinfo(struct seq_file *m); 191void show_cacheinfo(struct seq_file *m);
215 192
216/* Free all resources held by a thread. */ 193/* Free all resources held by a thread. */
@@ -244,55 +221,6 @@ static __no_kasan_or_inline unsigned short stap(void)
244 return cpu_address; 221 return cpu_address;
245} 222}
246 223
247#define CALL_ARGS_0() \
248 register unsigned long r2 asm("2")
249#define CALL_ARGS_1(arg1) \
250 register unsigned long r2 asm("2") = (unsigned long)(arg1)
251#define CALL_ARGS_2(arg1, arg2) \
252 CALL_ARGS_1(arg1); \
253 register unsigned long r3 asm("3") = (unsigned long)(arg2)
254#define CALL_ARGS_3(arg1, arg2, arg3) \
255 CALL_ARGS_2(arg1, arg2); \
256 register unsigned long r4 asm("4") = (unsigned long)(arg3)
257#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
258 CALL_ARGS_3(arg1, arg2, arg3); \
259 register unsigned long r4 asm("5") = (unsigned long)(arg4)
260#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
261 CALL_ARGS_4(arg1, arg2, arg3, arg4); \
262 register unsigned long r4 asm("6") = (unsigned long)(arg5)
263
264#define CALL_FMT_0 "=&d" (r2) :
265#define CALL_FMT_1 "+&d" (r2) :
266#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
267#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
268#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
269#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
270
271#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
272#define CALL_CLOBBER_4 CALL_CLOBBER_5
273#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
274#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
275#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
276#define CALL_CLOBBER_0 CALL_CLOBBER_1
277
278#define CALL_ON_STACK(fn, stack, nr, args...) \
279({ \
280 CALL_ARGS_##nr(args); \
281 unsigned long prev; \
282 \
283 asm volatile( \
284 " la %[_prev],0(15)\n" \
285 " la 15,0(%[_stack])\n" \
286 " stg %[_prev],%[_bc](15)\n" \
287 " brasl 14,%[_fn]\n" \
288 " la 15,0(%[_prev])\n" \
289 : [_prev] "=&a" (prev), CALL_FMT_##nr \
290 [_stack] "a" (stack), \
291 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
292 [_fn] "X" (fn) : CALL_CLOBBER_##nr); \
293 r2; \
294})
295
296/* 224/*
297 * Give up the time slice of the virtual PU. 225 * Give up the time slice of the virtual PU.
298 */ 226 */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
new file mode 100644
index 000000000000..49634bfbecdd
--- /dev/null
+++ b/arch/s390/include/asm/stacktrace.h
@@ -0,0 +1,114 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_STACKTRACE_H
3#define _ASM_S390_STACKTRACE_H
4
5#include <linux/uaccess.h>
6#include <linux/ptrace.h>
7#include <asm/switch_to.h>
8
9enum stack_type {
10 STACK_TYPE_UNKNOWN,
11 STACK_TYPE_TASK,
12 STACK_TYPE_IRQ,
13 STACK_TYPE_NODAT,
14 STACK_TYPE_RESTART,
15};
16
17struct stack_info {
18 enum stack_type type;
19 unsigned long begin, end;
20};
21
22const char *stack_type_name(enum stack_type type);
23int get_stack_info(unsigned long sp, struct task_struct *task,
24 struct stack_info *info, unsigned long *visit_mask);
25
26static inline bool on_stack(struct stack_info *info,
27 unsigned long addr, size_t len)
28{
29 if (info->type == STACK_TYPE_UNKNOWN)
30 return false;
31 if (addr + len < addr)
32 return false;
33 return addr >= info->begin && addr + len < info->end;
34}
35
36static inline unsigned long get_stack_pointer(struct task_struct *task,
37 struct pt_regs *regs)
38{
39 if (regs)
40 return (unsigned long) kernel_stack_pointer(regs);
41 if (task == current)
42 return current_stack_pointer();
43 return (unsigned long) task->thread.ksp;
44}
45
46/*
47 * Stack layout of a C stack frame.
48 */
49#ifndef __PACK_STACK
50struct stack_frame {
51 unsigned long back_chain;
52 unsigned long empty1[5];
53 unsigned long gprs[10];
54 unsigned int empty2[8];
55};
56#else
57struct stack_frame {
58 unsigned long empty1[5];
59 unsigned int empty2[8];
60 unsigned long gprs[10];
61 unsigned long back_chain;
62};
63#endif
64
65#define CALL_ARGS_0() \
66 register unsigned long r2 asm("2")
67#define CALL_ARGS_1(arg1) \
68 register unsigned long r2 asm("2") = (unsigned long)(arg1)
69#define CALL_ARGS_2(arg1, arg2) \
70 CALL_ARGS_1(arg1); \
71 register unsigned long r3 asm("3") = (unsigned long)(arg2)
72#define CALL_ARGS_3(arg1, arg2, arg3) \
73 CALL_ARGS_2(arg1, arg2); \
74 register unsigned long r4 asm("4") = (unsigned long)(arg3)
75#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
76 CALL_ARGS_3(arg1, arg2, arg3); \
 77 register unsigned long r5 asm("5") = (unsigned long)(arg4)
78#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
79 CALL_ARGS_4(arg1, arg2, arg3, arg4); \
 80 register unsigned long r6 asm("6") = (unsigned long)(arg5)
81
82#define CALL_FMT_0 "=&d" (r2) :
83#define CALL_FMT_1 "+&d" (r2) :
84#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
85#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
86#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
87#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
88
89#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
90#define CALL_CLOBBER_4 CALL_CLOBBER_5
91#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
92#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
93#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
94#define CALL_CLOBBER_0 CALL_CLOBBER_1
95
96#define CALL_ON_STACK(fn, stack, nr, args...) \
97({ \
98 CALL_ARGS_##nr(args); \
99 unsigned long prev; \
100 \
101 asm volatile( \
102 " la %[_prev],0(15)\n" \
103 " la 15,0(%[_stack])\n" \
104 " stg %[_prev],%[_bc](15)\n" \
105 " brasl 14,%[_fn]\n" \
106 " la 15,0(%[_prev])\n" \
107 : [_prev] "=&a" (prev), CALL_FMT_##nr \
108 [_stack] "a" (stack), \
109 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
110 [_fn] "X" (fn) : CALL_CLOBBER_##nr); \
111 r2; \
112})
113
114#endif /* _ASM_S390_STACKTRACE_H */
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
new file mode 100644
index 000000000000..6eb2ef105d87
--- /dev/null
+++ b/arch/s390/include/asm/unwind.h
@@ -0,0 +1,101 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_UNWIND_H
3#define _ASM_S390_UNWIND_H
4
5#include <linux/sched.h>
6#include <linux/ftrace.h>
7#include <asm/ptrace.h>
8#include <asm/stacktrace.h>
9
10/*
11 * To use the stack unwinder it has to be initialized with unwind_start.
 12 * There are four combinations for task and regs:
13 * 1) task==NULL, regs==NULL: the unwind starts for the task that is currently
14 * running, sp/ip picked up from the CPU registers
15 * 2) task==NULL, regs!=NULL: the unwind starts from the sp/ip found in
16 * the struct pt_regs of an interrupt frame for the current task
17 * 3) task!=NULL, regs==NULL: the unwind starts for an inactive task with
18 * the sp picked up from task->thread.ksp and the ip picked up from the
19 * return address stored by __switch_to
20 * 4) task!=NULL, regs!=NULL: the sp/ip are picked up from the interrupt
 21 * frame 'regs' of an inactive task
22 * If 'first_frame' is not zero unwind_start skips unwind frames until it
23 * reaches the specified stack pointer.
24 * The end of the unwinding is indicated with unwind_done, this can be true
25 * right after unwind_start, e.g. with first_frame!=0 that can not be found.
26 * unwind_next_frame skips to the next frame.
27 * Once the unwind is completed unwind_error() can be used to check if there
28 * has been a situation where the unwinder could not correctly understand
29 * the tasks call chain.
30 */
31
32struct unwind_state {
33 struct stack_info stack_info;
34 unsigned long stack_mask;
35 struct task_struct *task;
36 struct pt_regs *regs;
37 unsigned long sp, ip;
38 int graph_idx;
39 bool reliable;
40 bool error;
41};
42
43void __unwind_start(struct unwind_state *state, struct task_struct *task,
44 struct pt_regs *regs, unsigned long first_frame);
45bool unwind_next_frame(struct unwind_state *state);
46unsigned long unwind_get_return_address(struct unwind_state *state);
47
48static inline bool unwind_done(struct unwind_state *state)
49{
50 return state->stack_info.type == STACK_TYPE_UNKNOWN;
51}
52
53static inline bool unwind_error(struct unwind_state *state)
54{
55 return state->error;
56}
57
58static inline void unwind_start(struct unwind_state *state,
59 struct task_struct *task,
60 struct pt_regs *regs,
61 unsigned long sp)
62{
63 sp = sp ? : get_stack_pointer(task, regs);
64 __unwind_start(state, task, regs, sp);
65}
66
67static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
68{
69 return unwind_done(state) ? NULL : state->regs;
70}
71
72#define unwind_for_each_frame(state, task, regs, first_frame) \
73 for (unwind_start(state, task, regs, first_frame); \
74 !unwind_done(state); \
75 unwind_next_frame(state))
76
77static inline void unwind_init(void) {}
78static inline void unwind_module_init(struct module *mod, void *orc_ip,
79 size_t orc_ip_size, void *orc,
80 size_t orc_size) {}
81
82#ifdef CONFIG_KASAN
83/*
84 * This disables KASAN checking when reading a value from another task's stack,
85 * since the other task could be running on another CPU and could have poisoned
86 * the stack in the meantime.
87 */
88#define READ_ONCE_TASK_STACK(task, x) \
89({ \
90 unsigned long val; \
91 if (task == current) \
92 val = READ_ONCE(x); \
93 else \
94 val = READ_ONCE_NOCHECK(x); \
95 val; \
96})
97#else
98#define READ_ONCE_TASK_STACK(task, x) READ_ONCE(x)
99#endif
100
101#endif /* _ASM_S390_UNWIND_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 19425605a83d..b0478d01a0c5 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -39,6 +39,7 @@ CFLAGS_smp.o := -Wno-nonnull
39# 39#
40CFLAGS_stacktrace.o += -fno-optimize-sibling-calls 40CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
41CFLAGS_dumpstack.o += -fno-optimize-sibling-calls 41CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
42CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
42 43
43# 44#
44# Pass UTS_MACHINE for user_regset definition 45# Pass UTS_MACHINE for user_regset definition
@@ -51,7 +52,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
51obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o 52obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
52obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o 53obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
53obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o 54obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
54obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o 55obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
55 56
56extra-y += head64.o vmlinux.lds 57extra-y += head64.o vmlinux.lds
57 58
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 164bec175628..41ac4ad21311 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
17#include <asm/gmap.h> 17#include <asm/gmap.h>
18#include <asm/nmi.h> 18#include <asm/nmi.h>
19#include <asm/stacktrace.h>
19 20
20int main(void) 21int main(void)
21{ 22{
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index cb7f55bbe06e..9e87b68be21c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -21,95 +21,124 @@
21#include <asm/debug.h> 21#include <asm/debug.h>
22#include <asm/dis.h> 22#include <asm/dis.h>
23#include <asm/ipl.h> 23#include <asm/ipl.h>
24#include <asm/unwind.h>
24 25
25/* 26const char *stack_type_name(enum stack_type type)
26 * For dump_trace we have tree different stack to consider:
27 * - the panic stack which is used if the kernel stack has overflown
28 * - the asynchronous interrupt stack (cpu related)
29 * - the synchronous kernel stack (process related)
30 * The stack trace can start at any of the three stacks and can potentially
31 * touch all of them. The order is: panic stack, async stack, sync stack.
32 */
33static unsigned long __no_sanitize_address
34__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
35 unsigned long low, unsigned long high)
36{ 27{
37 struct stack_frame *sf; 28 switch (type) {
38 struct pt_regs *regs; 29 case STACK_TYPE_TASK:
39 30 return "task";
40 while (1) { 31 case STACK_TYPE_IRQ:
41 if (sp < low || sp > high - sizeof(*sf)) 32 return "irq";
42 return sp; 33 case STACK_TYPE_NODAT:
43 sf = (struct stack_frame *) sp; 34 return "nodat";
44 if (func(data, sf->gprs[8], 0)) 35 case STACK_TYPE_RESTART:
45 return sp; 36 return "restart";
46 /* Follow the backchain. */ 37 default:
47 while (1) { 38 return "unknown";
48 low = sp;
49 sp = sf->back_chain;
50 if (!sp)
51 break;
52 if (sp <= low || sp > high - sizeof(*sf))
53 return sp;
54 sf = (struct stack_frame *) sp;
55 if (func(data, sf->gprs[8], 1))
56 return sp;
57 }
58 /* Zero backchain detected, check for interrupt frame. */
59 sp = (unsigned long) (sf + 1);
60 if (sp <= low || sp > high - sizeof(*regs))
61 return sp;
62 regs = (struct pt_regs *) sp;
63 if (!user_mode(regs)) {
64 if (func(data, regs->psw.addr, 1))
65 return sp;
66 }
67 low = sp;
68 sp = regs->gprs[15];
69 } 39 }
70} 40}
71 41
72void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task, 42static inline bool in_stack(unsigned long sp, struct stack_info *info,
73 unsigned long sp) 43 enum stack_type type, unsigned long low,
44 unsigned long high)
45{
46 if (sp < low || sp >= high)
47 return false;
48 info->type = type;
49 info->begin = low;
50 info->end = high;
51 return true;
52}
53
54static bool in_task_stack(unsigned long sp, struct task_struct *task,
55 struct stack_info *info)
56{
57 unsigned long stack;
58
59 stack = (unsigned long) task_stack_page(task);
60 return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
61}
62
63static bool in_irq_stack(unsigned long sp, struct stack_info *info)
74{ 64{
75 unsigned long frame_size; 65 unsigned long frame_size, top;
76 66
77 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); 67 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
78#ifdef CONFIG_CHECK_STACK 68 top = S390_lowcore.async_stack + frame_size;
79 sp = __dump_trace(func, data, sp, 69 return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
80 S390_lowcore.nodat_stack + frame_size - THREAD_SIZE, 70}
81 S390_lowcore.nodat_stack + frame_size); 71
82#endif 72static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
83 sp = __dump_trace(func, data, sp, 73{
84 S390_lowcore.async_stack + frame_size - THREAD_SIZE, 74 unsigned long frame_size, top;
85 S390_lowcore.async_stack + frame_size); 75
86 task = task ?: current; 76 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
87 __dump_trace(func, data, sp, 77 top = S390_lowcore.nodat_stack + frame_size;
88 (unsigned long)task_stack_page(task), 78 return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
89 (unsigned long)task_stack_page(task) + THREAD_SIZE);
90} 79}
91EXPORT_SYMBOL_GPL(dump_trace);
92 80
93static int show_address(void *data, unsigned long address, int reliable) 81static bool in_restart_stack(unsigned long sp, struct stack_info *info)
94{ 82{
95 if (reliable) 83 unsigned long frame_size, top;
96 printk(" [<%016lx>] %pSR \n", address, (void *)address); 84
97 else 85 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
98 printk("([<%016lx>] %pSR)\n", address, (void *)address); 86 top = S390_lowcore.restart_stack + frame_size;
87 return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
88}
89
90int get_stack_info(unsigned long sp, struct task_struct *task,
91 struct stack_info *info, unsigned long *visit_mask)
92{
93 if (!sp)
94 goto unknown;
95
96 task = task ? : current;
97
98 /* Check per-task stack */
99 if (in_task_stack(sp, task, info))
100 goto recursion_check;
101
102 if (task != current)
103 goto unknown;
104
105 /* Check per-cpu stacks */
106 if (!in_irq_stack(sp, info) &&
107 !in_nodat_stack(sp, info) &&
108 !in_restart_stack(sp, info))
109 goto unknown;
110
111recursion_check:
112 /*
113 * Make sure we don't iterate through any given stack more than once.
114 * If it comes up a second time then there's something wrong going on:
115 * just break out and report an unknown stack type.
116 */
117 if (*visit_mask & (1UL << info->type)) {
118 printk_deferred_once(KERN_WARNING
119 "WARNING: stack recursion on stack type %d\n",
120 info->type);
121 goto unknown;
122 }
123 *visit_mask |= 1UL << info->type;
99 return 0; 124 return 0;
125unknown:
126 info->type = STACK_TYPE_UNKNOWN;
127 return -EINVAL;
100} 128}
101 129
102void show_stack(struct task_struct *task, unsigned long *stack) 130void show_stack(struct task_struct *task, unsigned long *stack)
103{ 131{
104 unsigned long sp = (unsigned long) stack; 132 struct unwind_state state;
105 133
106 if (!sp)
107 sp = task ? task->thread.ksp : current_stack_pointer();
108 printk("Call Trace:\n"); 134 printk("Call Trace:\n");
109 dump_trace(show_address, NULL, task, sp);
110 if (!task) 135 if (!task)
111 task = current; 136 task = current;
112 debug_show_held_locks(task); 137 unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
138 printk(state.reliable ? " [<%016lx>] %pSR \n" :
139 "([<%016lx>] %pSR)\n",
140 state.ip, (void *) state.ip);
141 debug_show_held_locks(task ? : current);
113} 142}
114 143
115static void show_last_breaking_event(struct pt_regs *regs) 144static void show_last_breaking_event(struct pt_regs *regs)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 150964f91183..8371855042dc 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -26,6 +26,7 @@
26#include <asm/lowcore.h> 26#include <asm/lowcore.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/hw_irq.h> 28#include <asm/hw_irq.h>
29#include <asm/stacktrace.h>
29#include "entry.h" 30#include "entry.h"
30 31
31DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); 32DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index e2ba7b7f574e..2f3a742a71a5 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,6 +27,7 @@
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/os_info.h> 28#include <asm/os_info.h>
29#include <asm/set_memory.h> 29#include <asm/set_memory.h>
30#include <asm/stacktrace.h>
30#include <asm/switch_to.h> 31#include <asm/switch_to.h>
31#include <asm/nmi.h> 32#include <asm/nmi.h>
32 33
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 0d770e513abf..fcb6c2e92b07 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -21,6 +21,7 @@
21#include <asm/lowcore.h> 21#include <asm/lowcore.h>
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/sysinfo.h> 23#include <asm/sysinfo.h>
24#include <asm/unwind.h>
24 25
25const char *perf_pmu_name(void) 26const char *perf_pmu_name(void)
26{ 27{
@@ -219,20 +220,13 @@ static int __init service_level_perf_register(void)
219} 220}
220arch_initcall(service_level_perf_register); 221arch_initcall(service_level_perf_register);
221 222
222static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
223{
224 struct perf_callchain_entry_ctx *entry = data;
225
226 perf_callchain_store(entry, address);
227 return 0;
228}
229
230void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 223void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
231 struct pt_regs *regs) 224 struct pt_regs *regs)
232{ 225{
233 if (user_mode(regs)) 226 struct unwind_state state;
234 return; 227
235 dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]); 228 unwind_for_each_frame(&state, current, regs, 0)
229 perf_callchain_store(entry, state.ip);
236} 230}
237 231
238/* Perf definitions for PMU event attributes in sysfs */ 232/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6e758bb6cd29..63873aa6693f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -37,6 +37,7 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/nmi.h> 38#include <asm/nmi.h>
39#include <asm/smp.h> 39#include <asm/smp.h>
40#include <asm/stacktrace.h>
40#include <asm/switch_to.h> 41#include <asm/switch_to.h>
41#include <asm/runtime_instr.h> 42#include <asm/runtime_instr.h>
42#include "entry.h" 43#include "entry.h"
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 64e4bc9dd130..f8544d517430 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -66,6 +66,7 @@
66#include <asm/diag.h> 66#include <asm/diag.h>
67#include <asm/os_info.h> 67#include <asm/os_info.h>
68#include <asm/sclp.h> 68#include <asm/sclp.h>
69#include <asm/stacktrace.h>
69#include <asm/sysinfo.h> 70#include <asm/sysinfo.h>
70#include <asm/numa.h> 71#include <asm/numa.h>
71#include <asm/alternative.h> 72#include <asm/alternative.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 88634fb0cc50..35fafa2b91a8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -53,6 +53,7 @@
53#include <asm/sigp.h> 53#include <asm/sigp.h>
54#include <asm/idle.h> 54#include <asm/idle.h>
55#include <asm/nmi.h> 55#include <asm/nmi.h>
56#include <asm/stacktrace.h>
56#include <asm/topology.h> 57#include <asm/topology.h>
57#include "entry.h" 58#include "entry.h"
58 59
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 460dcfba7d4e..89f9f63dca18 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,40 +11,21 @@
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13#include <linux/export.h> 13#include <linux/export.h>
14 14#include <asm/stacktrace.h>
15static int __save_address(void *data, unsigned long address, int nosched) 15#include <asm/unwind.h>
16{
17 struct stack_trace *trace = data;
18
19 if (nosched && in_sched_functions(address))
20 return 0;
21 if (trace->skip > 0) {
22 trace->skip--;
23 return 0;
24 }
25 if (trace->nr_entries < trace->max_entries) {
26 trace->entries[trace->nr_entries++] = address;
27 return 0;
28 }
29 return 1;
30}
31
32static int save_address(void *data, unsigned long address, int reliable)
33{
34 return __save_address(data, address, 0);
35}
36
37static int save_address_nosched(void *data, unsigned long address, int reliable)
38{
39 return __save_address(data, address, 1);
40}
41 16
42void save_stack_trace(struct stack_trace *trace) 17void save_stack_trace(struct stack_trace *trace)
43{ 18{
44 unsigned long sp; 19 struct unwind_state state;
45 20
46 sp = current_stack_pointer(); 21 unwind_for_each_frame(&state, current, NULL, 0) {
47 dump_trace(save_address, trace, NULL, sp); 22 if (trace->nr_entries >= trace->max_entries)
23 break;
24 if (trace->skip > 0)
25 trace->skip--;
26 else
27 trace->entries[trace->nr_entries++] = state.ip;
28 }
48 if (trace->nr_entries < trace->max_entries) 29 if (trace->nr_entries < trace->max_entries)
49 trace->entries[trace->nr_entries++] = ULONG_MAX; 30 trace->entries[trace->nr_entries++] = ULONG_MAX;
50} 31}
@@ -52,12 +33,18 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
52 33
53void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 34void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
54{ 35{
55 unsigned long sp; 36 struct unwind_state state;
56 37
57 sp = tsk->thread.ksp; 38 unwind_for_each_frame(&state, tsk, NULL, 0) {
58 if (tsk == current) 39 if (trace->nr_entries >= trace->max_entries)
59 sp = current_stack_pointer(); 40 break;
60 dump_trace(save_address_nosched, trace, tsk, sp); 41 if (in_sched_functions(state.ip))
42 continue;
43 if (trace->skip > 0)
44 trace->skip--;
45 else
46 trace->entries[trace->nr_entries++] = state.ip;
47 }
61 if (trace->nr_entries < trace->max_entries) 48 if (trace->nr_entries < trace->max_entries)
62 trace->entries[trace->nr_entries++] = ULONG_MAX; 49 trace->entries[trace->nr_entries++] = ULONG_MAX;
63} 50}
@@ -65,10 +52,16 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
65 52
66void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) 53void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
67{ 54{
68 unsigned long sp; 55 struct unwind_state state;
69 56
70 sp = kernel_stack_pointer(regs); 57 unwind_for_each_frame(&state, current, regs, 0) {
71 dump_trace(save_address, trace, NULL, sp); 58 if (trace->nr_entries >= trace->max_entries)
59 break;
60 if (trace->skip > 0)
61 trace->skip--;
62 else
63 trace->entries[trace->nr_entries++] = state.ip;
64 }
72 if (trace->nr_entries < trace->max_entries) 65 if (trace->nr_entries < trace->max_entries)
73 trace->entries[trace->nr_entries++] = ULONG_MAX; 66 trace->entries[trace->nr_entries++] = ULONG_MAX;
74} 67}
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
new file mode 100644
index 000000000000..cf5a630f3aa9
--- /dev/null
+++ b/arch/s390/kernel/unwind_bc.c
@@ -0,0 +1,155 @@
 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/sched.h>
3#include <linux/sched/task.h>
4#include <linux/sched/task_stack.h>
5#include <linux/interrupt.h>
6#include <asm/sections.h>
7#include <asm/ptrace.h>
8#include <asm/bitops.h>
9#include <asm/stacktrace.h>
10#include <asm/unwind.h>
11
12unsigned long unwind_get_return_address(struct unwind_state *state)
13{
14 if (unwind_done(state))
15 return 0;
16 return __kernel_text_address(state->ip) ? state->ip : 0;
17}
18EXPORT_SYMBOL_GPL(unwind_get_return_address);
19
20static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
21{
22 return (sp <= state->sp) ||
23 (sp + sizeof(struct stack_frame) > state->stack_info.end);
24}
25
26static bool update_stack_info(struct unwind_state *state, unsigned long sp)
27{
28 struct stack_info *info = &state->stack_info;
29 unsigned long *mask = &state->stack_mask;
30
31 /* New stack pointer leaves the current stack */
32 if (get_stack_info(sp, state->task, info, mask) != 0 ||
33 !on_stack(info, sp, sizeof(struct stack_frame)))
34 /* 'sp' does not point to a valid stack */
35 return false;
36 return true;
37}
38
39bool unwind_next_frame(struct unwind_state *state)
40{
41 struct stack_info *info = &state->stack_info;
42 struct stack_frame *sf;
43 struct pt_regs *regs;
44 unsigned long sp, ip;
45 bool reliable;
46
47 regs = state->regs;
48 if (unlikely(regs)) {
49 sp = READ_ONCE_TASK_STACK(state->task, regs->gprs[15]);
50 if (unlikely(outside_of_stack(state, sp))) {
51 if (!update_stack_info(state, sp))
52 goto out_err;
53 }
54 sf = (struct stack_frame *) sp;
55 ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
56 reliable = false;
57 regs = NULL;
58 } else {
59 sf = (struct stack_frame *) state->sp;
60 sp = READ_ONCE_TASK_STACK(state->task, sf->back_chain);
61 if (likely(sp)) {
62 /* Non-zero back-chain points to the previous frame */
63 if (unlikely(outside_of_stack(state, sp))) {
64 if (!update_stack_info(state, sp))
65 goto out_err;
66 }
67 sf = (struct stack_frame *) sp;
68 ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
69 reliable = true;
70 } else {
71 /* No back-chain, look for a pt_regs structure */
72 sp = state->sp + STACK_FRAME_OVERHEAD;
73 if (!on_stack(info, sp, sizeof(struct pt_regs)))
74 goto out_stop;
75 regs = (struct pt_regs *) sp;
76 if (user_mode(regs))
77 goto out_stop;
78 ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
79 reliable = true;
80 }
81 }
82
83#ifdef CONFIG_FUNCTION_GRAPH_TRACER
84 /* Decode any ftrace redirection */
85 if (ip == (unsigned long) return_to_handler)
86 ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
87 ip, NULL);
88#endif
89
90 /* Update unwind state */
91 state->sp = sp;
92 state->ip = ip;
93 state->regs = regs;
94 state->reliable = reliable;
95 return true;
96
97out_err:
98 state->error = true;
99out_stop:
100 state->stack_info.type = STACK_TYPE_UNKNOWN;
101 return false;
102}
103EXPORT_SYMBOL_GPL(unwind_next_frame);
104
105void __unwind_start(struct unwind_state *state, struct task_struct *task,
106 struct pt_regs *regs, unsigned long sp)
107{
108 struct stack_info *info = &state->stack_info;
109 unsigned long *mask = &state->stack_mask;
110 struct stack_frame *sf;
111 unsigned long ip;
112 bool reliable;
113
114 memset(state, 0, sizeof(*state));
115 state->task = task;
116 state->regs = regs;
117
118 /* Don't even attempt to start from user mode regs: */
119 if (regs && user_mode(regs)) {
120 info->type = STACK_TYPE_UNKNOWN;
121 return;
122 }
123
124 /* Get current stack pointer and initialize stack info */
125 if (get_stack_info(sp, task, info, mask) != 0 ||
126 !on_stack(info, sp, sizeof(struct stack_frame))) {
127 /* Something is wrong with the stack pointer */
128 info->type = STACK_TYPE_UNKNOWN;
129 state->error = true;
130 return;
131 }
132
133 /* Get the instruction pointer from pt_regs or the stack frame */
134 if (regs) {
135 ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
136 reliable = true;
137 } else {
138 sf = (struct stack_frame *) sp;
139 ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
140 reliable = false;
141 }
142
143#ifdef CONFIG_FUNCTION_GRAPH_TRACER
144 /* Decode any ftrace redirection */
145 if (ip == (unsigned long) return_to_handler)
146 ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
147 ip, NULL);
148#endif
149
150 /* Update unwind state */
151 state->sp = sp;
152 state->ip = ip;
153 state->reliable = reliable;
154}
155EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 97b3ee53852b..818deeb1ebc3 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -16,6 +16,7 @@
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <asm/ctl_reg.h> 17#include <asm/ctl_reg.h>
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/stacktrace.h>
19 20
20static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) 21static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
21{ 22{
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 43d9525c36fc..7441857df51b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,23 +13,17 @@
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16 16#include <asm/unwind.h>
17static int __s390_backtrace(void *data, unsigned long address, int reliable)
18{
19 unsigned int *depth = data;
20
21 if (*depth == 0)
22 return 1;
23 (*depth)--;
24 oprofile_add_trace(address);
25 return 0;
26}
27 17
28static void s390_backtrace(struct pt_regs *regs, unsigned int depth) 18static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
29{ 19{
30 if (user_mode(regs)) 20 struct unwind_state state;
31 return; 21
32 dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]); 22 unwind_for_each_frame(&state, current, regs, 0) {
23 if (depth-- == 0)
24 break;
25 oprofile_add_trace(state.ip);
26 }
33} 27}
34 28
35int __init oprofile_arch_init(struct oprofile_operations *ops) 29int __init oprofile_arch_init(struct oprofile_operations *ops)