-rw-r--r-- | arch/x86_64/kernel/stacktrace.c | 214
-rw-r--r-- | arch/x86_64/kernel/traps.c      |  99
-rw-r--r-- | include/asm-x86_64/stacktrace.h |  18
3 files changed, 120 insertions(+), 211 deletions(-)
diff --git a/arch/x86_64/kernel/stacktrace.c b/arch/x86_64/kernel/stacktrace.c
index 1c022af8fe1e..6026b31d037e 100644
--- a/arch/x86_64/kernel/stacktrace.c
+++ b/arch/x86_64/kernel/stacktrace.c
@@ -7,211 +7,49 @@
  */
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
+#include <linux/module.h>
+#include <asm/stacktrace.h>
 
-#include <asm/smp.h>
-
-static inline int
-in_range(unsigned long start, unsigned long addr, unsigned long end)
+static void save_stack_warning(void *data, char *msg)
 {
-	return addr >= start && addr <= end;
 }
 
-static unsigned long
-get_stack_end(struct task_struct *task, unsigned long stack)
+static void
+save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
 {
-	unsigned long stack_start, stack_end, flags;
-	int i, cpu;
-
-	/*
-	 * The most common case is that we are in the task stack:
-	 */
-	stack_start = (unsigned long)task->thread_info;
-	stack_end = stack_start + THREAD_SIZE;
-
-	if (in_range(stack_start, stack, stack_end))
-		return stack_end;
-
-	/*
-	 * We are in an interrupt if irqstackptr is set:
-	 */
-	raw_local_irq_save(flags);
-	cpu = safe_smp_processor_id();
-	stack_end = (unsigned long)cpu_pda(cpu)->irqstackptr;
-
-	if (stack_end) {
-		stack_start = stack_end & ~(IRQSTACKSIZE-1);
-		if (in_range(stack_start, stack, stack_end))
-			goto out_restore;
-		/*
-		 * We get here if we are in an IRQ context but we
-		 * are also in an exception stack.
-		 */
-	}
-
-	/*
-	 * Iterate over all exception stacks, and figure out whether
-	 * 'stack' is in one of them:
-	 */
-	for (i = 0; i < N_EXCEPTION_STACKS; i++) {
-		/*
-		 * set 'end' to the end of the exception stack.
-		 */
-		stack_end = per_cpu(init_tss, cpu).ist[i];
-		stack_start = stack_end - EXCEPTION_STKSZ;
-
-		/*
-		 * Is 'stack' above this exception frame's end?
-		 * If yes then skip to the next frame.
-		 */
-		if (stack >= stack_end)
-			continue;
-		/*
-		 * Is 'stack' above this exception frame's start address?
-		 * If yes then we found the right frame.
-		 */
-		if (stack >= stack_start)
-			goto out_restore;
-
-		/*
-		 * If this is a debug stack, and if it has a larger size than
-		 * the usual exception stacks, then 'stack' might still
-		 * be within the lower portion of the debug stack:
-		 */
-#if DEBUG_STKSZ > EXCEPTION_STKSZ
-		if (i == DEBUG_STACK - 1 && stack >= stack_end - DEBUG_STKSZ) {
-			/*
-			 * Black magic. A large debug stack is composed of
-			 * multiple exception stack entries, which we
-			 * iterate through now. Dont look:
-			 */
-			do {
-				stack_end -= EXCEPTION_STKSZ;
-				stack_start -= EXCEPTION_STKSZ;
-			} while (stack < stack_start);
-
-			goto out_restore;
-		}
-#endif
-	}
-	/*
-	 * Ok, 'stack' is not pointing to any of the system stacks.
-	 */
-	stack_end = 0;
-
-out_restore:
-	raw_local_irq_restore(flags);
-
-	return stack_end;
 }
 
-
-/*
- * Save stack-backtrace addresses into a stack_trace buffer:
- */
-static inline unsigned long
-save_context_stack(struct stack_trace *trace,
-		   unsigned long stack, unsigned long stack_end)
+static int save_stack_stack(void *data, char *name)
 {
-	int skip = trace->skip;
-	unsigned long addr;
-
-#ifdef CONFIG_FRAME_POINTER
-	unsigned long prev_stack = 0;
+	struct stack_trace *trace = (struct stack_trace *)data;
+	return trace->all_contexts ? 0 : -1;
+}
 
-	while (in_range(prev_stack, stack, stack_end)) {
-		pr_debug("stack: %p\n", (void *)stack);
-		addr = (unsigned long)(((unsigned long *)stack)[1]);
-		pr_debug("addr: %p\n", (void *)addr);
-		if (!skip)
-			trace->entries[trace->nr_entries++] = addr-1;
-		else
-			skip--;
-		if (trace->nr_entries >= trace->max_entries)
-			break;
-		if (!addr)
-			return 0;
-		/*
-		 * Stack frames must go forwards (otherwise a loop could
-		 * happen if the stackframe is corrupted), so we move
-		 * prev_stack forwards:
-		 */
-		prev_stack = stack;
-		stack = (unsigned long)(((unsigned long *)stack)[0]);
-	}
-	pr_debug("invalid: %p\n", (void *)stack);
-#else
-	while (stack < stack_end) {
-		addr = ((unsigned long *)stack)[0];
-		stack += sizeof(long);
-		if (__kernel_text_address(addr)) {
-			if (!skip)
-				trace->entries[trace->nr_entries++] = addr-1;
-			else
-				skip--;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
+static void save_stack_address(void *data, unsigned long addr)
+{
+	struct stack_trace *trace = (struct stack_trace *)data;
+	if (trace->skip > 0) {
+		trace->skip--;
+		return;
 	}
-#endif
-	return stack;
+	if (trace->nr_entries < trace->max_entries - 1)
+		trace->entries[trace->nr_entries++] = addr;
 }
 
-#define MAX_STACKS 10
+static struct stacktrace_ops save_stack_ops = {
+	.warning = save_stack_warning,
+	.warning_symbol = save_stack_warning_symbol,
+	.stack = save_stack_stack,
+	.address = save_stack_address,
+};
 
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
 void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
 {
-	unsigned long stack = (unsigned long)&stack;
-	int i, nr_stacks = 0, stacks_done[MAX_STACKS];
-
-	WARN_ON(trace->nr_entries || !trace->max_entries);
-
-	if (!task)
-		task = current;
-
-	pr_debug("task: %p, ti: %p\n", task, task->thread_info);
-
-	if (!task || task == current) {
-		/* Grab rbp right from our regs: */
-		asm ("mov %%rbp, %0" : "=r" (stack));
-		pr_debug("rbp: %p\n", (void *)stack);
-	} else {
-		/* rbp is the last reg pushed by switch_to(): */
-		stack = task->thread.rsp;
-		pr_debug("other task rsp: %p\n", (void *)stack);
-		stack = (unsigned long)(((unsigned long *)stack)[0]);
-		pr_debug("other task rbp: %p\n", (void *)stack);
-	}
-
-	while (1) {
-		unsigned long stack_end = get_stack_end(task, stack);
-
-		pr_debug("stack: %p\n", (void *)stack);
-		pr_debug("stack end: %p\n", (void *)stack_end);
-
-		/*
-		 * Invalid stack addres?
-		 */
-		if (!stack_end)
-			return;
-		/*
-		 * Were we in this stack already? (recursion)
-		 */
-		for (i = 0; i < nr_stacks; i++)
-			if (stacks_done[i] == stack_end)
-				return;
-		stacks_done[nr_stacks] = stack_end;
-
-		stack = save_context_stack(trace, stack, stack_end);
-		if (!stack || trace->nr_entries >= trace->max_entries)
-			return;
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
-		if (trace->nr_entries >= trace->max_entries)
-			return;
-		if (++nr_stacks >= MAX_STACKS)
-			return;
-	}
+	dump_trace(task, NULL, NULL, &save_stack_ops, trace);
+	trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL(save_stack_trace);
 
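For reference, here is a minimal sketch (not part of the patch above) of how a caller might drive the save_stack_trace() interface implemented in this file. The struct stack_trace fields used (entries, nr_entries, max_entries, skip, all_contexts) are the ones the patch itself reads and writes; the storage and function names are invented for illustration.

/* Illustrative only -- not part of the patch above. */
static unsigned long demo_entries[16];		/* hypothetical storage */

static void demo_capture(void)
{
	struct stack_trace trace = {
		.entries	= demo_entries,
		.max_entries	= 16,
		.nr_entries	= 0,
		.skip		= 0,	/* number of leading entries to drop */
		.all_contexts	= 0,	/* stop at the first stack boundary */
	};

	/* save_stack_trace() above hands this off to dump_trace(), which
	 * falls back to current when no task pointer is supplied. */
	save_stack_trace(&trace, current);

	/* demo_entries[] now holds kernel text addresses, terminated by the
	 * ULONG_MAX sentinel appended in save_stack_trace() above. */
}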
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 9ec2b1d5893d..4ac18b02eada 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -45,6 +45,7 @@
 #include <asm/pda.h>
 #include <asm/proto.h>
 #include <asm/nmi.h>
+#include <asm/stacktrace.h>
 
 asmlinkage void divide_error(void);
 asmlinkage void debug(void);
@@ -142,7 +143,7 @@ void printk_address(unsigned long address)
 #endif
 
 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-					unsigned *usedp, const char **idp)
+					unsigned *usedp, char **idp)
 {
 	static char ids[][8] = {
 		[DEBUG_STACK - 1] = "#DB",
@@ -234,13 +235,19 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
 	return NULL;
 }
 
-static int show_trace_unwind(struct unwind_frame_info *info, void *context)
+struct ops_and_data {
+	struct stacktrace_ops *ops;
+	void *data;
+};
+
+static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
 {
+	struct ops_and_data *oad = (struct ops_and_data *)context;
 	int n = 0;
 
 	while (unwind(info) == 0 && UNW_PC(info)) {
 		n++;
-		printk_address(UNW_PC(info));
+		oad->ops->address(oad->data, UNW_PC(info));
 		if (arch_unw_user_mode(info))
 			break;
 	}
@@ -254,45 +261,51 @@ static int show_trace_unwind(struct unwind_frame_info *info, void *context)
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
+		struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = safe_smp_processor_id();
 	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
 	unsigned used = 0;
 
-	printk("\nCall Trace:\n");
-
 	if (!tsk)
 		tsk = current;
 
 	if (call_trace >= 0) {
 		int unw_ret = 0;
 		struct unwind_frame_info info;
+		struct ops_and_data oad = { .ops = ops, .data = data };
 
 		if (regs) {
 			if (unwind_init_frame_info(&info, tsk, regs) == 0)
-				unw_ret = show_trace_unwind(&info, NULL);
+				unw_ret = dump_trace_unwind(&info, &oad);
 		} else if (tsk == current)
-			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
+			unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
 		else {
 			if (unwind_init_blocked(&info, tsk) == 0)
-				unw_ret = show_trace_unwind(&info, NULL);
+				unw_ret = dump_trace_unwind(&info, &oad);
 		}
 		if (unw_ret > 0) {
 			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-				print_symbol("DWARF2 unwinder stuck at %s\n",
+				ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
 					     UNW_PC(&info));
 				if ((long)UNW_SP(&info) < 0) {
-					printk("Leftover inexact backtrace:\n");
+					ops->warning(data, "Leftover inexact backtrace:\n");
 					stack = (unsigned long *)UNW_SP(&info);
 				} else
-					printk("Full inexact backtrace again:\n");
+					ops->warning(data, "Full inexact backtrace again:\n");
 			} else if (call_trace >= 1)
 				return;
 			else
-				printk("Full inexact backtrace again:\n");
+				ops->warning(data, "Full inexact backtrace again:\n");
 		} else
-			printk("Inexact backtrace:\n");
+			ops->warning(data, "Inexact backtrace:\n");
+	}
+	if (!stack) {
+		unsigned long dummy;
+		stack = &dummy;
+		if (tsk && tsk != current)
+			stack = (unsigned long *)tsk->thread.rsp;
 	}
 
 	/*
@@ -312,7 +325,7 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
-			printk_address(addr); \
+			ops->address(data, addr); \
		} \
	} while (0)
 
@@ -321,16 +334,17 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
-	for ( ; ; ) {
-		const char *id;
+	for (;;) {
+		char *id;
 		unsigned long *estack_end;
 		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
 
 		if (estack_end) {
-			printk(" <%s>", id);
+			if (ops->stack(data, id) < 0)
+				break;
 			HANDLE_STACK (stack < estack_end);
-			printk(" <EOE>");
+			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
 			 * second-to-last pointer (index -2 to end) in the
@@ -345,7 +359,8 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
 
 		if (stack >= irqstack && stack < irqstack_end) {
-			printk(" <IRQ>");
+			if (ops->stack(data, "IRQ") < 0)
+				break;
 			HANDLE_STACK (stack < irqstack_end);
 			/*
 			 * We link to the next stack (which would be
@@ -354,7 +369,7 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
			 */
			stack = (unsigned long *) (irqstack_end[-1]);
			irqstack_end = NULL;
-			ops->stack? no
+			continue-placeholder
		}
	}
@@ -362,15 +377,53 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
	}
 
	/*
-	 * This prints the process stack:
+	 * This handles the process stack:
	 */
	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
 #undef HANDLE_STACK
+}
+EXPORT_SYMBOL(dump_trace);
 
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+	print_symbol(msg, symbol);
+	printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+	printk("%s\n", msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+	printk(" <%s> ", name);
+	return 0;
+}
+
+static void print_trace_address(void *data, unsigned long addr)
+{
+	printk_address(addr);
+}
+
+static struct stacktrace_ops print_trace_ops = {
+	.warning = print_trace_warning,
+	.warning_symbol = print_trace_warning_symbol,
+	.stack = print_trace_stack,
+	.address = print_trace_address,
+};
+
+void
+show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
+{
+	printk("\nCall Trace:\n");
+	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
 	printk("\n");
 }
 
-static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
+static void
+_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
 {
 	unsigned long *stack;
 	int i;
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
new file mode 100644
index 000000000000..5eb9799bef76
--- /dev/null
+++ b/include/asm-x86_64/stacktrace.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H 1
+
+/* Generic stack tracer with callbacks */
+
+struct stacktrace_ops {
+	void (*warning)(void *data, char *msg);
+	/* msg must contain %s for the symbol */
+	void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
+	void (*address)(void *data, unsigned long address);
+	/* On negative return stop dumping */
+	int (*stack)(void *data, char *name);
+};
+
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
+		struct stacktrace_ops *ops, void *data);
+
+#endif
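A hypothetical further consumer of the interface declared above, sketched here to show how the void *data cookie threads private state through dump_trace(). The callback signatures match the header; the function and variable names are invented for illustration and do not appear in the patch.

/* Illustrative only -- not part of the patch above. */
static void depth_warning(void *data, char *msg) { }
static void depth_warning_symbol(void *data, char *msg, unsigned long symbol) { }

static int depth_stack(void *data, char *name)
{
	return 0;	/* non-negative: keep walking across stack boundaries */
}

static void depth_address(void *data, unsigned long addr)
{
	(*(unsigned int *)data)++;	/* count every reported text address */
}

static struct stacktrace_ops depth_ops = {
	.warning	= depth_warning,
	.warning_symbol	= depth_warning_symbol,
	.stack		= depth_stack,
	.address	= depth_address,
};

/* Roughly how deep is this task's kernel call chain? */
static unsigned int call_chain_depth(struct task_struct *tsk)
{
	unsigned int depth = 0;

	dump_trace(tsk, NULL, NULL, &depth_ops, &depth);
	return depth;
}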