author      Arjan van de Ven <arjan@linux.intel.com>      2008-01-30 07:33:07 -0500
committer   Ingo Molnar <mingo@elte.hu>                   2008-01-30 07:33:07 -0500
commit      80b51f310b6f55006a265d087b8f48744e65663d (patch)
tree        49826076129eaa61db1bbbc259382a297c77491b /arch/x86/kernel/traps_64.c
parent      e4a94568b18c5d7d72741ebde5736d77d235743c (diff)
x86: use the stack frames to get exact stack-traces for CONFIG_FRAME_POINTER on x86-64
x86 32 bit already has this feature: this patch uses the stack frames built
with a frame pointer to produce an exact stack trace, by following the frame
pointer chain.
This only affects kernels built with the CONFIG_FRAME_POINTER config option
enabled, and greatly reduces the amount of noise in oopses.
This code uses the traditional method of doing backtraces, but if it
finds a valid frame pointer chain, it will use that to show which parts
of the backtrace are reliable and which parts are not.
Due to the fragility and importance of the backtrace code, this needs to
be well reviewed and well tested before merging into mainline.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
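
As an illustration of the frame-pointer walk this patch relies on, here is a
minimal userspace sketch (not part of the commit): each frame starts with the
caller's saved frame pointer followed by the return address, so the chain can
be followed much like print_context_stack() does in the diff below. It assumes
an x86-64 build with frame pointers (e.g. gcc -O0 -fno-omit-frame-pointer
walk.c); the level1/level2 helpers, the file name and the depth limit are made
up for the example, and a real unwinder must also range-check every frame, as
valid_stack_ptr() does in the kernel.

    /* Illustration only: walk the current process's frame pointer chain.
     * The struct mirrors the stack_frame struct added by the patch. */
    #include <stdio.h>

    struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
    };

    static __attribute__((noinline)) void show_trace(void)
    {
        /* %rbp of this frame points at the caller's saved %rbp */
        struct stack_frame *frame = __builtin_frame_address(0);
        int depth = 0;

        while (frame && frame->return_address && depth++ < 16) {
            printf(" [<%016lx>]\n", frame->return_address);
            frame = frame->next_frame;
        }
    }

    static __attribute__((noinline)) void level2(void) { show_trace(); }
    static __attribute__((noinline)) void level1(void) { level2(); }

    int main(void)
    {
        level1();
        return 0;
    }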
Diffstat (limited to 'arch/x86/kernel/traps_64.c')
-rw-r--r--   arch/x86/kernel/traps_64.c   67
1 file changed, 44 insertions, 23 deletions
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b8303ed95057..304ca6b4a1ca 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -225,31 +225,34 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
         return p > t && p < t + THREAD_SIZE - size;
 }
 
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+        struct stack_frame *next_frame;
+        unsigned long return_address;
+};
+
+
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
                 unsigned long *stack, unsigned long bp,
                 const struct stacktrace_ops *ops, void *data,
                 unsigned long *end)
 {
-        /*
-         * Print function call entries within a stack. 'cond' is the
-         * "end of stackframe" condition, that the 'stack++'
-         * iteration will eventually trigger.
-         */
-        while (valid_stack_ptr(tinfo, stack, 3, end)) {
-                unsigned long addr = *stack++;
-                /* Use unlocked access here because except for NMIs
-                   we should be already protected against module unloads */
+        struct stack_frame *frame = (struct stack_frame *)bp;
+
+        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+                unsigned long addr;
+
+                addr = *stack;
                 if (__kernel_text_address(addr)) {
-                        /*
-                         * If the address is either in the text segment of the
-                         * kernel, or in the region which contains vmalloc'ed
-                         * memory, it *may* be the address of a calling
-                         * routine; if so, print it so that someone tracing
-                         * down the cause of the crash will be able to figure
-                         * out the call path that was taken.
-                         */
-                        ops->address(data, addr, 1);
+                        if ((unsigned long) stack == bp + 8) {
+                                ops->address(data, addr, 1);
+                                frame = frame->next_frame;
+                                bp = (unsigned long) frame;
+                        } else {
+                                ops->address(data, addr, bp == 0);
+                        }
                 }
+                stack++;
         }
         return bp;
 }
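
The key test in the hunk above is "(unsigned long) stack == bp + 8": on x86-64
a frame-pointer prologue pushes %rbp and then copies %rsp into %rbp, so the
return address always sits 8 bytes above the saved frame pointer. The function
still scans every word on the stack, but only the word at bp + 8 is reported
as reliable (third argument to ops->address() set to 1) and used to hop to the
next frame; other text addresses are flagged with bp == 0, i.e. they count as
reliable only when no frame pointer chain is available at all. The following
userspace sketch (not part of the commit, file name check.c made up) checks
that layout, assuming x86-64 and gcc -O0 -fno-omit-frame-pointer:

    /* Illustration only: verify that the word at bp + 8 is the return
     * address, which is the relationship the hunk above depends on. */
    #include <stdio.h>

    static __attribute__((noinline)) void check(void)
    {
        unsigned long bp = (unsigned long) __builtin_frame_address(0);

        /* Both lines should print the same address. */
        printf("word at bp + 8:           %#lx\n",
               *(unsigned long *) (bp + 8));
        printf("__builtin_return_address: %#lx\n",
               (unsigned long) __builtin_return_address(0));
    }

    int main(void)
    {
        check();
        return 0;
    }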
@@ -274,6 +277,19 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                 stack = (unsigned long *)tsk->thread.sp;
         }
 
+#ifdef CONFIG_FRAME_POINTER
+        if (!bp) {
+                if (tsk == current) {
+                        /* Grab bp right from our regs */
+                        asm("movq %%rbp, %0" : "=r" (bp):);
+                } else {
+                        /* bp is the last reg pushed by switch_to */
+                        bp = *(unsigned long *) tsk->thread.sp;
+                }
+        }
+#endif
+
+
 
         /*
          * Print function call entries in all stacks, starting at the
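
The hunk above also has to find a starting bp when the caller did not supply
one: for the running task it reads %rbp directly with inline asm, and for a
sleeping task it uses the fact that bp is the last register switch_to() pushed,
so it is the word at the saved tsk->thread.sp. The inline-asm idiom can be
tried in userspace too; a sketch (not part of the commit, file name getbp.c
made up), assuming an x86-64 build with gcc -O0 -fno-omit-frame-pointer:

    /* Illustration only: read the current frame pointer the same way
     * dump_trace()/dump_stack() do after this patch (x86-64 only). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long bp;

        asm("movq %%rbp, %0" : "=r" (bp));
        printf("current frame pointer:      %#lx\n", bp);
        printf("__builtin_frame_address(0): %p\n",
               __builtin_frame_address(0));
        return 0;
    }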
@@ -290,8 +306,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                         if (ops->stack(data, id) < 0)
                                 break;
 
-                        print_context_stack(tinfo, stack, 0, ops,
-                                        data, estack_end);
+                        bp = print_context_stack(tinfo, stack, bp, ops,
+                                                 data, estack_end);
                         ops->stack(data, "<EOE>");
                         /*
                          * We link to the next stack via the
@@ -309,8 +325,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                 if (stack >= irqstack && stack < irqstack_end) {
                         if (ops->stack(data, "IRQ") < 0)
                                 break;
-                        print_context_stack(tinfo, stack, 0, ops,
-                                        data, irqstack_end);
+                        bp = print_context_stack(tinfo, stack, bp,
+                                                 ops, data, irqstack_end);
                         /*
                          * We link to the next stack (which would be
                          * the process stack normally) the last
@@ -328,7 +344,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
         /*
          * This handles the process stack:
          */
-        print_context_stack(tinfo, stack, 0, ops, data, NULL);
+        bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
         put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
@@ -425,6 +441,11 @@ void dump_stack(void)
         unsigned long dummy;
         unsigned long bp = 0;
 
+#ifdef CONFIG_FRAME_POINTER
+        if (!bp)
+                asm("movq %%rbp, %0" : "=r" (bp):);
+#endif
+
         printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                 current->pid, current->comm, print_tainted(),
                 init_utsname()->release,