author		Jan Beulich <jbeulich@novell.com>	2006-08-30 13:37:11 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-08-30 19:05:15 -0400
commit		ea424055b771a165c9abd3ae109255a3b825c745 (patch)
tree		228f3bf4f392a8ce45d78a1ea8cfd8bd6f1f4f5b /arch/x86_64
parent		61171b8dbd36b0cc34d3813a59a8e4dc2984414d (diff)
[PATCH] x86: Make backtracer fallback logic more bullet-proof
The unwinder fallback logic still had potential for falling through to the
legacy stack trace code without printing an indication (at once serving as a
separator) of this. Further, the stack pointer retrieval for the fallback
should be as restrictive as possible, in order to avoid having the legacy
stack tracer try to access invalid memory; the patch tightens that check,
though it could certainly be improved further. The call_trace command line
option is also made conditional upon CONFIG_STACK_UNWIND (it is meaningless
otherwise).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
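For readers tracing the control flow, the sketch below models the decision
logic show_trace() ends up with after this patch. It is a stand-alone
user-space illustration, not kernel code: the three boolean parameters are
hypothetical stand-ins for unw_ret > 0, arch_unw_user_mode() and the new
(long)UNW_SP(&info) < 0 test (on x86-64, kernel addresses have the top bit
set, so a negative value is a necessary condition for a usable kernel stack
pointer), and the call_trace value mapping in the comment is an assumption
based on the call_trace= boot option of that era (old/both/newfallback/new).

#include <stdio.h>
#include <stdbool.h>

/* Assumed mapping of the call_trace= boot option: -1 old tracer only,
 * 0 both traces, 1 unwinder with fallback (the default), 2 unwinder only. */
static int call_trace = 1;

static void fallback_decision(bool unwinder_progressed,
			      bool stopped_in_user_mode,
			      bool sp_could_be_kernel)
{
	if (unwinder_progressed) {
		if (call_trace == 1 && !stopped_in_user_mode) {
			/* The unwinder got stuck inside the kernel: say so
			 * (this doubles as the separator the old code could
			 * omit), then decide what the legacy tracer may
			 * safely look at. */
			printf("DWARF2 unwinder stuck\n");
			if (sp_could_be_kernel) {
				/* Legacy trace resumes at the unwinder's SP. */
				printf("Leftover inexact backtrace:\n");
			} else {
				/* SP not trustworthy: redo from the start. */
				printf("Full inexact backtrace again:\n");
			}
		} else if (call_trace >= 1) {
			return;	/* trust the unwinder's output alone */
		} else {
			printf("Full inexact backtrace again:\n");
		}
	} else {
		/* The unwinder produced nothing at all. */
		printf("Inexact backtrace:\n");
	}
	/* ... the legacy stack scanner would run here ... */
}

int main(void)
{
	/* Stuck mid-kernel with a plausible kernel SP: print the separator
	 * and let the legacy trace pick up where the unwinder stopped. */
	fallback_decision(true, false, true);
	return 0;
}

Under this reading, every path that reaches the legacy tracer now prints a
header line first, which is exactly the guarantee the patch is after.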
Diffstat (limited to 'arch/x86_64')
-rw-r--r--	arch/x86_64/kernel/traps.c	28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 14052f089814..5e00af54af65 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -107,7 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 }
 
 static int kstack_depth_to_print = 12;
+#ifdef CONFIG_STACK_UNWIND
 static int call_trace = 1;
+#else
+#define call_trace (-1)
+#endif
 
 #ifdef CONFIG_KALLSYMS
 # include <linux/kallsyms.h>
@@ -274,21 +278,21 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
 		if (unwind_init_blocked(&info, tsk) == 0)
 			unw_ret = show_trace_unwind(&info, NULL);
 	}
-	if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
-#ifdef CONFIG_STACK_UNWIND
-		unsigned long rip = info.regs.rip;
-		print_symbol("DWARF2 unwinder stuck at %s\n", rip);
-		if (call_trace == 1) {
-			printk("Leftover inexact backtrace:\n");
-			stack = (unsigned long *)info.regs.rsp;
-		} else if (call_trace > 1)
+	if (unw_ret > 0) {
+		if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+			print_symbol("DWARF2 unwinder stuck at %s\n",
+				     UNW_PC(&info));
+			if ((long)UNW_SP(&info) < 0) {
+				printk("Leftover inexact backtrace:\n");
+				stack = (unsigned long *)UNW_SP(&info);
+			} else
+				printk("Full inexact backtrace again:\n");
+		} else if (call_trace >= 1)
 			return;
 		else
 			printk("Full inexact backtrace again:\n");
-#else
+	} else
 		printk("Inexact backtrace:\n");
-#endif
-	}
 	}
 
 	/*
@@ -1120,6 +1124,7 @@ static int __init kstack_setup(char *s)
 }
 __setup("kstack=", kstack_setup);
 
+#ifdef CONFIG_STACK_UNWIND
 static int __init call_trace_setup(char *s)
 {
 	if (strcmp(s, "old") == 0)
@@ -1133,3 +1138,4 @@ static int __init call_trace_setup(char *s)
 	return 1;
 }
 __setup("call_trace=", call_trace_setup);
+#endif