author		Andi Kleen <ak@suse.de>			2006-09-26 04:52:34 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2006-09-26 04:52:34 -0400
commit		2b14a78cd07a52001b8c3865ed615d8b9b905b78 (patch)
tree		415682b4b8a65322ed881fce5ae04fcb36f55930
parent		be7a91709b90825990e571b2f20cea937d5eef6c (diff)
[PATCH] i386: Do stacktracer conversion too
Following the x86-64 patches; in fact it reuses code from them.
Convert the standard backtracer to do all output through
callbacks, and use the x86-64 stack tracer implementation,
which uses these callbacks, to implement the stacktrace interface
(a sketch of the callback interface follows below).
This allows stacktrace to use the new dwarf2 unwinder
and get better backtraces.
Cc: mingo@elte.hu
Signed-off-by: Andi Kleen <ak@suse.de>
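For reference, here is a minimal sketch of the callback interface the
backtracer is converted to, reconstructed from how the hunks below use it;
the authoritative declarations live in asm-x86_64/stacktrace.h and may
differ in detail:

/* Callbacks invoked by dump_trace() while it walks the stack. */
struct stacktrace_ops {
	void (*warning)(void *data, char *msg);		/* plain warning text */
	void (*warning_symbol)(void *data, char *msg,
			       unsigned long symbol);	/* warning with a symbolized address */
	int  (*stack)(void *data, char *name);		/* stack switch; a negative return stops the walk */
	void (*address)(void *data, unsigned long addr);/* one backtrace entry */
};

/* Core backtracer: walks the stack of @task (or the context given by
 * @regs/@stack) and reports everything through @ops, passing @data along. */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, struct stacktrace_ops *ops, void *data);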
-rw-r--r--	arch/i386/kernel/Makefile	|   1
-rw-r--r--	arch/i386/kernel/stacktrace.c	|  93
-rw-r--r--	arch/i386/kernel/traps.c	| 108
-rw-r--r--	include/asm-i386/stacktrace.h	|   1
4 files changed, 82 insertions, 121 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index dab497472deb..1a884b6e6e5c 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -81,4 +81,5 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
 	$(call if_changed,syscall)
 
 k8-y += ../../x86_64/kernel/k8.o
+stacktrace-y += ../../x86_64/kernel/stacktrace.o
 
diff --git a/arch/i386/kernel/stacktrace.c b/arch/i386/kernel/stacktrace.c
deleted file mode 100644
index ae3c32a87add..000000000000
--- a/arch/i386/kernel/stacktrace.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * arch/i386/kernel/stacktrace.c
- *
- * Stack trace management functions
- *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#include <linux/sched.h>
-#include <linux/stacktrace.h>
-
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-{
-	return	p > (void *)tinfo &&
-		p < (void *)tinfo + THREAD_SIZE - 3;
-}
-
-/*
- * Save stack-backtrace addresses into a stack_trace buffer:
- */
-static inline unsigned long
-save_context_stack(struct stack_trace *trace, unsigned int skip,
-		   struct thread_info *tinfo, unsigned long *stack,
-		   unsigned long ebp)
-{
-	unsigned long addr;
-
-#ifdef CONFIG_FRAME_POINTER
-	while (valid_stack_ptr(tinfo, (void *)ebp)) {
-		addr = *(unsigned long *)(ebp + 4);
-		if (!skip)
-			trace->entries[trace->nr_entries++] = addr;
-		else
-			skip--;
-		if (trace->nr_entries >= trace->max_entries)
-			break;
-		/*
-		 * break out of recursive entries (such as
-		 * end_of_stack_stop_unwind_function):
-		 */
-		if (ebp == *(unsigned long *)ebp)
-			break;
-
-		ebp = *(unsigned long *)ebp;
-	}
-#else
-	while (valid_stack_ptr(tinfo, stack)) {
-		addr = *stack++;
-		if (__kernel_text_address(addr)) {
-			if (!skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				skip--;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
-	}
-#endif
-
-	return ebp;
-}
-
-/*
- * Save stack-backtrace addresses into a stack_trace buffer.
- */
-void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
-{
-	unsigned long ebp;
-	unsigned long *stack = &ebp;
-
-	WARN_ON(trace->nr_entries || !trace->max_entries);
-
-	if (!task || task == current) {
-		/* Grab ebp right from our regs: */
-		asm ("movl %%ebp, %0" : "=r" (ebp));
-	} else {
-		/* ebp is the last reg pushed by switch_to(): */
-		ebp = *(unsigned long *) task->thread.esp;
-	}
-
-	while (1) {
-		struct thread_info *context = (struct thread_info *)
-			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-
-		ebp = save_context_stack(trace, trace->skip, context, stack, ebp);
-		stack = (unsigned long *)context->previous_esp;
-		if (!stack || trace->nr_entries >= trace->max_entries)
-			break;
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
-		if (trace->nr_entries >= trace->max_entries)
-			break;
-	}
-}
-
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 3c85c89f68d8..4ced4285163b 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -51,6 +51,7 @@
 #include <asm/smp.h>
 #include <asm/arch_hooks.h>
 #include <asm/kdebug.h>
+#include <asm/stacktrace.h>
 
 #include <linux/module.h>
 
@@ -118,26 +119,16 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 		p < (void *)tinfo + THREAD_SIZE - 3;
 }
 
-/*
- * Print one address/symbol entries per line.
- */
-static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
-{
-	printk(" [<%08lx>] ", addr);
-
-	print_symbol("%s\n", addr);
-}
-
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
 				unsigned long *stack, unsigned long ebp,
-				char *log_lvl)
+				struct stacktrace_ops *ops, void *data)
 {
 	unsigned long addr;
 
 #ifdef CONFIG_FRAME_POINTER
 	while (valid_stack_ptr(tinfo, (void *)ebp)) {
 		addr = *(unsigned long *)(ebp + 4);
-		print_addr_and_symbol(addr, log_lvl);
+		ops->address(data, addr);
 		/*
 		 * break out of recursive entries (such as
 		 * end_of_stack_stop_unwind_function):
@@ -150,28 +141,35 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 	while (valid_stack_ptr(tinfo, stack)) {
 		addr = *stack++;
 		if (__kernel_text_address(addr))
-			print_addr_and_symbol(addr, log_lvl);
+			ops->address(data, addr);
 	}
 #endif
 	return ebp;
 }
 
+struct ops_and_data {
+	struct stacktrace_ops *ops;
+	void *data;
+};
+
 static asmlinkage int
-show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
+dump_trace_unwind(struct unwind_frame_info *info, void *data)
 {
+	struct ops_and_data *oad = (struct ops_and_data *)data;
 	int n = 0;
 
 	while (unwind(info) == 0 && UNW_PC(info)) {
 		n++;
-		print_addr_and_symbol(UNW_PC(info), log_lvl);
+		oad->ops->address(oad->data, UNW_PC(info));
 		if (arch_unw_user_mode(info))
 			break;
 	}
 	return n;
 }
 
-static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-			       unsigned long *stack, char *log_lvl)
+void dump_trace(struct task_struct *task, struct pt_regs *regs,
+		unsigned long *stack,
+		struct stacktrace_ops *ops, void *data)
 {
 	unsigned long ebp;
 
@@ -181,31 +179,37 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	if (call_trace >= 0) {
 		int unw_ret = 0;
 		struct unwind_frame_info info;
+		struct ops_and_data oad = { .ops = ops, .data = data };
 
 		if (regs) {
 			if (unwind_init_frame_info(&info, task, regs) == 0)
-				unw_ret = show_trace_unwind(&info, log_lvl);
+				unw_ret = dump_trace_unwind(&info, &oad);
 		} else if (task == current)
-			unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
+			unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
 		else {
 			if (unwind_init_blocked(&info, task) == 0)
-				unw_ret = show_trace_unwind(&info, log_lvl);
+				unw_ret = dump_trace_unwind(&info, &oad);
 		}
 		if (unw_ret > 0) {
 			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-				print_symbol("DWARF2 unwinder stuck at %s\n",
+				ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
 					     UNW_PC(&info));
 				if (UNW_SP(&info) >= PAGE_OFFSET) {
-					printk("Leftover inexact backtrace:\n");
+					ops->warning(data, "Leftover inexact backtrace:\n");
 					stack = (void *)UNW_SP(&info);
 				} else
-					printk("Full inexact backtrace again:\n");
+					ops->warning(data, "Full inexact backtrace again:\n");
 			} else if (call_trace >= 1)
 				return;
 			else
-				printk("Full inexact backtrace again:\n");
+				ops->warning(data, "Full inexact backtrace again:\n");
 		} else
-			printk("Inexact backtrace:\n");
+			ops->warning(data, "Inexact backtrace:\n");
+	} else if (!stack) {
+		unsigned long dummy;
+		stack = &dummy;
+		if (task && task != current)
+			stack = (unsigned long *)task->thread.esp;
 	}
 
 	if (task == current) {
@@ -220,15 +224,63 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		struct thread_info *context;
 		context = (struct thread_info *)
 			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-		ebp = print_context_stack(context, stack, ebp, log_lvl);
+		ebp = print_context_stack(context, stack, ebp, ops, data);
+		/* Should be after the line below, but somewhere
+		   in early boot context comes out corrupted and we
+		   can't reference it -AK */
+		if (ops->stack(data, "IRQ") < 0)
+			break;
 		stack = (unsigned long*)context->previous_esp;
 		if (!stack)
 			break;
-		printk("%s =======================\n", log_lvl);
 	}
 }
+EXPORT_SYMBOL(dump_trace);
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+	printk(data);
+	print_symbol(msg, symbol);
+	printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+	printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+	return 0;
+}
+
+/*
+ * Print one address/symbol entries per line.
+ */
+static void print_trace_address(void *data, unsigned long addr)
+{
+	printk("%s [<%08lx>] ", (char *)data, addr);
+	print_symbol("%s\n", addr);
+}
+
+static struct stacktrace_ops print_trace_ops = {
+	.warning = print_trace_warning,
+	.warning_symbol = print_trace_warning_symbol,
+	.stack = print_trace_stack,
+	.address = print_trace_address,
+};
+
+static void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+		   unsigned long * stack, char *log_lvl)
+{
+	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
+	printk("%s =======================\n", log_lvl);
+}
 
-void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
+void show_trace(struct task_struct *task, struct pt_regs *regs,
+		unsigned long * stack)
 {
 	show_trace_log_lvl(task, regs, stack, "");
 }
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h
new file mode 100644
index 000000000000..7d1f6a5cbfca
--- /dev/null
+++ b/include/asm-i386/stacktrace.h
@@ -0,0 +1 @@
+#include <asm-x86_64/stacktrace.h>
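As a usage note, the stacktrace interface mentioned in the changelog is now
provided by the shared ../../x86_64/kernel/stacktrace.o object pulled in by
the Makefile hunk above. The rough, hypothetical sketch below (the
save_stack_* names are illustrative, not the literal x86-64 file) shows how
such an implementation plugs a stack_trace buffer into dump_trace():

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>

/* Warnings are irrelevant when we only want the addresses. */
static void save_stack_warning(void *data, char *msg)
{
}

static void save_stack_warning_symbol(void *data, char *msg,
				      unsigned long symbol)
{
}

/* Stop at the first stack switch; only the current stack is recorded. */
static int save_stack_stack(void *data, char *name)
{
	return -1;
}

/* Store each reported address into the stack_trace buffer. */
static void save_stack_address(void *data, unsigned long addr)
{
	struct stack_trace *trace = data;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static struct stacktrace_ops save_stack_ops = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer:
 * the callbacks do the filtering, dump_trace() does the walking.
 */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	dump_trace(task, NULL, NULL, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}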