diff options
| author | Andi Kleen <ak@suse.de> | 2006-09-26 04:52:34 -0400 |
|---|---|---|
| committer | Andi Kleen <andi@basil.nowhere.org> | 2006-09-26 04:52:34 -0400 |
| commit | 2b14a78cd07a52001b8c3865ed615d8b9b905b78 (patch) | |
| tree | 415682b4b8a65322ed881fce5ae04fcb36f55930 /arch/i386/kernel/stacktrace.c | |
| parent | be7a91709b90825990e571b2f20cea937d5eef6c (diff) | |
[PATCH] i386: Do stacktracer conversion too
Following x86-64 patches. Reuses code from them in fact.
Convert the standard backtracer to do all output using
callbacks. Use the x86-64 stack tracer implementation
that uses these callbacks to implement the stacktrace interface.
This allows using the new dwarf2 unwinder for stacktrace
and getting better backtraces.
Cc: mingo@elte.hu
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386/kernel/stacktrace.c')
| -rw-r--r-- | arch/i386/kernel/stacktrace.c | 93 |
1 files changed, 0 insertions, 93 deletions
diff --git a/arch/i386/kernel/stacktrace.c b/arch/i386/kernel/stacktrace.c deleted file mode 100644 index ae3c32a87add..000000000000 --- a/arch/i386/kernel/stacktrace.c +++ /dev/null | |||
| @@ -1,93 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * arch/i386/kernel/stacktrace.c | ||
| 3 | * | ||
| 4 | * Stack trace management functions | ||
| 5 | * | ||
| 6 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
| 7 | */ | ||
| 8 | #include <linux/sched.h> | ||
| 9 | #include <linux/stacktrace.h> | ||
| 10 | |||
| 11 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
| 12 | { | ||
| 13 | return p > (void *)tinfo && | ||
| 14 | p < (void *)tinfo + THREAD_SIZE - 3; | ||
| 15 | } | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Save stack-backtrace addresses into a stack_trace buffer: | ||
| 19 | */ | ||
| 20 | static inline unsigned long | ||
| 21 | save_context_stack(struct stack_trace *trace, unsigned int skip, | ||
| 22 | struct thread_info *tinfo, unsigned long *stack, | ||
| 23 | unsigned long ebp) | ||
| 24 | { | ||
| 25 | unsigned long addr; | ||
| 26 | |||
| 27 | #ifdef CONFIG_FRAME_POINTER | ||
| 28 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | ||
| 29 | addr = *(unsigned long *)(ebp + 4); | ||
| 30 | if (!skip) | ||
| 31 | trace->entries[trace->nr_entries++] = addr; | ||
| 32 | else | ||
| 33 | skip--; | ||
| 34 | if (trace->nr_entries >= trace->max_entries) | ||
| 35 | break; | ||
| 36 | /* | ||
| 37 | * break out of recursive entries (such as | ||
| 38 | * end_of_stack_stop_unwind_function): | ||
| 39 | */ | ||
| 40 | if (ebp == *(unsigned long *)ebp) | ||
| 41 | break; | ||
| 42 | |||
| 43 | ebp = *(unsigned long *)ebp; | ||
| 44 | } | ||
| 45 | #else | ||
| 46 | while (valid_stack_ptr(tinfo, stack)) { | ||
| 47 | addr = *stack++; | ||
| 48 | if (__kernel_text_address(addr)) { | ||
| 49 | if (!skip) | ||
| 50 | trace->entries[trace->nr_entries++] = addr; | ||
| 51 | else | ||
| 52 | skip--; | ||
| 53 | if (trace->nr_entries >= trace->max_entries) | ||
| 54 | break; | ||
| 55 | } | ||
| 56 | } | ||
| 57 | #endif | ||
| 58 | |||
| 59 | return ebp; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Save stack-backtrace addresses into a stack_trace buffer. | ||
| 64 | */ | ||
| 65 | void save_stack_trace(struct stack_trace *trace, struct task_struct *task) | ||
| 66 | { | ||
| 67 | unsigned long ebp; | ||
| 68 | unsigned long *stack = &ebp; | ||
| 69 | |||
| 70 | WARN_ON(trace->nr_entries || !trace->max_entries); | ||
| 71 | |||
| 72 | if (!task || task == current) { | ||
| 73 | /* Grab ebp right from our regs: */ | ||
| 74 | asm ("movl %%ebp, %0" : "=r" (ebp)); | ||
| 75 | } else { | ||
| 76 | /* ebp is the last reg pushed by switch_to(): */ | ||
| 77 | ebp = *(unsigned long *) task->thread.esp; | ||
| 78 | } | ||
| 79 | |||
| 80 | while (1) { | ||
| 81 | struct thread_info *context = (struct thread_info *) | ||
| 82 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
| 83 | |||
| 84 | ebp = save_context_stack(trace, trace->skip, context, stack, ebp); | ||
| 85 | stack = (unsigned long *)context->previous_esp; | ||
| 86 | if (!stack || trace->nr_entries >= trace->max_entries) | ||
| 87 | break; | ||
| 88 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
| 89 | if (trace->nr_entries >= trace->max_entries) | ||
| 90 | break; | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
