aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/stacktrace.c
diff options
context:
space:
mode:
authorArjan van de Ven <arjan@linux.intel.com>2008-01-30 07:33:07 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:33:07 -0500
commit5bc27dc2f55fd3043597b5a8de6536183f28a449 (patch)
treedca83b12fb2f01f85a9e31bf1fb3802bd2cfef2a /arch/x86/kernel/stacktrace.c
parente9d4efddbec3d852d435b370b9c40ff7ac24afe6 (diff)
x86: pull bp calculation earlier into the backtrace path
Right now, we take the stack pointer early during the backtrace path, but only calculate bp several functions deep later, making it hard to reconcile the stack and bp backtraces (as well as showing several internal backtrace functions on the stack with bp based backtracing). This patch moves the bp taking to the same place we take the stack pointer; sadly this ripples through several layers of the back tracing stack, but it's not all that bad in the end I hope.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/stacktrace.c')
-rw-r--r--arch/x86/kernel/stacktrace.c7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8c4e4f5bf040..4f4021b5bfb5 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -33,7 +33,8 @@ static void save_stack_address(void *data, unsigned long addr, int reliable)
 	trace->entries[trace->nr_entries++] = addr;
 }
 
-static void save_stack_address_nosched(void *data, unsigned long addr)
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
 	struct stack_trace *trace = (struct stack_trace *)data;
 	if (in_sched_functions(addr))
@@ -65,14 +66,14 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
  */
 void save_stack_trace(struct stack_trace *trace)
 {
-	dump_trace(current, NULL, NULL, &save_stack_ops, trace);
+	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
+	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }