author      Soeren Sandmann <sandmann@daimi.au.dk>      2008-05-11 23:28:50 -0400
committer   Thomas Gleixner <tglx@linutronix.de>        2008-05-23 17:59:12 -0400
commit      cf3271a73b612a03da00681ecd9bfefab37c74c9
tree        fa4423bf262449aedea54a1145e6b10deefc3bf7    /kernel/trace
parent      8a9e94c1fbfdac45a3b6811b880777c4116aa309
ftrace/sysprof: don't trace the user stack if we are a kernel thread.
Check that current->mm is non-NULL before attempting to trace the user
stack.
Also take the depth of the kernel stack into account when comparing
against sample_max_depth.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
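
For background, a kernel thread never has a user address space, so checking
current->mm is a reliable way to tell whether there is a user stack to walk
at all. A minimal illustrative helper (not part of this patch; the name is
made up) would look like:

#include <linux/sched.h>

/*
 * Illustrative only, not from this commit: a task without an mm_struct has
 * no user address space -- it is a kernel thread -- so there is no user
 * stack for sysprof to unwind.
 */
static inline int task_is_kernel_thread(struct task_struct *task)
{
        return task->mm == NULL;
}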
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace_sysprof.c   50
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index fe23d6dba7f1..2301e1e7c606 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -95,13 +95,12 @@ const static struct stacktrace_ops backtrace_ops = {
         .address                = backtrace_address,
 };
 
-static struct pt_regs *
+static int
 trace_kernel(struct pt_regs *regs, struct trace_array *tr,
              struct trace_array_cpu *data)
 {
         struct backtrace_info info;
         unsigned long bp;
-        char *user_stack;
         char *stack;
 
         info.tr = tr;
@@ -119,10 +118,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 
         dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
 
-        /* Now trace the user stack */
-        user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));
-
-        return (struct pt_regs *)user_stack;
+        return info.pos;
 }
 
 static void timer_notify(struct pt_regs *regs, int cpu)
@@ -150,32 +146,44 @@ static void timer_notify(struct pt_regs *regs, int cpu)
         __trace_special(tr, data, 0, 0, current->pid);
 
         if (!is_user)
-                regs = trace_kernel(regs, tr, data);
+                i = trace_kernel(regs, tr, data);
+        else
+                i = 0;
 
-        fp = (void __user *)regs->bp;
+        /*
+         * Trace user stack if we are not a kernel thread
+         */
+        if (current->mm && i < sample_max_depth) {
+                regs = (struct pt_regs *)current->thread.sp0 - 1;
 
-        __trace_special(tr, data, 2, regs->ip, 0);
+                fp = (void __user *)regs->bp;
 
-        for (i = 0; i < sample_max_depth; i++) {
-                frame.next_fp = 0;
-                frame.return_address = 0;
-                if (!copy_stack_frame(fp, &frame))
-                        break;
-                if ((unsigned long)fp < regs->sp)
-                        break;
+                __trace_special(tr, data, 2, regs->ip, 0);
 
-                __trace_special(tr, data, 2, frame.return_address,
-                        (unsigned long)fp);
-                fp = frame.next_fp;
-        }
+                while (i < sample_max_depth) {
+                        frame.next_fp = 0;
+                        frame.return_address = 0;
+                        if (!copy_stack_frame(fp, &frame))
+                                break;
+                        if ((unsigned long)fp < regs->sp)
+                                break;
 
-        __trace_special(tr, data, 3, current->pid, i);
+                        __trace_special(tr, data, 2, frame.return_address,
+                                (unsigned long)fp);
+                        fp = frame.next_fp;
+
+                        i++;
+                }
+
+        }
 
         /*
          * Special trace entry if we overflow the max depth:
          */
         if (i == sample_max_depth)
                 __trace_special(tr, data, -1, -1, -1);
+
+        __trace_special(tr, data, 3, current->pid, i);
 }
 
 static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
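
A note on the depth accounting: trace_kernel() now returns the number of
entries it emitted, and the user-stack walk continues counting from that
value, so sample_max_depth caps the combined kernel-plus-user depth rather
than each half separately. A small standalone userspace toy (hypothetical
names, unrelated to the kernel build) that mimics this shared budget:

#include <stdio.h>

#define SAMPLE_MAX_DEPTH 8

/*
 * Emit up to 'avail' entries for one phase, starting at combined depth 'i';
 * return the new combined depth.  This mirrors how trace_kernel() hands its
 * entry count on to the user-stack walk in the patch above.
 */
static int emit_frames(const char *phase, int i, int avail)
{
        int emitted = 0;

        while (i < SAMPLE_MAX_DEPTH && emitted < avail) {
                printf("%s frame %d (combined depth %d)\n", phase, emitted, i);
                emitted++;
                i++;
        }
        return i;
}

int main(void)
{
        int i;

        i = emit_frames("kernel", 0, 5);        /* pretend 5 kernel frames */
        i = emit_frames("user", i, 6);          /* pretend 6 user frames   */

        if (i == SAMPLE_MAX_DEPTH)
                printf("overflow marker: combined depth hit the cap\n");

        return 0;
}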