Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--	kernel/trace/trace_stack.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 74c5d9a3afa..fde3be15c64 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -44,6 +44,10 @@ static inline void check_stack(void)
 	if (this_size <= max_stack_size)
 		return;
 
+	/* we do not handle interrupt stacks yet */
+	if (!object_is_on_stack(&this_size))
+		return;
+
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
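The new guard uses object_is_on_stack() to notice when check_stack() is running on something other than the current task's stack (for example an interrupt stack, which the tracer does not measure yet). A rough sketch of what that helper tests, hedged paraphrase of the thread-stack check as it existed around this kernel, not part of the patch:

/*
 * Sketch: an object is "on the stack" if its address falls inside the
 * current task's THREAD_SIZE stack area.
 */
static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

Taking the address of the local this_size is a cheap way to ask "which stack am I on right now?"; if it is not the task stack, check_stack() bails out before touching max_stack_lock.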
@@ -103,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -116,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
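ftrace_preempt_disable()/ftrace_preempt_enable() fold the open-coded sequence removed above into a shared pair of helpers: remember whether a reschedule was already pending, disable preemption with the notrace variant, and on the way out pick the enable flavour that cannot recurse into the scheduler from inside the tracer. A sketch of the pair, mirroring the removed lines (the real definitions live in kernel/trace/trace.h at this point in the series):

/* Sketch: the helpers bundle exactly the logic the old code open-coded. */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

If need_resched() was already set when the tracer ran, enabling preemption normally would call into schedule(), which is itself traced; choosing the no-resched variant in that case is what the "prevent recursion in schedule" comment refers to.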
@@ -180,11 +180,16 @@ static struct file_operations stack_max_size_fops = {
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	long i = (long)m->private;
+	long i;
 
 	(*pos)++;
 
-	i++;
+	if (v == SEQ_START_TOKEN)
+		i = 0;
+	else {
+		i = *(long *)v;
+		i++;
+	}
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
@@ -197,12 +202,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	void *t = &m->private;
+	void *t = SEQ_START_TOKEN;
 	loff_t l = 0;
 
 	local_irq_disable();
 	__raw_spin_lock(&max_stack_lock);
 
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
 	for (; t && l < *pos; t = t_next(m, t, &l))
 		;
 
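With these two hunks the iterator follows the standard seq_file convention: SEQ_START_TOKEN (defined as ((void *)1) in include/linux/seq_file.h) is handed out for *pos == 0 so that t_show() can print the column header before the first real entry, and t_next() maps the token to index 0. A hypothetical, self-contained example of the same pattern (array-backed, not the trace_stack.c code) looks like this:

#include <linux/kernel.h>
#include <linux/seq_file.h>

static unsigned long ex_data[] = { 10, 20, 30 };

/* pos 0 is the header token; pos n (n >= 1) maps to ex_data[n - 1] */
static void *ex_lookup(loff_t pos)
{
	if (pos < 1 || pos > (loff_t)ARRAY_SIZE(ex_data))
		return NULL;
	return &ex_data[pos - 1];
}

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	return *pos ? ex_lookup(*pos) : SEQ_START_TOKEN;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return ex_lookup(*pos);		/* NULL ends the walk */
}

static void ex_stop(struct seq_file *m, void *v)
{
}

static int ex_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {	/* header row */
		seq_puts(m, " Value\n -----\n");
		return 0;
	}
	seq_printf(m, " %5lu\n", *(unsigned long *)v);
	return 0;
}

static const struct seq_operations ex_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};

In trace_stack.c the iterator element is an index rather than a pointer into an array, which is why the non-token branch of t_next() recovers the previous index with *(long *)v before incrementing it, and why t_show() below must defer its own *(long *)v dereference until after the token check: SEQ_START_TOKEN is a sentinel, not a pointer that can be dereferenced.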
@@ -231,10 +239,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	long i = *(long *)v;
+	long i;
 	int size;
 
-	if (i < 0) {
+	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
@@ -242,6 +250,8 @@ static int t_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
+	i = *(long *)v;
+
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
@@ -271,10 +281,6 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	int ret;
 
 	ret = seq_open(file, &stack_trace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = (void *)-1;
-	}
 
 	return ret;
 }
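The open routine no longer needs to plant the (void *)-1 sentinel that the old t_show() recognised with its i < 0 test; the "print the header first" state is now carried by SEQ_START_TOKEN inside the iterator itself. After this change the open path amounts to the stock seq_file plumbing, roughly:

/* Sketch of what stack_trace_open() boils down to after the patch:
 * no per-open private state, just seq_open() on the shared ops. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}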