Diffstat (limited to 'kernel/trace/trace_stack.c')
 -rw-r--r--  kernel/trace/trace_stack.c  94
 1 file changed, 70 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
 
 static inline void check_stack(void)
 {
@@ -48,7 +53,7 @@ static inline void check_stack(void)
 	if (!object_is_on_stack(&this_size))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
@@ -78,6 +83,7 @@ static inline void check_stack(void)
 	 * on a new max, so it is far from a fast path.
 	 */
 	while (i < max_stack_trace.nr_entries) {
+		int found = 0;
 
 		stack_dump_index[i] = this_size;
 		p = start;
@@ -86,17 +92,19 @@ static inline void check_stack(void)
 			if (*p == stack_dump_trace[i]) {
 				this_size = stack_dump_index[i++] =
 					(top - p) * sizeof(unsigned long);
+				found = 1;
 				/* Start the search from here */
 				start = p + 1;
 			}
 		}
 
-		i++;
+		if (!found)
+			i++;
 	}
 
  out:
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static void
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
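For reference (not part of this patch): ftrace_preempt_disable() and ftrace_preempt_enable() are small helpers from kernel/trace/trace.h that fold the open-coded need_resched()/preempt_disable_notrace() dance removed above into one place. In the trace.h of this era they look roughly like:

static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* remember whether a reschedule was already pending ... */
	resched = need_resched();
	/* ... then disable preemption without triggering the tracer */
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* NEED_RESCHED was set on entry: re-enable preemption
		 * without scheduling, so a tracer running inside the
		 * scheduler cannot recurse into schedule() */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}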
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 	*ptr = val;
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return count;
 }
 
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
 	.open = tracing_open_generic,
 	.read = stack_max_size_read,
 	.write = stack_max_size_write,
@@ -184,11 +188,16 @@ static struct file_operations stack_max_size_fops = {
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	long i = (long)m->private;
+	long i;
 
 	(*pos)++;
 
-	i++;
+	if (v == SEQ_START_TOKEN)
+		i = 0;
+	else {
+		i = *(long *)v;
+		i++;
+	}
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
@@ -201,12 +210,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	void *t = &m->private;
+	void *t = SEQ_START_TOKEN;
 	loff_t l = 0;
 
 	local_irq_disable();
 	__raw_spin_lock(&max_stack_lock);
 
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
 	for (; t && l < *pos; t = t_next(m, t, &l))
 		;
 
@@ -235,10 +247,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	long i = *(long *)v;
+	long i;
 	int size;
 
-	if (i < 0) {
+	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
@@ -246,6 +258,8 @@ static int t_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
+	i = *(long *)v;
+
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
@@ -263,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
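The t_start()/t_next()/t_show() changes above switch the iterator to the standard seq_file header convention: SEQ_START_TOKEN (defined in <linux/seq_file.h> as ((void *)1)) is the sentinel that ->start returns for *pos == 0 so that ->show prints the table header before any real entry, replacing the old (void *)-1 value smuggled through m->private. A minimal sketch of the pattern, with hypothetical ex_* names:

#include <linux/seq_file.h>

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;	/* first call: have ->show emit the header */
	return NULL;			/* else: return record *pos - 1, or NULL at EOF */
}

static int ex_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "        Depth    Size   Location\n");
		return 0;
	}
	/* v points at a real record here */
	return 0;
}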
@@ -275,20 +289,51 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	int ret;
 
 	ret = seq_open(file, &stack_trace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = (void *)-1;
-	}
 
 	return ret;
 }
 
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 };
 
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+		   struct file *file, void __user *buffer, size_t *lenp,
+		   loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&stack_sysctl_mutex);
+
+	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+	if (ret || !write ||
+	    (last_stack_tracer_enabled == stack_tracer_enabled))
+		goto out;
+
+	last_stack_tracer_enabled = stack_tracer_enabled;
+
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
+
+ out:
+	mutex_unlock(&stack_sysctl_mutex);
+	return ret;
+}
+
+static __init int enable_stacktrace(char *str)
+{
+	stack_tracer_enabled = 1;
+	last_stack_tracer_enabled = 1;
+	return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
 static __init int stack_trace_init(void)
 {
 	struct dentry *d_tracer;
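stack_trace_sysctl() is only half of the wiring: a companion change in kernel/sysctl.c (from the same series, not shown in this diff) has to register the handler in the kernel ctl_table so the knob appears as /proc/sys/kernel/stack_tracer_enabled. That entry is expected to look roughly like:

#ifdef CONFIG_STACK_TRACER
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "stack_tracer_enabled",
		.data		= &stack_tracer_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &stack_trace_sysctl,
	},
#endif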
@@ -306,7 +351,8 @@ static __init int stack_trace_init(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'stack_trace' entry\n");
 
-	register_ftrace_function(&trace_ops);
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
 
 	return 0;
 }
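With this applied, the stack tracer is off by default: stack_trace_init() registers the ftrace callback only if stack_tracer_enabled was already set, either at boot by adding "stacktrace" to the kernel command line (the __setup() hook above) or at run time by writing 1 to /proc/sys/kernel/stack_tracer_enabled (assuming the ctl_table entry sketched above), in which case stack_trace_sysctl() takes stack_sysctl_mutex and registers or unregisters trace_ops as the value flips.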