diff options

Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..f8cf959bad45 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -1284,6 +1283,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
@@ -2320,11 +2338,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf,
 	return count;
 }
 
+static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_lseek(file, offset, origin);
+	else
+		return 0;
+}
+
 static const struct file_operations tracing_fops = {
 	.open		= tracing_open,
 	.read		= seq_read,
 	.write		= tracing_write_stub,
-	.llseek		= seq_lseek,
+	.llseek		= tracing_seek,
 	.release	= tracing_release,
 };
