about summary refs log tree commit diff stats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  |  51
1 files changed, 39 insertions, 12 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba14a22be4cc..f8cf959bad45 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
17#include <linux/writeback.h> 17#include <linux/writeback.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/smp_lock.h>
21#include <linux/notifier.h> 20#include <linux/notifier.h>
22#include <linux/irqflags.h> 21#include <linux/irqflags.h>
23#include <linux/debugfs.h> 22#include <linux/debugfs.h>
@@ -1284,6 +1283,8 @@ void trace_dump_stack(void)
1284 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); 1283 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1285} 1284}
1286 1285
1286static DEFINE_PER_CPU(int, user_stack_count);
1287
1287void 1288void
1288ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1289ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1289{ 1290{
@@ -1302,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1302 if (unlikely(in_nmi())) 1303 if (unlikely(in_nmi()))
1303 return; 1304 return;
1304 1305
1306 /*
1307 * prevent recursion, since the user stack tracing may
1308 * trigger other kernel events.
1309 */
1310 preempt_disable();
1311 if (__this_cpu_read(user_stack_count))
1312 goto out;
1313
1314 __this_cpu_inc(user_stack_count);
1315
1316
1317
1305 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 1318 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1306 sizeof(*entry), flags, pc); 1319 sizeof(*entry), flags, pc);
1307 if (!event) 1320 if (!event)
@@ -1319,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1319 save_stack_trace_user(&trace); 1332 save_stack_trace_user(&trace);
1320 if (!filter_check_discard(call, entry, buffer, event)) 1333 if (!filter_check_discard(call, entry, buffer, event))
1321 ring_buffer_unlock_commit(buffer, event); 1334 ring_buffer_unlock_commit(buffer, event);
1335
1336 __this_cpu_dec(user_stack_count);
1337
1338 out:
1339 preempt_enable();
1322} 1340}
1323 1341
1324#ifdef UNUSED 1342#ifdef UNUSED
@@ -2196,7 +2214,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
2196 2214
2197static int tracing_release(struct inode *inode, struct file *file) 2215static int tracing_release(struct inode *inode, struct file *file)
2198{ 2216{
2199 struct seq_file *m = (struct seq_file *)file->private_data; 2217 struct seq_file *m = file->private_data;
2200 struct trace_iterator *iter; 2218 struct trace_iterator *iter;
2201 int cpu; 2219 int cpu;
2202 2220
@@ -2320,11 +2338,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf,
2320 return count; 2338 return count;
2321} 2339}
2322 2340
2341static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2342{
2343 if (file->f_mode & FMODE_READ)
2344 return seq_lseek(file, offset, origin);
2345 else
2346 return 0;
2347}
2348
2323static const struct file_operations tracing_fops = { 2349static const struct file_operations tracing_fops = {
2324 .open = tracing_open, 2350 .open = tracing_open,
2325 .read = seq_read, 2351 .read = seq_read,
2326 .write = tracing_write_stub, 2352 .write = tracing_write_stub,
2327 .llseek = seq_lseek, 2353 .llseek = tracing_seek,
2328 .release = tracing_release, 2354 .release = tracing_release,
2329}; 2355};
2330 2356
@@ -3463,6 +3489,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3463 size_t cnt, loff_t *fpos) 3489 size_t cnt, loff_t *fpos)
3464{ 3490{
3465 char *buf; 3491 char *buf;
3492 size_t written;
3466 3493
3467 if (tracing_disabled) 3494 if (tracing_disabled)
3468 return -EINVAL; 3495 return -EINVAL;
@@ -3484,11 +3511,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3484 } else 3511 } else
3485 buf[cnt] = '\0'; 3512 buf[cnt] = '\0';
3486 3513
3487 cnt = mark_printk("%s", buf); 3514 written = mark_printk("%s", buf);
3488 kfree(buf); 3515 kfree(buf);
3489 *fpos += cnt; 3516 *fpos += written;
3490 3517
3491 return cnt; 3518 /* don't tell userspace we wrote more - it might confuse them */
3519 if (written > cnt)
3520 written = cnt;
3521
3522 return written;
3492} 3523}
3493 3524
3494static int tracing_clock_show(struct seq_file *m, void *v) 3525static int tracing_clock_show(struct seq_file *m, void *v)
@@ -3991,13 +4022,9 @@ static void tracing_init_debugfs_percpu(long cpu)
3991{ 4022{
3992 struct dentry *d_percpu = tracing_dentry_percpu(); 4023 struct dentry *d_percpu = tracing_dentry_percpu();
3993 struct dentry *d_cpu; 4024 struct dentry *d_cpu;
3994 /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ 4025 char cpu_dir[30]; /* 30 characters should be more than enough */
3995 char cpu_dir[7];
3996
3997 if (cpu > 999 || cpu < 0)
3998 return;
3999 4026
4000 sprintf(cpu_dir, "cpu%ld", cpu); 4027 snprintf(cpu_dir, 30, "cpu%ld", cpu);
4001 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 4028 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4002 if (!d_cpu) { 4029 if (!d_cpu) {
4003 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); 4030 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);