path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 441
1 file changed, 298 insertions(+), 143 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 737b0efa1a62..16f7038d1f4d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,7 +275,7 @@ int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 }
 EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
-cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
         u64 ts;
 
@@ -599,7 +599,7 @@ static int alloc_snapshot(struct trace_array *tr)
         return 0;
 }
 
-void free_snapshot(struct trace_array *tr)
+static void free_snapshot(struct trace_array *tr)
 {
         /*
          * We don't free the ring buffer. instead, resize it because
@@ -963,27 +963,9 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
         return cnt;
 }
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a arch_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- *
- * It is also used in other places outside the update_max_tr
- * so it needs to be defined outside of the
- * CONFIG_TRACER_MAX_TRACE.
- */
-static arch_spinlock_t ftrace_max_lock =
-        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
 unsigned long __read_mostly tracing_thresh;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-unsigned long __read_mostly tracing_max_latency;
-
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
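The two globals removed above do not simply vanish: they move into struct trace_array, so every trace instance gets its own max-latency value and its own swap lock. The matching trace.h hunk is not shown on this page; as a rough sketch, with field names taken from the uses below and exact placement assumed:

/* Sketch only: the real change lives in kernel/trace/trace.h. */
struct trace_array {
        /* ... existing fields ... */
        arch_spinlock_t         max_lock;       /* replaces ftrace_max_lock */
#ifdef CONFIG_TRACER_MAX_TRACE
        unsigned long           max_latency;    /* replaces tracing_max_latency */
#endif
        /* ... */
};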
@@ -1000,7 +982,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
         max_buf->cpu = cpu;
         max_buf->time_start = data->preempt_timestamp;
 
-        max_data->saved_latency = tracing_max_latency;
+        max_data->saved_latency = tr->max_latency;
         max_data->critical_start = data->critical_start;
         max_data->critical_end = data->critical_end;
 
@@ -1048,14 +1030,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
         }
 
-        arch_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&tr->max_lock);
 
         buf = tr->trace_buffer.buffer;
         tr->trace_buffer.buffer = tr->max_buffer.buffer;
         tr->max_buffer.buffer = buf;
 
         __update_max_tr(tr, tsk, cpu);
-        arch_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&tr->max_lock);
 }
 
 /**
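The swap itself only exchanges ring-buffer pointers, so it is cheap; the lock exists because the exchange must be atomic with respect to any other swapper. The rationale from the deleted comment still applies to the per-instance lock: an arch_spinlock_t is used instead of a normal spinlock so that lockdep, which may itself be traced, never sees it. A minimal sketch of the pattern, with illustrative names rather than kernel API:

/* Sketch of the swap-under-arch_spinlock pattern used above. */
static void swap_snapshot(struct trace_array *tr)
{
        struct ring_buffer *tmp;

        arch_spin_lock(&tr->max_lock);          /* no lockdep involvement */
        tmp = tr->trace_buffer.buffer;          /* live buffer becomes snapshot */
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = tmp;
        arch_spin_unlock(&tr->max_lock);
}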
@@ -1081,7 +1063,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
         }
 
-        arch_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&tr->max_lock);
 
         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
 
@@ -1099,11 +1081,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
         __update_max_tr(tr, tsk, cpu);
-        arch_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&tr->max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
         /* Iterators are static, they should be filled or empty */
         if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1220,8 +1202,6 @@ int register_tracer(struct tracer *type)
         else
                 if (!type->flags->opts)
                         type->flags->opts = dummy_tracer_opt;
-        if (!type->wait_pipe)
-                type->wait_pipe = default_wait_pipe;
 
         ret = run_tracer_selftest(type);
         if (ret < 0)
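register_tracer() no longer patches in a default wait callback because the per-tracer ->wait_pipe hook is going away entirely; every reader path now calls the renamed wait_on_pipe() directly. The tail of that function falls outside the hunk above; assuming it ends the way the rest of this series suggests, it blocks on the ring buffer itself rather than in a tracer-specific poll loop:

/* Sketch: the visible head of wait_on_pipe() plus an assumed tail that
 * blocks in ring_buffer_wait() instead of a per-tracer callback. */
static void wait_on_pipe(struct trace_iterator *iter)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;

        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}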
@@ -1305,22 +1285,71 @@ void tracing_reset_all_online_cpus(void)
         }
 }
 
-#define SAVED_CMDLINES 128
+#define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
-static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
-static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
-static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
-static int cmdline_idx;
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+struct saved_cmdlines_buffer {
+        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+        unsigned *map_cmdline_to_pid;
+        unsigned cmdline_num;
+        int cmdline_idx;
+        char *saved_cmdlines;
+};
+static struct saved_cmdlines_buffer *savedcmd;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
 
-static void trace_init_cmdlines(void)
+static inline char *get_saved_cmdlines(int idx)
 {
-        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
-        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
-        cmdline_idx = 0;
+        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+}
+
+static inline void set_cmdline(int idx, const char *cmdline)
+{
+        memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+}
+
+static int allocate_cmdlines_buffer(unsigned int val,
+                                    struct saved_cmdlines_buffer *s)
+{
+        s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
+                                        GFP_KERNEL);
+        if (!s->map_cmdline_to_pid)
+                return -ENOMEM;
+
+        s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+        if (!s->saved_cmdlines) {
+                kfree(s->map_cmdline_to_pid);
+                return -ENOMEM;
+        }
+
+        s->cmdline_idx = 0;
+        s->cmdline_num = val;
+        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+               sizeof(s->map_pid_to_cmdline));
+        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+               val * sizeof(*s->map_cmdline_to_pid));
+
+        return 0;
+}
+
+static int trace_create_savedcmd(void)
+{
+        int ret;
+
+        savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+        if (!savedcmd)
+                return -ENOMEM;
+
+        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+        if (ret < 0) {
+                kfree(savedcmd);
+                savedcmd = NULL;
+                return -ENOMEM;
+        }
+
+        return 0;
 }
 
 int is_tracing_stopped(void)
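A sense of scale for the new allocation, assuming PID_MAX_DEFAULT is 0x8000 (32768), TASK_COMM_LEN is 16, and a 4-byte unsigned:

/*
 * map_pid_to_cmdline:  (32768 + 1) * 4 bytes ~ 128 KiB (fixed, inline)
 * map_cmdline_to_pid:  val * 4  bytes        =  512 B  at the default 128
 * saved_cmdlines:      val * 16 bytes        =    2 KiB at the default 128
 *
 * Growing val to its maximum of 32768 adds 32768 * (4 + 16) = 640 KiB,
 * which is why the comm cache is now allocated and resized at runtime
 * instead of living in fixed static arrays.
 */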
@@ -1353,7 +1382,7 @@ void tracing_start(void)
         }
 
         /* Prevent the buffers from switching */
-        arch_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&global_trace.max_lock);
 
         buffer = global_trace.trace_buffer.buffer;
         if (buffer)
@@ -1365,7 +1394,7 @@ void tracing_start(void)
                 ring_buffer_record_enable(buffer);
 #endif
 
-        arch_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&global_trace.max_lock);
 
         ftrace_start();
  out:
@@ -1420,7 +1449,7 @@ void tracing_stop(void)
                 goto out;
 
         /* Prevent the buffers from switching */
-        arch_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&global_trace.max_lock);
 
         buffer = global_trace.trace_buffer.buffer;
         if (buffer)
@@ -1432,7 +1461,7 @@ void tracing_stop(void)
                 ring_buffer_record_disable(buffer);
 #endif
 
-        arch_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&global_trace.max_lock);
 
  out:
         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
@@ -1461,12 +1490,12 @@ static void tracing_stop_tr(struct trace_array *tr)
 
 void trace_stop_cmdline_recording(void);
 
-static void trace_save_cmdline(struct task_struct *tsk)
+static int trace_save_cmdline(struct task_struct *tsk)
 {
         unsigned pid, idx;
 
         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
-                return;
+                return 0;
 
         /*
          * It's not the end of the world if we don't get
@@ -1475,11 +1504,11 @@ static void trace_save_cmdline(struct task_struct *tsk)
          * so if we miss here, then better luck next time.
          */
         if (!arch_spin_trylock(&trace_cmdline_lock))
-                return;
+                return 0;
 
-        idx = map_pid_to_cmdline[tsk->pid];
+        idx = savedcmd->map_pid_to_cmdline[tsk->pid];
         if (idx == NO_CMDLINE_MAP) {
-                idx = (cmdline_idx + 1) % SAVED_CMDLINES;
+                idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
 
                 /*
                  * Check whether the cmdline buffer at idx has a pid
@@ -1487,22 +1516,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
                  * need to clear the map_pid_to_cmdline. Otherwise we
                  * would read the new comm for the old pid.
                  */
-                pid = map_cmdline_to_pid[idx];
+                pid = savedcmd->map_cmdline_to_pid[idx];
                 if (pid != NO_CMDLINE_MAP)
-                        map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+                        savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
-                map_cmdline_to_pid[idx] = tsk->pid;
-                map_pid_to_cmdline[tsk->pid] = idx;
+                savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+                savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
 
-                cmdline_idx = idx;
+                savedcmd->cmdline_idx = idx;
         }
 
-        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+        set_cmdline(idx, tsk->comm);
 
         arch_spin_unlock(&trace_cmdline_lock);
+
+        return 1;
 }
 
-void trace_find_cmdline(int pid, char comm[])
+static void __trace_find_cmdline(int pid, char comm[])
 {
         unsigned map;
 
@@ -1521,13 +1552,19 @@ void trace_find_cmdline(int pid, char comm[])
                 return;
         }
 
-        preempt_disable();
-        arch_spin_lock(&trace_cmdline_lock);
-        map = map_pid_to_cmdline[pid];
+        map = savedcmd->map_pid_to_cmdline[pid];
         if (map != NO_CMDLINE_MAP)
-                strcpy(comm, saved_cmdlines[map]);
+                strcpy(comm, get_saved_cmdlines(map));
         else
                 strcpy(comm, "<...>");
+}
+
+void trace_find_cmdline(int pid, char comm[])
+{
+        preempt_disable();
+        arch_spin_lock(&trace_cmdline_lock);
+
+        __trace_find_cmdline(pid, comm);
 
         arch_spin_unlock(&trace_cmdline_lock);
         preempt_enable();
@@ -1541,9 +1578,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
         if (!__this_cpu_read(trace_cmdline_save))
                 return;
 
-        __this_cpu_write(trace_cmdline_save, false);
-
-        trace_save_cmdline(tsk);
+        if (trace_save_cmdline(tsk))
+                __this_cpu_write(trace_cmdline_save, false);
 }
 
 void
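Returning a status from trace_save_cmdline() closes a small hole: the old code cleared the per-CPU trace_cmdline_save flag unconditionally, so a comm whose save lost the trylock race was never recorded. Now the flag is cleared only after a successful save, and the next traced event on that CPU retries:

/* Control flow after this change (illustrative sketch):
 *
 *   tracing_record_cmdline(tsk)
 *     trace_save_cmdline(tsk) == 0  ->  trylock missed; flag stays set,
 *                                       a later event tries again
 *     trace_save_cmdline(tsk) == 1  ->  comm saved; clear the flag
 */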
@@ -1746,7 +1782,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
          */
         barrier();
         if (use_stack == 1) {
-                trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
+                trace.entries = this_cpu_ptr(ftrace_stack.calls);
                 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
 
                 if (regs)
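The one-line change above is part of the tree-wide retirement of __get_cpu_var(); both forms yield the address of this CPU's copy of the array:

/* Equivalence relied on by the conversion (sketch):
 *
 *   &__get_cpu_var(ftrace_stack).calls[0]  // lvalue macro, then &elem[0]
 *   this_cpu_ptr(ftrace_stack.calls)       // direct per-cpu pointer
 *
 * Either way the caller must keep preemption disabled so the CPU cannot
 * change between computing the pointer and using it.
 */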
@@ -1995,7 +2031,21 @@ void trace_printk_init_buffers(void)
         if (alloc_percpu_trace_buffer())
                 return;
 
-        pr_info("ftrace: Allocated trace_printk buffers\n");
+        /* trace_printk() is for debug use only. Don't use it in production. */
+
+        pr_warning("\n**********************************************************\n");
+        pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+        pr_warning("**                                                      **\n");
+        pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
+        pr_warning("**                                                      **\n");
+        pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
+        pr_warning("** unsafe for production use.                           **\n");
+        pr_warning("**                                                      **\n");
+        pr_warning("** If you see this message and you are not debugging    **\n");
+        pr_warning("** the kernel, report this immediately to your vendor!  **\n");
+        pr_warning("**                                                      **\n");
+        pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+        pr_warning("**********************************************************\n");
 
         /* Expand the buffers to set size */
         tracing_update_buffers();
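The banner replaces a quiet pr_info() because any trace_printk() caller, built-in or module, permanently allocates per-CPU buffers. A minimal illustration of what trips it, a sketch rather than anything in this patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

static int __init tp_demo_init(void)
{
        /* The first trace_printk() user causes the buffers above to be
         * allocated and the NOTICE block to land in the kernel log. */
        trace_printk("hello from cpu %d\n", raw_smp_processor_id());
        return 0;
}
module_init(tp_demo_init);
MODULE_LICENSE("GPL");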
@@ -3333,7 +3383,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
         mutex_lock(&tracing_cpumask_update_lock);
 
         local_irq_disable();
-        arch_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&tr->max_lock);
         for_each_tracing_cpu(cpu) {
                 /*
                  * Increase/decrease the disabled counter if we are
@@ -3350,7 +3400,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
                 }
         }
-        arch_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&tr->max_lock);
         local_irq_enable();
 
         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
@@ -3592,6 +3642,7 @@ static const char readme_msg[] =
3592 " trace_options\t\t- Set format or modify how tracing happens\n" 3642 " trace_options\t\t- Set format or modify how tracing happens\n"
3593 "\t\t\t Disable an option by adding a suffix 'no' to the\n" 3643 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3594 "\t\t\t option name\n" 3644 "\t\t\t option name\n"
3645 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3595#ifdef CONFIG_DYNAMIC_FTRACE 3646#ifdef CONFIG_DYNAMIC_FTRACE
3596 "\n available_filter_functions - list of functions that can be filtered on\n" 3647 "\n available_filter_functions - list of functions that can be filtered on\n"
3597 " set_ftrace_filter\t- echo function name in here to only trace these\n" 3648 " set_ftrace_filter\t- echo function name in here to only trace these\n"
@@ -3705,55 +3756,153 @@ static const struct file_operations tracing_readme_fops = {
         .llseek         = generic_file_llseek,
 };
 
+static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
+{
+        unsigned int *ptr = v;
+
+        if (*pos || m->count)
+                ptr++;
+
+        (*pos)++;
+
+        for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
+             ptr++) {
+                if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
+                        continue;
+
+                return ptr;
+        }
+
+        return NULL;
+}
+
+static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
+{
+        void *v;
+        loff_t l = 0;
+
+        preempt_disable();
+        arch_spin_lock(&trace_cmdline_lock);
+
+        v = &savedcmd->map_cmdline_to_pid[0];
+        while (l <= *pos) {
+                v = saved_cmdlines_next(m, v, &l);
+                if (!v)
+                        return NULL;
+        }
+
+        return v;
+}
+
+static void saved_cmdlines_stop(struct seq_file *m, void *v)
+{
+        arch_spin_unlock(&trace_cmdline_lock);
+        preempt_enable();
+}
+
+static int saved_cmdlines_show(struct seq_file *m, void *v)
+{
+        char buf[TASK_COMM_LEN];
+        unsigned int *pid = v;
+
+        __trace_find_cmdline(*pid, buf);
+        seq_printf(m, "%d %s\n", *pid, buf);
+        return 0;
+}
+
+static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
+        .start          = saved_cmdlines_start,
+        .next           = saved_cmdlines_next,
+        .stop           = saved_cmdlines_stop,
+        .show           = saved_cmdlines_show,
+};
+
+static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
+{
+        if (tracing_disabled)
+                return -ENODEV;
+
+        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
+}
+
+static const struct file_operations tracing_saved_cmdlines_fops = {
+        .open           = tracing_saved_cmdlines_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
+};
+
 static ssize_t
-tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
-                                size_t cnt, loff_t *ppos)
+tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
+                                 size_t cnt, loff_t *ppos)
 {
-        char *buf_comm;
-        char *file_buf;
-        char *buf;
-        int len = 0;
-        int pid;
-        int i;
+        char buf[64];
+        int r;
+
+        arch_spin_lock(&trace_cmdline_lock);
+        r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
+        arch_spin_unlock(&trace_cmdline_lock);
+
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+        kfree(s->saved_cmdlines);
+        kfree(s->map_cmdline_to_pid);
+        kfree(s);
+}
+
+static int tracing_resize_saved_cmdlines(unsigned int val)
+{
+        struct saved_cmdlines_buffer *s, *savedcmd_temp;
 
-        file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
-        if (!file_buf)
+        s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+        if (!s)
                 return -ENOMEM;
 
-        buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
-        if (!buf_comm) {
-                kfree(file_buf);
+        if (allocate_cmdlines_buffer(val, s) < 0) {
+                kfree(s);
                 return -ENOMEM;
         }
 
-        buf = file_buf;
+        arch_spin_lock(&trace_cmdline_lock);
+        savedcmd_temp = savedcmd;
+        savedcmd = s;
+        arch_spin_unlock(&trace_cmdline_lock);
+        free_saved_cmdlines_buffer(savedcmd_temp);
 
-        for (i = 0; i < SAVED_CMDLINES; i++) {
-                int r;
+        return 0;
+}
 
-                pid = map_cmdline_to_pid[i];
-                if (pid == -1 || pid == NO_CMDLINE_MAP)
-                        continue;
+static ssize_t
+tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
+                                  size_t cnt, loff_t *ppos)
+{
+        unsigned long val;
+        int ret;
 
-                trace_find_cmdline(pid, buf_comm);
-                r = sprintf(buf, "%d %s\n", pid, buf_comm);
-                buf += r;
-                len += r;
-        }
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
+                return ret;
 
-        len = simple_read_from_buffer(ubuf, cnt, ppos,
-                                      file_buf, len);
+        /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
+        if (!val || val > PID_MAX_DEFAULT)
+                return -EINVAL;
 
-        kfree(file_buf);
-        kfree(buf_comm);
+        ret = tracing_resize_saved_cmdlines((unsigned int)val);
+        if (ret < 0)
+                return ret;
 
-        return len;
+        *ppos += cnt;
+
+        return cnt;
 }
 
-static const struct file_operations tracing_saved_cmdlines_fops = {
+static const struct file_operations tracing_saved_cmdlines_size_fops = {
         .open           = tracing_open_generic,
-        .read           = tracing_saved_cmdlines_read,
-        .llseek         = generic_file_llseek,
+        .read           = tracing_saved_cmdlines_size_read,
+        .write          = tracing_saved_cmdlines_size_write,
 };
 
 static ssize_t
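Taken together, saved_cmdlines becomes a real seq_file (one comm-pid pair per record, iterated under trace_cmdline_lock) and the new saved_cmdlines_size file reads back and resizes the cache, swapping the replacement buffer in under the same lock so readers never see a half-built table. A userspace sketch, assuming debugfs is mounted at /sys/kernel/debug on a kernel carrying this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/saved_cmdlines_size";
        char buf[32];
        ssize_t n;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("current size: %s", buf);  /* "128\n" by default */
        }
        /* Values outside 1..PID_MAX_DEFAULT are rejected with EINVAL. */
        if (write(fd, "1024", 4) != 4)
                perror("resize");
        close(fd);
        return 0;
}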
@@ -4225,25 +4374,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
         return trace_poll(iter, filp, poll_table);
 }
 
-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- *  1) the current tracer might hold the runqueue lock when it wakes up
- *     a reader, hence a deadlock (sched, function, and function graph tracers)
- *  2) the function tracers, trace all functions, we don't want
- *     the overhead of calling wake_up and friends
- *     (and tracing them too)
- *
- * Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-        set_current_state(TASK_INTERRUPTIBLE);
-        /* sleep for 100 msecs, and try again. */
-        schedule_timeout(HZ / 10);
-}
-
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
@@ -4255,15 +4385,6 @@ static int tracing_wait_pipe(struct file *filp)
                 return -EAGAIN;
         }
 
-        mutex_unlock(&iter->mutex);
-
-        iter->trace->wait_pipe(iter);
-
-        mutex_lock(&iter->mutex);
-
-        if (signal_pending(current))
-                return -EINTR;
-
         /*
          * We block until we read something and tracing is disabled.
          * We still block if tracing is disabled, but we have never
@@ -4275,6 +4396,15 @@ static int tracing_wait_pipe(struct file *filp)
                  */
                 if (!tracing_is_on() && iter->pos)
                         break;
+
+                mutex_unlock(&iter->mutex);
+
+                wait_on_pipe(iter);
+
+                mutex_lock(&iter->mutex);
+
+                if (signal_pending(current))
+                        return -EINTR;
         }
 
         return 1;
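The nine lines removed from the previous hunk reappear here, below the exit checks, so tracing_wait_pipe() now tests its non-blocking and tracing-off conditions before sleeping rather than after a needless wait. The resulting loop, in outline:

/* Sketch of the reordered wait loop:
 *
 *   while (trace_empty(iter)) {
 *           if (nonblocking)                     return -EAGAIN;
 *           if (!tracing_is_on() && iter->pos)   break;
 *           unlock;  wait_on_pipe(iter);  relock;
 *           if (signal_pending(current))         return -EINTR;
 *   }
 *   return 1;
 */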
@@ -5197,7 +5327,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                         goto out_unlock;
                 }
                 mutex_unlock(&trace_types_lock);
-                iter->trace->wait_pipe(iter);
+                wait_on_pipe(iter);
                 mutex_lock(&trace_types_lock);
                 if (signal_pending(current)) {
                         size = -EINTR;
@@ -5408,7 +5538,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                         goto out;
                 }
                 mutex_unlock(&trace_types_lock);
-                iter->trace->wait_pipe(iter);
+                wait_on_pipe(iter);
                 mutex_lock(&trace_types_lock);
                 if (signal_pending(current)) {
                         ret = -EINTR;
@@ -6102,6 +6232,25 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
         return 0;
 }
 
+static void free_trace_buffers(struct trace_array *tr)
+{
+        if (!tr)
+                return;
+
+        if (tr->trace_buffer.buffer) {
+                ring_buffer_free(tr->trace_buffer.buffer);
+                tr->trace_buffer.buffer = NULL;
+                free_percpu(tr->trace_buffer.data);
+        }
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+        if (tr->max_buffer.buffer) {
+                ring_buffer_free(tr->max_buffer.buffer);
+                tr->max_buffer.buffer = NULL;
+        }
+#endif
+}
+
 static int new_instance_create(const char *name)
 {
         struct trace_array *tr;
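free_trace_buffers() folds the two slightly different teardown sequences below into a single helper, and NULLing the buffer pointers makes a repeated call harmless. Its intended call sites, in outline:

/* Sketch of the shared cleanup (see the two hunks that follow):
 *
 *   new_instance_create():   out_free_tr: free_trace_buffers(tr); ...
 *   instance_delete():       ...; free_trace_buffers(tr);
 *
 * A NULL tr or an already-freed buffer is simply skipped, so both the
 * partial-setup error path and full teardown can share the helper.
 */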
@@ -6131,6 +6280,8 @@ static int new_instance_create(const char *name)
 
         raw_spin_lock_init(&tr->start_lock);
 
+        tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
         tr->current_trace = &nop_trace;
 
         INIT_LIST_HEAD(&tr->systems);
@@ -6158,8 +6309,7 @@ static int new_instance_create(const char *name)
         return 0;
 
  out_free_tr:
-        if (tr->trace_buffer.buffer)
-                ring_buffer_free(tr->trace_buffer.buffer);
+        free_trace_buffers(tr);
         free_cpumask_var(tr->tracing_cpumask);
         kfree(tr->name);
         kfree(tr);
@@ -6199,8 +6349,7 @@ static int instance_delete(const char *name)
         event_trace_del_tracer(tr);
         ftrace_destroy_function_files(tr);
         debugfs_remove_recursive(tr->dir);
-        free_percpu(tr->trace_buffer.data);
-        ring_buffer_free(tr->trace_buffer.buffer);
+        free_trace_buffers(tr);
 
         kfree(tr->name);
         kfree(tr);
@@ -6328,6 +6477,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
         trace_create_file("tracing_on", 0644, d_tracer,
                           tr, &rb_simple_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+        trace_create_file("tracing_max_latency", 0644, d_tracer,
+                        &tr->max_latency, &tracing_max_lat_fops);
+#endif
+
         if (ftrace_create_function_files(tr, d_tracer))
                 WARN(1, "Could not allocate function filter files");
 
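With the file created per instance, each trace_array tracks its own worst-case latency; the global copy under the top-level tracing directory is removed in the next hunk. The resulting debugfs layout, roughly:

/*
 *   /sys/kernel/debug/tracing/tracing_max_latency
 *           reads/writes global_trace.max_latency
 *   /sys/kernel/debug/tracing/instances/<name>/tracing_max_latency
 *           reads/writes that instance's tr->max_latency
 */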
@@ -6353,11 +6507,6 @@ static __init int tracer_init_debugfs(void)
 
         init_tracer_debugfs(&global_trace, d_tracer);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-        trace_create_file("tracing_max_latency", 0644, d_tracer,
-                        &tracing_max_latency, &tracing_max_lat_fops);
-#endif
-
         trace_create_file("tracing_thresh", 0644, d_tracer,
                         &tracing_thresh, &tracing_max_lat_fops);
 
@@ -6367,6 +6516,9 @@ static __init int tracer_init_debugfs(void)
         trace_create_file("saved_cmdlines", 0444, d_tracer,
                         NULL, &tracing_saved_cmdlines_fops);
 
+        trace_create_file("saved_cmdlines_size", 0644, d_tracer,
+                          NULL, &tracing_saved_cmdlines_size_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -6603,18 +6755,19 @@ __init static int tracer_alloc_buffers(void)
         if (!temp_buffer)
                 goto out_free_cpumask;
 
+        if (trace_create_savedcmd() < 0)
+                goto out_free_temp_buffer;
+
         /* TODO: make the number of buffers hot pluggable with CPUS */
         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                 WARN_ON(1);
-                goto out_free_temp_buffer;
+                goto out_free_savedcmd;
         }
 
         if (global_trace.buffer_disabled)
                 tracing_off();
 
-        trace_init_cmdlines();
-
         if (trace_boot_clock) {
                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
                 if (ret < 0)
@@ -6629,6 +6782,10 @@ __init static int tracer_alloc_buffers(void)
          */
         global_trace.current_trace = &nop_trace;
 
+        global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+        ftrace_init_global_array_ops(&global_trace);
+
         register_tracer(&nop_trace);
 
         /* All seems OK, enable tracing */
@@ -6656,13 +6813,11 @@ __init static int tracer_alloc_buffers(void)
 
         return 0;
 
+out_free_savedcmd:
+        free_saved_cmdlines_buffer(savedcmd);
 out_free_temp_buffer:
         ring_buffer_free(temp_buffer);
 out_free_cpumask:
-        free_percpu(global_trace.trace_buffer.data);
-#ifdef CONFIG_TRACER_MAX_TRACE
-        free_percpu(global_trace.max_buffer.data);
-#endif
         free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
         free_cpumask_var(tracing_buffer_mask);
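The error ladder keeps its strict reverse-of-setup ordering: trace_create_savedcmd() runs after temp_buffer is allocated, so its unwind label slots in just above out_free_temp_buffer. The percpu frees formerly under out_free_cpumask disappear, presumably because allocate_trace_buffers() now unwinds its own partial allocations when it fails. The idiom, in miniature:

/* Sketch of the goto-unwind idiom used here:
 *
 *   if (alloc_A() < 0) goto out;
 *   if (alloc_B() < 0) goto out_free_A;
 *   if (alloc_C() < 0) goto out_free_B;   // a new label slots in like this
 *   return 0;
 *
 *   out_free_B: free_B();
 *   out_free_A: free_A();
 *   out:        return -ENOMEM;
 */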