Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  663
1 files changed, 562 insertions(+), 101 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,12 +47,13 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
-/* By default, current tracing type is normal tracing. */
-enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
-
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -61,6 +62,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +72,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (!test_tsk_trace_trace(current))
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -96,6 +114,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +147,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
@@ -182,8 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func == ftrace_pid_func)
+			func = ftrace_pid_function;
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -211,6 +277,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -395,14 +463,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
-		ftrace_addr = (unsigned long)ftrace_caller;
-	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
-#else
 	ftrace_addr = (unsigned long)ftrace_caller;
-#endif
 
 	ip = rec->ip;
 
@@ -535,6 +596,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -545,12 +611,22 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
 
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
+
+static void ftrace_startup(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -558,23 +634,13 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
+	ftrace_startup_enable(command);
 
-	if (!command || !ftrace_enabled)
-		goto out;
-
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -719,7 +785,6 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-	loff_t			pos;
 	struct ftrace_page	*pg;
 	unsigned		idx;
 	unsigned		flags;
@@ -744,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
+		} else {
+			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -766,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}
 	spin_unlock(&ftrace_lock);
 
-	iter->pos = *pos;
-
 	return rec;
 }
 
@@ -775,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
-	loff_t l = -1;
 
-	if (*pos > iter->pos)
-		*pos = iter->pos;
+	if (*pos > 0) {
+		if (iter->idx < 0)
+			return p;
+		(*pos)--;
+		iter->idx--;
+	}
 
-	l = *pos;
-	p = t_next(m, p, &l);
+	p = t_next(m, p, pos);
 
 	return p;
 }
@@ -792,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
-	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
-	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	ret = seq_printf(m, "%s\n", str);
-	if (ret < 0) {
-		iter->pos--;
-		iter->idx--;
-	}
+	seq_printf(m, "%s\n", str);
 
 	return 0;
 }
@@ -832,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -919,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1262,12 +1321,233 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
-{
-	struct dentry *d_tracer;
-	struct dentry *entry;
-
-	d_tracer = tracing_init_dentry();
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static DEFINE_MUTEX(graph_lock);
+
+int ftrace_graph_count;
+unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned long *array = m->private;
+	int index = *pos;
+
+	(*pos)++;
+
+	if (index >= ftrace_graph_count)
+		return NULL;
+
+	return &array[index];
+}
+
+static void *g_start(struct seq_file *m, loff_t *pos)
+{
+	void *p = NULL;
+
+	mutex_lock(&graph_lock);
+
+	p = g_next(m, p, pos);
+
+	return p;
+}
+
+static void g_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&graph_lock);
+}
+
+static int g_show(struct seq_file *m, void *v)
+{
+	unsigned long *ptr = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	if (!ptr)
+		return 0;
+
+	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+
+	seq_printf(m, "%s\n", str);
+
+	return 0;
+}
+
+static struct seq_operations ftrace_graph_seq_ops = {
+	.start = g_start,
+	.next = g_next,
+	.stop = g_stop,
+	.show = g_show,
+};
+
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	mutex_lock(&graph_lock);
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND)) {
+		ftrace_graph_count = 0;
+		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, &ftrace_graph_seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = ftrace_graph_funcs;
+		}
+	} else
+		file->private_data = ftrace_graph_funcs;
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static ssize_t
+ftrace_graph_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_read(file, ubuf, cnt, ppos);
+	else
+		return -EPERM;
+}
+
+static int
+ftrace_set_func(unsigned long *array, int idx, char *buffer)
+{
+	char str[KSYM_SYMBOL_LEN];
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	int found = 0;
+	int i, j;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+				continue;
+
+			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+			if (strcmp(str, buffer) == 0) {
+				found = 1;
+				for (j = 0; j < idx; j++)
+					if (array[j] == rec->ip) {
+						found = 0;
+						break;
+					}
+				if (found)
+					array[idx] = rec->ip;
+				break;
+			}
+		}
+	}
+	spin_unlock(&ftrace_lock);
+
+	return found ? 0 : -EINVAL;
+}
+
+static ssize_t
+ftrace_graph_write(struct file *file, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	unsigned char buffer[FTRACE_BUFF_MAX+1];
+	unsigned long *array;
+	size_t read = 0;
+	ssize_t ret;
+	int index = 0;
+	char ch;
+
+	if (!cnt || cnt < 0)
+		return 0;
+
+	mutex_lock(&graph_lock);
+
+	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+		array = m->private;
+	} else
+		array = file->private_data;
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+	read++;
+	cnt--;
+
+	/* skip white space */
+	while (cnt && isspace(ch)) {
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	if (isspace(ch)) {
+		*ppos += read;
+		ret = read;
+		goto out;
+	}
+
+	while (cnt && !isspace(ch)) {
+		if (index < FTRACE_BUFF_MAX)
+			buffer[index++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+	buffer[index] = 0;
+
+	/* we allow only one at a time */
+	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	if (ret)
+		goto out;
+
+	ftrace_graph_count++;
+
+	file->f_pos += read;
+
+	ret = read;
+ out:
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static const struct file_operations ftrace_graph_fops = {
+	.open = ftrace_graph_open,
+	.read = ftrace_graph_read,
+	.write = ftrace_graph_write,
+};
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
+{
+	struct dentry *entry;
 
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
@@ -1292,11 +1572,18 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_graph_function' entry\n");
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1669,186 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup() do { } while (0)
-# define ftrace_shutdown() do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command) do { } while (0)
+# define ftrace_shutdown(command) do { } while (0)
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace == ftrace_swapper_pid)
+		r = sprintf(buf, "swapper tasks\n");
+	else if (ftrace_pid_trace)
+		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void clear_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		clear_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void set_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		set_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void clear_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		clear_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+	put_pid(pid);
+}
+
+static void set_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		set_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+}
+
+static void clear_ftrace_pid_task(struct pid **pid)
+{
+	if (*pid == ftrace_swapper_pid)
+		clear_ftrace_swapper();
+	else
+		clear_ftrace_pid(*pid);
+
+	*pid = NULL;
+}
+
+static void set_ftrace_pid_task(struct pid *pid)
+{
+	if (pid == ftrace_swapper_pid)
+		set_ftrace_swapper();
+	else
+		set_ftrace_pid(pid);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	struct pid *pid;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (!ftrace_pid_trace)
+			goto out;
+
+		clear_ftrace_pid_task(&ftrace_pid_trace);
+
+	} else {
+		/* swapper task is special */
+		if (!val) {
+			pid = ftrace_swapper_pid;
+			if (pid == ftrace_pid_trace)
+				goto out;
+		} else {
+			pid = find_get_pid(val);
+
+			if (pid == ftrace_pid_trace) {
+				put_pid(pid);
+				goto out;
+			}
+		}
+
+		if (ftrace_pid_trace)
+			clear_ftrace_pid_task(&ftrace_pid_trace);
+
+		if (!pid)
+			goto out;
+
+		ftrace_pid_trace = pid;
+
+		set_ftrace_pid_task(ftrace_pid_trace);
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
@@ -1422,15 +1883,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
+	ftrace_startup(0);
 
-out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
@@ -1447,7 +1902,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1496,14 +1951,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
+static atomic_t ftrace_graph_active;
 
-/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-			(trace_function_return_t)ftrace_stub;
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+	return 0;
+}
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1534,8 +1994,11 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			t->ret_stack = ret_stack_list[start++];
 			t->curr_ret_stack = -1;
+			/* Make sure IRQs see the -1 first: */
+			barrier();
+			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
@@ -1549,7 +2012,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,64 +2032,59 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	/*
-	 * Don't launch return tracing if normal function
-	 * tracing is already running.
-	 */
-	if (ftrace_trace_function != ftrace_stub) {
-		ret = -EBUSY;
-		goto out;
-	}
-	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
-	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
-	ftrace_startup();
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
-	ftrace_shutdown();
-	/* Restore normal tracing type */
-	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = ftrace_graph_entry_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
 		if (!t->ret_stack)
 			return;
 		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 	} else
 		t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
@@ -1636,7 +2094,10 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
 
 	kfree(ret_stack);
 }
-#endif
-
 
+void ftrace_graph_stop(void)
+{
+	ftrace_stop();
+}
+#endif
 
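Usage note (not part of the diff): after this change a tracer registers both an entry hook and a return hook through the renamed graph API. Below is a minimal sketch of an in-kernel caller, assuming only the signatures visible in the hunks above (register_ftrace_graph, unregister_ftrace_graph, trace_func_graph_ent_t, trace_func_graph_ret_t); the my_entry/my_return/my_tracer_* names are hypothetical.

#include <linux/ftrace.h>

/* entry hook: a nonzero return asks for this function's return to be traced too */
static int my_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

/* return hook: invoked as each traced function returns */
static void my_return(struct ftrace_graph_ret *trace)
{
}

static int __init my_tracer_init(void)
{
	/* note the argument order: return hook first, entry hook second */
	return register_ftrace_graph(my_return, my_entry);
}

static void my_tracer_exit(void)
{
	unregister_ftrace_graph();
}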