path: root/kernel/trace
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                   |  19
-rw-r--r--  kernel/trace/Makefile                  |   2
-rw-r--r--  kernel/trace/ftrace.c                  | 306
-rw-r--r--  kernel/trace/trace.c                   |  79
-rw-r--r--  kernel/trace/trace.h                   |  35
-rw-r--r--  kernel/trace/trace_functions_graph.c   | 175
-rw-r--r--  kernel/trace/trace_functions_return.c  |  98
7 files changed, 491 insertions(+), 223 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d151aab48ed6..8b6b673b4d6c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
 	bool
 
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
 	bool
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-	bool "Kernel Function return Tracer"
-	depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+	bool "Kernel Function Graph Tracer"
+	depends on HAVE_FUNCTION_GRAPH_TRACER
 	depends on FUNCTION_TRACER
 	help
-	  Enable the kernel to trace a function at its return.
-	  It's first purpose is to trace the duration of functions.
-	  This is done by setting the current return address on the thread
-	  info structure of the current task.
+	  Enable the kernel to trace a function at both its return
+	  and its entry.
+	  It's first purpose is to trace the duration of functions and
+	  draw a call graph for each thread with some informations like
+	  the return value.
+	  This is done by setting the current return address on the current
+	  task structure into a stack of calls.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index acaa06553eca..62dc561b6676 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_BTS_TRACER) += trace_bts.o
 obj-$(CONFIG_POWER_TRACER) += trace_power.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..cbf8b09f63a5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,12 +47,12 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
-/* By default, current tracing type is normal tracing. */
-enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
-
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -96,6 +113,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
@@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		set_ftrace_pid_function(func);
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -211,6 +278,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
-		ftrace_addr = (unsigned long)ftrace_caller;
-	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
-#else
 	ftrace_addr = (unsigned long)ftrace_caller;
-#endif
 
 	ip = rec->ip;
 
@@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
 
+static void ftrace_startup(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -558,23 +635,13 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
+	ftrace_startup_enable(command);
 
-	if (!command || !ftrace_enabled)
-		goto out;
-
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (ret < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
@@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
+	ftrace_startup(0);
 
-out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
@@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-static atomic_t ftrace_retfunc_active;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-	(trace_function_return_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+	(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+	(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1549,7 +1695,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,52 +1715,46 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	/*
-	 * Don't launch return tracing if normal function
-	 * tracing is already running.
-	 */
-	if (ftrace_trace_function != ftrace_stub) {
-		ret = -EBUSY;
-		goto out;
-	}
-	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
-	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
-	ftrace_startup();
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
-	ftrace_shutdown();
-	/* Restore normal tracing type */
-	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
 	t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
@@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
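
With the hunks above, register_ftrace_return() has become register_ftrace_graph(), which takes two hooks: one called at function entry and one at function return. A hedged sketch of how an in-kernel user might register such hooks is shown below; the callback bodies and module boilerplate are hypothetical, while the registration calls and the ftrace_graph_ent/ftrace_graph_ret fields are the ones visible in this diff.

/*
 * Illustrative sketch of a register_ftrace_graph() user, based only on the
 * hooks and struct fields visible in this diff; the callback bodies and the
 * pr_info() output are hypothetical.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>

static void my_graph_entry(struct ftrace_graph_ent *ent)
{
	/* called on function entry, before the traced function runs */
	pr_info("enter %pF at depth %d\n", (void *)ent->func, ent->depth);
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	/* called on function return, with entry/exit timestamps */
	pr_info("leave after %llu ns (depth %d)\n",
		ret->rettime - ret->calltime, ret->depth);
}

static int __init my_graph_init(void)
{
	/* hook both return and entry, as graph_trace_init() does below */
	return register_ftrace_graph(&my_graph_return, &my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}

module_init(my_graph_init);
module_exit(my_graph_exit);
MODULE_LICENSE("GPL");
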
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8df8fdd69c95..5811e0a5f732 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -804,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
 	char *cmdline = "<...>";
 	unsigned map;
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_graph_entry(struct trace_array *tr,
 				struct trace_array_cpu *data,
-				struct ftrace_retfunc *trace,
+				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_ret_entry *entry;
+	struct ftrace_graph_ent_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +898,32 @@ static void __trace_function_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1177,8 +1197,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,12 +1234,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
-		return print_return_function(iter);
-		break;
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
+		return print_graph_function(iter);
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4c453778a6ab..f96f4e787ff3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -25,7 +25,8 @@ enum trace_type {
 	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
-	TRACE_FN_RET,
+	TRACE_GRAPH_RET,
+	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_BTS,
 	TRACE_POWER,
@@ -57,14 +58,16 @@ struct ftrace_entry {
 	unsigned long parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+	struct trace_entry ent;
+	struct ftrace_graph_ent graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
-	unsigned long ip;
-	unsigned long parent_ip;
-	unsigned long long calltime;
-	unsigned long long rettime;
-	unsigned long overrun;
+	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
 
@@ -270,7 +273,10 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
+			  TRACE_GRAPH_ENT);		\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
+			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
 		__ftrace_bad_type();			\
@@ -404,9 +410,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
-void
-trace_function_return(struct ftrace_retfunc *trace);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
 	       unsigned long to);
@@ -451,6 +457,7 @@ struct tracer_switch_ops {
 	struct tracer_switch_ops *next;
 };
 
+char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -496,11 +503,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern unsigned long trace_flags;
 
 /* Standard output formatting function used for function return traces */
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern enum print_line_t print_return_function(struct trace_iterator *iter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
 #else
 static inline enum print_line_t
-print_return_function(struct trace_iterator *iter)
+print_graph_function(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..d31d695174aa
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,175 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+#define TRACE_GRAPH_INDENT	2
+
+#define TRACE_GRAPH_PRINT_OVERRUN	0x1
+static struct tracer_opt trace_opts[] = {
+	/* Display overruns or not */
+	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val = 0, /* Don't display overruns by default */
+	.opts = trace_opts
+};
+
+/* pid on the last trace processed */
+static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
+
+static int graph_trace_init(struct trace_array *tr)
+{
+	int cpu, ret;
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+
+	ret = register_ftrace_graph(&trace_graph_return,
+					&trace_graph_entry);
+	if (ret)
+		return ret;
+	tracing_start_cmdline_record();
+
+	return 0;
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+	tracing_stop_cmdline_record();
+	unregister_ftrace_graph();
+}
+
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+{
+	char *comm;
+
+	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+		return 1;
+
+	last_pid[cpu] = pid;
+	comm = trace_find_cmdline(pid);
+
+	return trace_seq_printf(s, "\nCPU[%03d]"
+				    " ------------8<---------- thread %s-%d"
+				    " ------------8<----------\n\n",
+				    cpu, comm, pid);
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
+		  struct trace_entry *ent, int cpu)
+{
+	int i;
+	int ret;
+
+	if (!verif_pid(s, ent->pid, cpu))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = seq_print_ip_sym(s, call->func, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "() {\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+		   struct trace_entry *ent, int cpu)
+{
+	int i;
+	int ret;
+
+	if (!verif_pid(s, ent->pid, cpu))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "} ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+					trace->overrun);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+
+	switch (entry->type) {
+	case TRACE_GRAPH_ENT: {
+		struct ftrace_graph_ent_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_entry(&field->graph_ent, s, entry,
+					 iter->cpu);
+	}
+	case TRACE_GRAPH_RET: {
+		struct ftrace_graph_ret_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_return(&field->ret, s, entry, iter->cpu);
+	}
+	default:
+		return TRACE_TYPE_UNHANDLED;
+	}
+}
+
+static struct tracer graph_trace __read_mostly = {
+	.name	    = "function-graph",
+	.init	    = graph_trace_init,
+	.reset	    = graph_trace_reset,
+	.print_line = print_graph_function,
+	.flags	    = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+	return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);
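
Combining the format strings used above (the verif_pid() thread header, the "func() {" line from print_graph_entry() and the "} duration" line from print_graph_return()), the new tracer's output would look roughly like the sample below; the thread, functions and numbers are invented purely for illustration. Each closing brace is followed by rettime - calltime, printed without a unit in this version, and nesting is indented by TRACE_GRAPH_INDENT (2) spaces per depth level.

CPU[000] ------------8<---------- thread bash-2794 ------------8<----------

CPU[000] sys_open() {
CPU[000]   do_sys_open() {
CPU[000]     getname() {
CPU[000]     } 1432
CPU[000]   } 5268
CPU[000] } 6890
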
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
deleted file mode 100644
index e00d64509c9c..000000000000
--- a/kernel/trace/trace_functions_return.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Function return tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- * Mostly borrowed from function tracer which
- * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/fs.h>
-
-#include "trace.h"
-
-
-#define TRACE_RETURN_PRINT_OVERRUN	0x1
-static struct tracer_opt trace_opts[] = {
-	/* Display overruns or not */
-	{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val = 0, /* Don't display overruns by default */
-	.opts = trace_opts
-};
-
-
-static int return_trace_init(struct trace_array *tr)
-{
-	int cpu;
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-
-	return register_ftrace_return(&trace_function_return);
-}
-
-static void return_trace_reset(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-
-enum print_line_t
-print_return_function(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_ret_entry *field;
-	int ret;
-
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " (%llu ns)",
-					field->rettime - field->calltime);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-
-		ret = trace_seq_printf(s, "\n");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static struct tracer return_trace __read_mostly = {
-	.name	    = "return",
-	.init	    = return_trace_init,
-	.reset	    = return_trace_reset,
-	.print_line = print_return_function,
-	.flags	    = &tracer_flags,
-};
-
-static __init int init_return_trace(void)
-{
-	return register_tracer(&return_trace);
-}
-
-device_initcall(init_return_trace);