author    Frederic Weisbecker <fweisbec@gmail.com>    2008-11-25 18:57:25 -0500
committer Ingo Molnar <mingo@elte.hu>                 2008-11-25 19:59:45 -0500
commit    287b6e68ca7209caec40b2f44f837c580a413bae
tree      b0867d75868f6049dc5747bd39fdae2d477dde66 /kernel/trace
parent    fb52607afcd0629776f1dc9e657647ceae81dd50
tracing/function-return-tracer: set a more human readable output
Impact: feature

This patch sets a C-like output for the function graph tracing. For this aim, we now call two handlers for each function: one on entry and another on return. This way we can draw a well-ordered call stack.

The pid of the previous trace is loosely stored to be compared against the one of the current trace, to detect whether a context switch occurred. Without this little feature, the call tree would appear broken at some locations. We could use the sched tracer to capture these sched events, but this way of processing is much simpler.

Two spaces have been chosen for indentation so that deep call chains still fit on the screen. The execution time in nanoseconds is printed just after the closing brace; it seems easier to find the corresponding function this way. If the time were printed in a first column, it would not be so easy to match it to a function called at a deep depth.

I plan to output the return value too, but on 32-bit CPUs the return value can be 32 or 64 bits, and it is difficult to guess which case applies. I don't know what the better solution on x86-32 would be: print only eax (the low part) or edx (the high part) as well. Actually, it's the same problem when a function returns an 8-bit value: the high part of eax could contain junk values...

Here is an example of trace:

sys_read() {
  fget_light() {
  } 526
  vfs_read() {
    rw_verify_area() {
      security_file_permission() {
        cap_file_permission() {
        } 519
      } 1564
    } 2640
    do_sync_read() {
      pipe_read() {
        __might_sleep() {
        } 511
        pipe_wait() {
          prepare_to_wait() {
          } 760
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 504
                  } 1587
                  clear_buddies() {
                  } 512
                  add_cfs_task_weight() {
                  } 519
                  update_min_vruntime() {
                  } 511
                } 5602
                dequeue_entity() {
                  update_curr() {
                    update_min_vruntime() {
                    } 496
                  } 1631
                  clear_buddies() {
                  } 496
                  update_min_vruntime() {
                  } 527
                } 4580
                hrtick_update() {
                  hrtick_start_fair() {
                  } 488
                } 1489
              } 13700
            } 14949
          } 16016
          msecs_to_jiffies() {
          } 496
          put_prev_task_fair() {
          } 504
          pick_next_task_fair() {
          } 489
          pick_next_task_rt() {
          } 496
          pick_next_task_fair() {
          } 489
          pick_next_task_idle() {
          } 489

------------8<---------- thread 4 ------------8<----------

          finish_task_switch() {
          } 1203
          do_softirq() {
            __do_softirq() {
              __local_bh_disable() {
              } 669
              rcu_process_callbacks() {
                __rcu_process_callbacks() {
                  cpu_quiet() {
                    rcu_start_batch() {
                    } 503
                  } 1647
                } 3128
                __rcu_process_callbacks() {
                } 542
              } 5362
              _local_bh_enable() {
              } 587
            } 8880
          } 9986
          kthread_should_stop() {
          } 669
          deactivate_task() {
            dequeue_task() {
              dequeue_task_fair() {
                dequeue_entity() {
                  update_curr() {
                    calc_delta_mine() {
                    } 511
                    update_min_vruntime() {
                    } 511
                  } 2813

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
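To illustrate the mechanism, here is a minimal user-space sketch (plain C, not kernel code; the sim_* names, depths, and timestamps are invented for illustration) of how the two hooks cooperate to produce this output: the entry hook prints "func() {" at the call's depth, the return hook closes the brace and appends the duration, and the loosely stored pid emits the thread-switch marker.

#include <stdio.h>

#define TRACE_GRAPH_INDENT 2

static int last_pid = -1;	/* pid of the previously printed trace */

/* Emit the thread-switch marker when the pid changes between traces. */
static void sim_verif_pid(int pid)
{
	if (last_pid != -1 && last_pid == pid)
		return;
	last_pid = pid;
	printf("\n------------8<---------- thread %d"
	       " ------------8<----------\n\n", pid);
}

static void sim_indent(int depth)
{
	for (int i = 0; i < depth * TRACE_GRAPH_INDENT; i++)
		putchar(' ');
}

/* Entry hook: open a brace at the call's depth. */
static void sim_graph_entry(int pid, int depth, const char *func)
{
	sim_verif_pid(pid);
	sim_indent(depth);
	printf("%s() {\n", func);
}

/* Return hook: close the brace and print the duration in ns. */
static void sim_graph_return(int pid, int depth,
			     unsigned long long calltime,
			     unsigned long long rettime)
{
	sim_verif_pid(pid);
	sim_indent(depth);
	printf("} %llu\n", rettime - calltime);
}

int main(void)
{
	sim_graph_entry(1, 0, "sys_read");
	sim_graph_entry(1, 1, "fget_light");
	sim_graph_return(1, 1, 1000, 1526);
	sim_graph_return(1, 0, 500, 3700);	/* closes sys_read() */
	sim_graph_entry(4, 0, "finish_task_switch");	/* pid change: marker */
	sim_graph_return(4, 0, 2000, 3203);
	return 0;
}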
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                    30
-rw-r--r--  kernel/trace/trace.c                     67
-rw-r--r--  kernel/trace/trace.h                     28
-rw-r--r--  kernel/trace/trace_functions_graph.c    104
-rw-r--r--  kernel/trace/trace_functions_return.c    98
5 files changed, 168 insertions, 159 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9e19976af727..7e2d3b91692d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,12 +1498,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
-
-/* The callback that hooks the return of a function */
-trace_function_graph_t ftrace_graph_function =
-			(trace_function_graph_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1569,7 +1570,8 @@ static int start_graph_tracing(void)
 	return ret;
 }
 
-int register_ftrace_graph(trace_function_graph_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
@@ -1583,14 +1585,15 @@ int register_ftrace_graph(trace_function_graph_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-	atomic_inc(&ftrace_retfunc_active);
+	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_graph_function = func;
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
 	ftrace_startup();
 
 out:
@@ -1602,8 +1605,9 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
 	ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1614,7 +1618,7 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1638,5 +1642,3 @@ void ftrace_graph_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
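For reference, with this change an in-kernel user registers both hooks in one call. Below is a sketch of a hypothetical caller (the my_* names are invented here; the actual in-tree user is graph_trace_init() in the trace_functions_graph.c hunk further down):

#include <linux/ftrace.h>

/* Entry hook: trace->func and trace->depth describe the call being entered. */
static void my_entry_hook(struct ftrace_graph_ent *trace)
{
}

/* Return hook: trace->rettime - trace->calltime gives the duration. */
static void my_return_hook(struct ftrace_graph_ret *trace)
{
}

static int my_tracer_start(void)
{
	/* Note the order: return callback first, entry callback second. */
	return register_ftrace_graph(&my_return_hook, &my_entry_hook);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_graph();
}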
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f21ab2c68fd4..9d5f7c94f251 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -879,14 +879,38 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_graph_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +922,8 @@ static void __trace_function_graph(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -1178,7 +1198,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,7 +1213,28 @@ void trace_function_graph(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_graph(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
 		return print_graph_function(iter);
-		break;
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 72b5ef868765..ffe1bb1eb620 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -25,7 +25,8 @@ enum trace_type {
 	TRACE_BRANCH,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
-	TRACE_FN_RET,
+	TRACE_GRAPH_RET,
+	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_BTS,
 
@@ -56,14 +57,16 @@ struct ftrace_entry {
 	unsigned long		parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+	struct trace_entry	ent;
+	struct ftrace_graph_ent	graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_graph_entry {
+struct ftrace_graph_ret_entry {
 	struct trace_entry	ent;
-	unsigned long		ip;
-	unsigned long		parent_ip;
-	unsigned long long	calltime;
-	unsigned long long	rettime;
-	unsigned long		overrun;
+	struct ftrace_graph_ret	ret;
 };
 extern struct tracer boot_tracer;
 
@@ -264,7 +267,10 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-		IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
+			  TRACE_GRAPH_ENT);		\
+		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
+			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
 		__ftrace_bad_type();					\
 	} while (0)
@@ -397,9 +403,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
-void
-trace_function_graph(struct ftrace_graph_ret *trace);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
 	       unsigned long from,
 	       unsigned long to);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f5bad4624d2b..b6f0cc2a00cb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -13,6 +13,7 @@
 
 #include "trace.h"
 
+#define TRACE_GRAPH_INDENT	2
 
 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
 static struct tracer_opt trace_opts[] = {
@@ -26,6 +27,8 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
+/* pid on the last trace processed */
+static pid_t last_pid = -1;
 
 static int graph_trace_init(struct trace_array *tr)
 {
@@ -33,7 +36,8 @@ static int graph_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	return register_ftrace_graph(&trace_function_graph);
+	return register_ftrace_graph(&trace_graph_return,
+					&trace_graph_entry);
 }
 
 static void graph_trace_reset(struct trace_array *tr)
@@ -41,45 +45,97 @@ static void graph_trace_reset(struct trace_array *tr)
 	unregister_ftrace_graph();
 }
 
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid)
+{
+	if (last_pid != -1 && last_pid == pid)
+		return 1;
 
-enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+	last_pid = pid;
+	return trace_seq_printf(s, "\n------------8<---------- thread %d"
+				    " ------------8<----------\n\n",
+				    pid);
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
+		  struct trace_entry *ent)
 {
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_graph_entry *field;
+	int i;
 	int ret;
 
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
+	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = seq_print_ip_sym(s, call->func, 0);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, "() {\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+		   struct trace_entry *ent)
+{
+	int i;
+	int ret;
+
+	if (!verif_pid(s, ent->pid))
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, " (%llu ns)",
-				field->rettime - field->calltime);
+	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+		ret = trace_seq_printf(s, " ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	ret = trace_seq_printf(s, "} ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
 
-		ret = trace_seq_printf(s, "\n");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+					trace->overrun);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
+	}
+	return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
 
-		return TRACE_TYPE_HANDLED;
+	switch (entry->type) {
+	case TRACE_GRAPH_ENT: {
+		struct ftrace_graph_ent_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_entry(&field->graph_ent, s, entry);
+	}
+	case TRACE_GRAPH_RET: {
+		struct ftrace_graph_ret_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_return(&field->ret, s, entry);
+	}
+	default:
+		return TRACE_TYPE_UNHANDLED;
 	}
-	return TRACE_TYPE_UNHANDLED;
 }
 
 static struct tracer graph_trace __read_mostly = {
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
deleted file mode 100644
index e00d64509c9c..000000000000
--- a/kernel/trace/trace_functions_return.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Function return tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- * Mostly borrowed from function tracer which
- * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/fs.h>
-
-#include "trace.h"
-
-
-#define TRACE_RETURN_PRINT_OVERRUN	0x1
-static struct tracer_opt trace_opts[] = {
-	/* Display overruns or not */
-	{ TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
-	{ } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-	.val = 0, /* Don't display overruns by default */
-	.opts = trace_opts
-};
-
-
-static int return_trace_init(struct trace_array *tr)
-{
-	int cpu;
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-
-	return register_ftrace_return(&trace_function_return);
-}
-
-static void return_trace_reset(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-
-enum print_line_t
-print_return_function(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct ftrace_ret_entry *field;
-	int ret;
-
-	if (entry->type == TRACE_FN_RET) {
-		trace_assign_type(field, entry);
-		ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = seq_print_ip_sym(s, field->ip,
-					trace_flags & TRACE_ITER_SYM_MASK);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " (%llu ns)",
-				       field->rettime - field->calltime);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
-			ret = trace_seq_printf(s, " (Overruns: %lu)",
-						field->overrun);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
-
-		ret = trace_seq_printf(s, "\n");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static struct tracer return_trace __read_mostly = {
-	.name	     = "return",
-	.init	     = return_trace_init,
-	.reset	     = return_trace_reset,
-	.print_line = print_return_function,
-	.flags		= &tracer_flags,
-};
-
-static __init int init_return_trace(void)
-{
-	return register_tracer(&return_trace);
-}
-
-device_initcall(init_return_trace);