Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--   kernel/trace/trace_functions_graph.c   175
1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..d31d695174aa
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,175 @@
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT	2

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
static struct tracer_opt trace_opts[] = {
	/* Display overruns or not */
	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val = 0, /* Don't display overruns by default */
	.opts = trace_opts
};
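
/*
 * The "overrun" flag above is a per-tracer option; once this tracer is
 * selected it should show up in, and be toggled through, ftrace's
 * generic trace_options file (e.g. "echo overrun > trace_options").
 */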

/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\nCPU[%03d]"
				" ------------8<---------- thread %s-%d"
				" ------------8<----------\n\n",
				cpu, comm, pid);
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
		  struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "} ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
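
/*
 * Derived from the format strings above, the output for one traced call
 * looks roughly like this (comm, pid, symbol and duration are illustrative
 * values; the duration is rettime - calltime in raw clock units, and there
 * is no extra indentation at depth 0):
 *
 * CPU[000] ------------8<---------- thread bash-2794 ------------8<----------
 *
 * CPU[000] do_fork() {
 * CPU[000] } 4292
 */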

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(&field->graph_ent, s, entry,
					 iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function-graph",
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.flags		= &tracer_flags,
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
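
For reference, a minimal userspace sketch of how the tracer added by this patch could be exercised. This is not part of the patch: it assumes debugfs is mounted at /sys/kernel/debug and that the "overrun" option defined above is reachable through the standard trace_options file; current_tracer and trace are the usual ftrace control files.

#include <stdio.h>

#define TRACING_DIR "/sys/kernel/debug/tracing/"

/* Write a string into one of the ftrace control files. */
static int write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACING_DIR "%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *trace;

	/* Select the tracer registered by this patch. */
	if (write_str("current_tracer", "function-graph"))
		return 1;

	/*
	 * Assumption: the per-tracer "overrun" option from this file is
	 * toggled through the generic trace_options interface.
	 */
	write_str("trace_options", "overrun");

	/* Dump a snapshot of the trace buffer. */
	trace = fopen(TRACING_DIR "trace", "r");
	if (!trace)
		return 1;
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);
	fclose(trace);

	return 0;
}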