path: root/kernel/trace
author     Frederic Weisbecker <fweisbec@gmail.com>    2008-11-26 19:46:33 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2008-11-27 04:59:14 -0500
commit     83a8df618eb04bd2819a758f3b409b1449862434 (patch)
tree       f6a8d6a75e2c94d7c0cfb475f11d5bd4adc20cf8 /kernel/trace
parent     c7cc77307669336a08928ab8668bdb3f3bcc021b (diff)
tracing/function-graph-tracer: enhancements for the trace output
Impact: enhance the output of the graph-tracer

This patch applies some ideas of Ingo Molnar and Steven Rostedt.

* Output leaf functions in one line with parenthesis, semicolon and
  duration output.

* Add a second column (after cpu) for an overhead sign:
  if duration > 100 us, "!"
  if duration > 10 us, "+"
  else " "

* Print output in us with remaining nanosec: u.n

* Print duration on the right end, following the indentation of the
  functions. Use also visual clues: "-" on entry call (no duration to
  output) and "+" on return (duration output).

The name of the tracer has been fixed as well: function-graph becomes
function_graph.

Here is an example of the new output:

CPU[000]   dequeue_entity() {                   -
CPU[000]     update_curr() {                    -
CPU[000]       update_min_vruntime();           + 0.512 us
CPU[000]     }                                  + 1.504 us
CPU[000]     clear_buddies();                   + 0.481 us
CPU[000]     update_min_vruntime();             + 0.504 us
CPU[000]   }                                    + 4.557 us
CPU[000]   hrtick_update() {                    -
CPU[000]     hrtick_start_fair();               + 0.489 us
CPU[000]   }                                    + 1.443 us
CPU[000] + }                                    + 14.655 us
CPU[000] + }                                    + 15.678 us
CPU[000] + }                                    + 16.686 us
CPU[000]   msecs_to_jiffies();                  + 0.481 us
CPU[000]   put_prev_task_fair();                + 0.504 us
CPU[000]   pick_next_task_fair();               + 0.482 us
CPU[000]   pick_next_task_rt();                 + 0.504 us
CPU[000]   pick_next_task_fair();               + 0.481 us
CPU[000]   pick_next_task_idle();               + 0.489 us
CPU[000]   _spin_trylock();                     + 0.655 us
CPU[000]   _spin_unlock();                      + 0.609 us

CPU[000]  ------------8<---------- thread bash-2794 ------------8<----------

CPU[000]   finish_task_switch() {               -
CPU[000]     _spin_unlock_irq();                + 0.722 us
CPU[000]   }                                    + 2.369 us
CPU[000] ! }                                    + 501972.605 us
CPU[000] ! }                                    + 501973.763 us
CPU[000]   copy_from_read_buf() {               -
CPU[000]     _spin_lock_irqsave();              + 0.670 us
CPU[000]     _spin_unlock_irqrestore();         + 0.699 us
CPU[000]     copy_to_user() {                   -
CPU[000]       might_fault() {                  -
CPU[000]         __might_sleep();               + 0.503 us
CPU[000]       }                                + 1.632 us
CPU[000]       __copy_to_user_ll();             + 0.542 us
CPU[000]     }                                  + 3.858 us
CPU[000]     tty_audit_add_data() {             -
CPU[000]       _spin_lock_irq();                + 0.609 us
CPU[000]       _spin_unlock_irq();              + 0.624 us
CPU[000]     }                                  + 3.196 us
CPU[000]     _spin_lock_irqsave();              + 0.624 us
CPU[000]     _spin_unlock_irqrestore();         + 0.625 us
CPU[000] + }                                    + 13.611 us
CPU[000]   copy_from_read_buf() {               -
CPU[000]     _spin_lock_irqsave();              + 0.624 us
CPU[000]     _spin_unlock_irqrestore();         + 0.616 us
CPU[000]   }                                    + 2.820 us

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
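Editor's note: to make the "u.n" duration format and the overhead markers concrete, here is a small user-space C sketch. It is only an approximation of the patch's print_graph_duration()/print_graph_overhead() helpers: printf() stands in for trace_seq_printf(), and plain division and modulo replace the kernel's do_div(). The sample durations are taken from the example output above.

/* Illustration of the "u.n" duration output (usecs.remaining nsecs) and the
 * overhead markers: "!" above 100 us, "+" above 10 us, blank otherwise. */
#include <stdio.h>

static const char *overhead_mark(unsigned long long duration_ns)
{
        if (duration_ns > 100000ULL)    /* more than 100 us */
                return "! ";
        if (duration_ns > 10000ULL)     /* more than 10 us */
                return "+ ";
        return "  ";
}

static void print_duration(unsigned long long duration_ns)
{
        unsigned long long usecs = duration_ns / 1000;
        unsigned long nsecs_rem = (unsigned long)(duration_ns % 1000);

        /* Like the patch, the remainder is not zero-padded. */
        printf("+ %llu.%lu us\n", usecs, nsecs_rem);
}

int main(void)
{
        /* Durations in nanoseconds, as they come out of the ring buffer. */
        unsigned long long samples[] = { 512, 14655, 501972605ULL };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                printf("%s", overhead_mark(samples[i]));
                print_duration(samples[i]);
        }
        return 0;
}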
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 168
1 file changed, 159 insertions(+), 9 deletions(-)
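Editor's note: the key idea behind the one-line leaf output in the diff below is a peek at the next event in the per-cpu ring buffer: if it is the matching TRACE_GRAPH_RET for the same pid and function, the entry and its return are folded into a single "func(); + duration" line. The following self-contained sketch models that check on a plain array of events; the structs, the event array and the entry_is_leaf() helper are invented for illustration and are not the kernel's ring buffer API, which the real code drives through ring_buffer_iter_peek().

/* Simplified model of the trace_branch_is_leaf() check added by this patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum ev_type { EV_GRAPH_ENT, EV_GRAPH_RET };

struct ev {
        enum ev_type type;
        int pid;
        const char *func;       /* stand-in for the function address */
};

/* True if events[i] is an entry immediately followed by its own return. */
static bool entry_is_leaf(const struct ev *events, size_t n, size_t i)
{
        if (i + 1 >= n)                         /* nothing left to peek at */
                return false;
        if (events[i + 1].type != EV_GRAPH_RET)
                return false;
        return events[i].pid == events[i + 1].pid &&
               strcmp(events[i].func, events[i + 1].func) == 0;
}

int main(void)
{
        const struct ev events[] = {
                { EV_GRAPH_ENT, 2794, "update_curr" },
                { EV_GRAPH_ENT, 2794, "update_min_vruntime" },
                { EV_GRAPH_RET, 2794, "update_min_vruntime" },
                { EV_GRAPH_RET, 2794, "update_curr" },
        };
        size_t n = sizeof(events) / sizeof(events[0]);
        size_t i;

        for (i = 0; i < n; i++) {
                if (events[i].type == EV_GRAPH_ENT)
                        printf("%s: %s\n", events[i].func,
                               entry_is_leaf(events, n, i) ? "leaf" : "nested");
        }
        return 0;
}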
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d31d695174aa..b958d60377b3 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,10 @@
 #include "trace.h"
 
 #define TRACE_GRAPH_INDENT 2
+/* Spaces between function call and time duration */
+#define TRACE_GRAPH_TIMESPACE_ENTRY " "
+/* Spaces between function call and closing braces */
+#define TRACE_GRAPH_TIMESPACE_RET " "
 
 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
 static struct tracer_opt trace_opts[] = {
@@ -63,26 +67,130 @@ static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
         last_pid[cpu] = pid;
         comm = trace_find_cmdline(pid);
 
-        return trace_seq_printf(s, "\nCPU[%03d]"
+        return trace_seq_printf(s, "\nCPU[%03d] "
                                 " ------------8<---------- thread %s-%d"
                                 " ------------8<----------\n\n",
                                 cpu, comm, pid);
 }
 
+static bool
+trace_branch_is_leaf(struct trace_iterator *iter,
+                struct ftrace_graph_ent_entry *curr)
+{
+        struct ring_buffer_iter *ring_iter;
+        struct ring_buffer_event *event;
+        struct ftrace_graph_ret_entry *next;
+
+        ring_iter = iter->buffer_iter[iter->cpu];
+
+        if (!ring_iter)
+                return false;
+
+        event = ring_buffer_iter_peek(iter->buffer_iter[iter->cpu], NULL);
+
+        if (!event)
+                return false;
+
+        next = ring_buffer_event_data(event);
+
+        if (next->ent.type != TRACE_GRAPH_RET)
+                return false;
+
+        if (curr->ent.pid != next->ent.pid ||
+                        curr->graph_ent.func != next->ret.func)
+                return false;
+
+        return true;
+}
+
+
+static inline int
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+        unsigned long nsecs_rem = do_div(duration, 1000);
+        return trace_seq_printf(s, "+ %llu.%lu us\n", duration, nsecs_rem);
+}
+
+/* Signal an overhead of time execution to the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+        /* Duration exceeded 100 usecs */
+        if (duration > 100000ULL)
+                return trace_seq_printf(s, "! ");
+
+        /* Duration exceeded 10 usecs */
+        if (duration > 10000ULL)
+                return trace_seq_printf(s, "+ ");
+
+        return trace_seq_printf(s, " ");
+}
+
+/* Case of a leaf function on its call entry */
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
-                struct trace_entry *ent, int cpu)
+print_graph_entry_leaf(struct trace_iterator *iter,
+                struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
 {
+        struct ftrace_graph_ret_entry *ret_entry;
+        struct ftrace_graph_ret *graph_ret;
+        struct ring_buffer_event *event;
+        struct ftrace_graph_ent *call;
+        unsigned long long duration;
         int i;
         int ret;
 
-        if (!verif_pid(s, ent->pid, cpu))
+        event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+        ret_entry = ring_buffer_event_data(event);
+        graph_ret = &ret_entry->ret;
+        call = &entry->graph_ent;
+        duration = graph_ret->rettime - graph_ret->calltime;
+
+        /* Overhead */
+        ret = print_graph_overhead(duration, s);
+        if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+        /* Function */
+        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+                ret = trace_seq_printf(s, " ");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        ret = seq_print_ip_sym(s, call->func, 0);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        ret = trace_seq_printf(s, "();");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Duration */
+        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        ret = print_graph_duration(duration, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
+                        struct trace_seq *s)
+{
+        int i;
+        int ret;
+        struct ftrace_graph_ent *call = &entry->graph_ent;
+
+        /* No overhead */
+        ret = trace_seq_printf(s, " ");
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");
                 if (!ret)
@@ -93,26 +201,62 @@ print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_printf(s, "() {\n");
+        ret = trace_seq_printf(s, "() {");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
+
+        /* No duration to print at this state */
+        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY "-\n");
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
         return TRACE_TYPE_HANDLED;
 }
 
 static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+                struct trace_iterator *iter, int cpu)
+{
+        int ret;
+        struct trace_entry *ent = iter->ent;
+
+        if (!verif_pid(s, ent->pid, cpu))
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        if (trace_branch_is_leaf(iter, field))
+                return print_graph_entry_leaf(iter, field, s);
+        else
+                return print_graph_entry_nested(field, s);
+
+}
+
+static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                 struct trace_entry *ent, int cpu)
 {
         int i;
         int ret;
+        unsigned long long duration = trace->rettime - trace->calltime;
 
+        /* Pid */
         if (!verif_pid(s, ent->pid, cpu))
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Cpu */
         ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Overhead */
+        ret = print_graph_overhead(duration, s);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        /* Closing brace */
         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");
                 if (!ret)
@@ -123,10 +267,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+        /* Duration */
+        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_RET);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        ret = print_graph_duration(duration, s);
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        /* Overrun */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);
@@ -146,7 +296,7 @@ print_graph_function(struct trace_iterator *iter)
         case TRACE_GRAPH_ENT: {
                 struct ftrace_graph_ent_entry *field;
                 trace_assign_type(field, entry);
-                return print_graph_entry(&field->graph_ent, s, entry,
+                return print_graph_entry(field, s, iter,
                                          iter->cpu);
         }
         case TRACE_GRAPH_RET: {
@@ -160,7 +310,7 @@ print_graph_function(struct trace_iterator *iter)
 }
 
 static struct tracer graph_trace __read_mostly = {
-        .name        = "function-graph",
+        .name        = "function_graph",
         .init        = graph_trace_init,
         .reset       = graph_trace_reset,
         .print_line  = print_graph_function,
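Editor's note: because the last hunk registers the tracer as "function_graph" (underscore) instead of "function-graph", the string written to the tracing interface changes accordingly. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug (older setups sometimes mount it at /debug):

/* Minimal sketch: select the renamed tracer through the debugfs tracing
 * interface.  The mount point is an assumption; adjust as needed. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

        if (!f) {
                perror("current_tracer");
                return 1;
        }
        /* The tracer now registers under the name "function_graph". */
        fputs("function_graph\n", f);
        fclose(f);
        return 0;
}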