author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-16 15:23:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-16 15:23:18 -0500
commit		b29c8306a368cf65782669eba079f81dc861c54d (patch)
tree		35d75aa0e671070d4024f11338d3ae89b078b1ed /kernel/trace/trace_functions_graph.c
parent		0bde7294e2ada03d0f1cc61cec51274081d9a9cf (diff)
parent		3a81a5210b7d33bb6d836b4c4952a54166a336f3 (diff)
Merge tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing update from Steven Rostedt:
 "This batch of changes is mostly clean ups and small bug fixes.  The
  only real feature that was added this release is from Namhyung Kim,
  who introduced "set_graph_notrace" filter that lets you run the
  function graph tracer and not trace particular functions and their
  call chain.

  Tom Zanussi added some updates to the ftrace multibuffer tracing that
  made it more consistent with the top level tracing.

  One of the fixes for perf function tracing required an API change in
  RCU; the addition of "rcu_is_watching()".  As Paul McKenney is pushing
  that change in this release too, he gave me a branch that included all
  the changes to get that working, and I pulled that into my tree in
  order to complete the perf function tracing fix"

* tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Add rcu annotation for syscall trace descriptors
  tracing: Do not use signed enums with unsigned long long in fgragh output
  tracing: Remove unused function ftrace_off_permanent()
  tracing: Do not assign filp->private_data to freed memory
  tracing: Add helper function tracing_is_disabled()
  tracing: Open tracer when ftrace_dump_on_oops is used
  tracing: Add support for SOFT_DISABLE to syscall events
  tracing: Make register/unregister_ftrace_command __init
  tracing: Update event filters for multibuffer
  recordmcount.pl: Add support for __fentry__
  ftrace: Have control op function callback only trace when RCU is watching
  rcu: Do not trace rcu_is_watching() functions
  ftrace/x86: skip over the breakpoint for ftrace caller
  trace/trace_stat: use rbtree postorder iteration helper instead of opencoding
  ftrace: Add set_graph_notrace filter
  ftrace: Narrow down the protected area of graph_lock
  ftrace: Introduce struct ftrace_graph_data
  ftrace: Get rid of ftrace_graph_filter_enabled
  tracing: Fix potential out-of-bounds in trace_get_user()
  tracing: Show more exact help information about snapshot
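For readers who want to try the new filter: set_graph_notrace is exposed as a file in the tracing debugfs directory. A minimal userspace sketch follows; the mount point /sys/kernel/debug and the need for root privileges are assumptions about a typical setup, not part of this merge.

/* Minimal sketch: ask the function graph tracer to skip kfree() and
 * everything it calls via set_graph_notrace.  Assumes debugfs is
 * mounted at /sys/kernel/debug and that this runs as root. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/set_graph_notrace";
        const char *func = "kfree\n";   /* function (and call chain) to skip */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open set_graph_notrace");
                return 1;
        }
        if (write(fd, func, strlen(func)) != (ssize_t)strlen(func))
                perror("write");
        close(fd);
        return 0;
}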
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 82 +++++++++++++++++------
1 file changed, 66 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b5c09242683d..0b99120d395c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -82,9 +82,9 @@ static struct trace_array *graph_array;
  * to fill in space into DURATION column.
  */
 enum {
-        DURATION_FILL_FULL  = -1,
-        DURATION_FILL_START = -2,
-        DURATION_FILL_END   = -3,
+        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
+        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 };
 
 static enum print_line_t
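These fill requests previously rode in the duration argument as negative values, which clashes with its unsigned long long type (the "Do not use signed enums with unsigned long long in fgragh output" fix in the shortlog); they now travel as dedicated flag bits. A standalone sketch of the same encode/decode pattern; FILL_SHIFT and FILL_MASK below are illustrative stand-ins for TRACE_GRAPH_PRINT_FILL_SHIFT and TRACE_GRAPH_PRINT_FILL_MASK, not the kernel's definitions.

/* Standalone sketch of the fill-flag encoding; FILL_SHIFT and
 * FILL_MASK are illustrative stand-ins, not the kernel's values. */
#include <stdio.h>

#define FILL_SHIFT      28
#define FILL_MASK       (0x3U << FILL_SHIFT)

enum {
        FILL_FULL  = 1U << FILL_SHIFT,  /* whole column is padding */
        FILL_START = 2U << FILL_SHIFT,  /* pad the leading part */
        FILL_END   = 3U << FILL_SHIFT,  /* pad the trailing part */
};

/* Returns the padding string for a fill request, or NULL when the
 * caller should print a real duration instead. */
static const char *fill_str(unsigned int flags)
{
        switch (flags & FILL_MASK) {
        case FILL_FULL:
                return "              |  ";
        case FILL_START:
                return "  ";
        case FILL_END:
                return " |";
        }
        return NULL;
}

int main(void)
{
        unsigned int flags = 0x5;       /* unrelated low bits pass through */

        printf("[%s]\n", fill_str(flags | FILL_START));
        return 0;
}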
@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                 return -EBUSY;
         }
 
+        /*
+         * The curr_ret_stack is an index to ftrace return stack of
+         * current task.  Its value should be in [0, FTRACE_RETFUNC_
+         * DEPTH) when the function graph tracer is used.  To support
+         * filtering out specific functions, it makes the index
+         * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
+         * so when it sees a negative index the ftrace will ignore
+         * the record.  And the index gets recovered when returning
+         * from the filtered function by adding the FTRACE_NOTRACE_
+         * DEPTH and then it'll continue to record functions normally.
+         *
+         * The curr_ret_stack is initialized to -1 and get increased
+         * in this function.  So it can be less than -1 only if it was
+         * filtered out via ftrace_graph_notrace_addr() which can be
+         * set from set_graph_notrace file in debugfs by user.
+         */
+        if (current->curr_ret_stack < -1)
+                return -EBUSY;
+
         calltime = trace_clock_local();
 
         index = ++current->curr_ret_stack;
+        if (ftrace_graph_notrace_addr(func))
+                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
         barrier();
         current->ret_stack[index].ret = ret;
         current->ret_stack[index].func = func;
         current->ret_stack[index].calltime = calltime;
         current->ret_stack[index].subtime = 0;
         current->ret_stack[index].fp = frame_pointer;
-        *depth = index;
+        *depth = current->curr_ret_stack;
 
         return 0;
 }
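The comment added in this hunk is the heart of the feature: a function matched by set_graph_notrace still takes one ret_stack slot, but curr_ret_stack is biased negative by FTRACE_NOTRACE_DEPTH so that further pushes are refused until the function returns, when the bias is removed again (see the pop and return-handler hunks below). A self-contained sketch of that round trip; NOTRACE_DEPTH is a made-up stand-in for FTRACE_NOTRACE_DEPTH, and the real ret_stack bookkeeping is deliberately left out.

/* Self-contained sketch of the index-biasing round trip described in
 * the comment above.  NOTRACE_DEPTH is a made-up stand-in for
 * FTRACE_NOTRACE_DEPTH; any value far larger than the real stack
 * depth would do. */
#include <assert.h>
#include <stdio.h>

#define NOTRACE_DEPTH   65536

static int curr_ret_stack = -1;

/* Model of the push side: refuse entries inside a filtered chain,
 * otherwise take a slot and bias the index if filtered. */
static void enter(int filtered)
{
        if (curr_ret_stack < -1)
                return;                 /* inside a filtered chain: ignored */
        ++curr_ret_stack;               /* take a ret_stack slot */
        if (filtered)
                curr_ret_stack -= NOTRACE_DEPTH;
}

/* Model of the return path: drop the slot and undo the bias when
 * leaving the filtered function. */
static void leave(void)
{
        --curr_ret_stack;
        if (curr_ret_stack < -1)
                curr_ret_stack += NOTRACE_DEPTH;
}

int main(void)
{
        enter(0);                       /* traced function */
        assert(curr_ret_stack == 0);
        enter(1);                       /* filtered: slot taken, index biased */
        enter(0);                       /* its child: no slot, silently skipped */
        assert(curr_ret_stack < -1);
        leave();                        /* filtered returns: index recovered */
        assert(curr_ret_stack == 0);
        leave();                        /* traced function returns */
        assert(curr_ret_stack == -1);
        puts("round trip OK");
        return 0;
}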
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
         index = current->curr_ret_stack;
 
-        if (unlikely(index < 0)) {
+        /*
+         * A negative index here means that it's just returned from a
+         * notrace'd function.  Recover index to get an original
+         * return address.  See ftrace_push_return_trace().
+         *
+         * TODO: Need to check whether the stack gets corrupted.
+         */
+        if (index < 0)
+                index += FTRACE_NOTRACE_DEPTH;
+
+        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                 ftrace_graph_stop();
                 WARN_ON(1);
                 /* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
         trace.rettime = trace_clock_local();
         barrier();
         current->curr_ret_stack--;
+        /*
+         * The curr_ret_stack can be less than -1 only if it was
+         * filtered out and it's about to return from the function.
+         * Recover the index and continue to trace normal functions.
+         */
+        if (current->curr_ret_stack < -1) {
+                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+                return ret;
+        }
 
         /*
          * The trace should run after decrementing the ret counter
@@ -230,7 +270,7 @@ int __trace_graph_entry(struct trace_array *tr,
                 return 0;
         entry = ring_buffer_event_data(event);
         entry->graph_ent = *trace;
-        if (!filter_current_check_discard(buffer, call, entry, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);
 
         return 1;
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
         /* trace it when it is-nested-in or is a function enabled. */
         if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-             ftrace_graph_ignore_irqs()) ||
+             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
             (max_depth && trace->depth >= max_depth))
                 return 0;
 
+        /*
+         * Do not trace a function if it's filtered by set_graph_notrace.
+         * Make the index of ret stack negative to indicate that it should
+         * ignore further functions.  But it needs its own ret stack entry
+         * to recover the original index in order to continue tracing after
+         * returning from the function.
+         */
+        if (ftrace_graph_notrace_addr(trace->func))
+                return 1;
+
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->trace_buffer.data, cpu);
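The ordering of these checks matters: trace->depth < 0 is tested first, so any function entered while inside a notrace'd chain is rejected before ftrace_graph_notrace_addr() is consulted, and nested filtered functions cannot consume extra ret_stack entries. A tiny sketch of just this decision order; is_notrace_addr() and the magic address are hypothetical stand-ins for ftrace_graph_notrace_addr() and a real filtered symbol.

/* Sketch of the entry-side decision order only; is_notrace_addr() and
 * the address below are hypothetical stand-ins. */
#include <stdio.h>

#define NOTRACE_DEPTH   65536

static int is_notrace_addr(unsigned long func)
{
        return func == 0xdeadbeefUL;    /* pretend this symbol is filtered */
}

/* Returns 1 when the function should get a ret_stack entry. */
static int graph_entry(unsigned long func, int depth)
{
        if (depth < 0)
                return 0;       /* inside a filtered chain: reject early */
        if (is_notrace_addr(func))
                return 1;       /* filtered itself: still needs an entry
                                 * so the biased index can be recovered */
        return 1;               /* ordinary traced function */
}

int main(void)
{
        printf("traced:   %d\n", graph_entry(0x1000UL, 0));
        printf("filtered: %d\n", graph_entry(0xdeadbeefUL, 1));
        printf("child:    %d\n", graph_entry(0x2000UL, 1 - NOTRACE_DEPTH));
        return 0;
}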
@@ -335,7 +385,7 @@ void __trace_graph_return(struct trace_array *tr,
                 return;
         entry = ring_buffer_event_data(event);
         entry->ret = *trace;
-        if (!filter_current_check_discard(buffer, call, entry, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);
 }
 
@@ -652,7 +702,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         }
 
         /* No overhead */
-        ret = print_graph_duration(DURATION_FILL_START, s, flags);
+        ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
         if (ret != TRACE_TYPE_HANDLED)
                 return ret;
 
@@ -664,7 +714,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = print_graph_duration(DURATION_FILL_END, s, flags);
+        ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
         if (ret != TRACE_TYPE_HANDLED)
                 return ret;
 
@@ -729,14 +779,14 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
                 return TRACE_TYPE_HANDLED;
 
         /* No real adata, just filling the column with spaces */
-        switch (duration) {
-        case DURATION_FILL_FULL:
+        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
+        case FLAGS_FILL_FULL:
                 ret = trace_seq_puts(s, "              |  ");
                 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-        case DURATION_FILL_START:
+        case FLAGS_FILL_START:
                 ret = trace_seq_puts(s, "  ");
                 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
-        case DURATION_FILL_END:
+        case FLAGS_FILL_END:
                 ret = trace_seq_puts(s, " |");
                 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
         }
@@ -852,7 +902,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
         }
 
         /* No time */
-        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+        ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
         if (ret != TRACE_TYPE_HANDLED)
                 return ret;
 
@@ -1172,7 +1222,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                 return TRACE_TYPE_PARTIAL_LINE;
 
         /* No time */
-        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+        ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
         if (ret != TRACE_TYPE_HANDLED)
                 return ret;
 