author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>		2014-01-13 10:30:23 -0500
committer	Steven Rostedt <rostedt@goodmis.org>			2014-01-13 10:52:58 -0500
commit		23a8e8441a0a74dd612edf81dc89d1600bc0a3d1 (patch)
tree		51808ae87f00c4acfc114d739baf4e13c44afa29 /kernel/trace
parent		405e1d834807e51b2ebd3dea81cb51e53fb61504 (diff)
ftrace: Have function graph only trace based on global_ops filters
While running some different tests, I discovered that function graph tracing, when filtered via the set_ftrace_filter and set_ftrace_notrace files, does not always stay within those filters if another function ftrace_ops is registered to trace functions.

The reason is that the function graph tracer simply traces every function that the function tracer enables. When there was only one user of function tracing, the function graph tracer did not need to worry about being called for functions it did not want to trace. Now that there are other users, this becomes a problem.

For example, one just needs to do the following:

 # cd /sys/kernel/debug/tracing
 # echo schedule > set_ftrace_filter
 # echo function_graph > current_tracer
 # cat trace
 [..]
  0)               |  schedule() {
  ------------------------------------------
  0)    <idle>-0    =>   rcu_pre-7
  ------------------------------------------

  0) ! 2980.314 us |  }
  0)               |  schedule() {
  ------------------------------------------
  0)   rcu_pre-7    =>    <idle>-0
  ------------------------------------------

  0) + 20.701 us   |  }

 # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 # cat trace
 [..]
  1) + 20.825 us   |        }
  1) + 21.651 us   |      }
  1) + 30.924 us   |    } /* SyS_ioctl */
  1)               |  do_page_fault() {
  1)               |    __do_page_fault() {
  1)   0.274 us    |      down_read_trylock();
  1)   0.098 us    |      find_vma();
  1)               |      handle_mm_fault() {
  1)               |        _raw_spin_lock() {
  1)   0.102 us    |          preempt_count_add();
  1)   0.097 us    |          do_raw_spin_lock();
  1)   2.173 us    |        }
  1)               |        do_wp_page() {
  1)   0.079 us    |          vm_normal_page();
  1)   0.086 us    |          reuse_swap_page();
  1)   0.076 us    |          page_move_anon_rmap();
  1)               |          unlock_page() {
  1)   0.082 us    |            page_waitqueue();
  1)   0.086 us    |            __wake_up_bit();
  1)   1.801 us    |          }
  1)   0.075 us    |          ptep_set_access_flags();
  1)               |          _raw_spin_unlock() {
  1)   0.098 us    |            do_raw_spin_unlock();
  1)   0.105 us    |            preempt_count_sub();
  1)   1.884 us    |          }
  1)   9.149 us    |        }
  1) + 13.083 us   |      }
  1)   0.146 us    |      up_read();

Enabling the stack tracer caused all functions to be traced, and the function graph tracer then traced them as well. This is a side effect that should not occur.

To fix this, a test is added when function tracing changes, as well as when the graph tracer is enabled, to see if anything other than the ftrace global_ops function tracer is enabled. If so, the graph tracer calls a test trampoline that looks at the function being traced and compares it against the filters defined by global_ops.

As an optimization, if no other function tracer is registered, or if the only registered function tracers also use the global ops, the function graph infrastructure calls the registered function graph callback directly rather than going through the test trampoline.

Cc: stable@vger.kernel.org # 3.3+
Fixes: d2d45c7a03a2 ("tracing: Have stack_tracer use a separate list of functions")
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
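The mechanism is easier to see stripped of the kernel plumbing. The sketch below is a minimal user-space model of the gating idea, not kernel code: passes_global_filter(), real_graph_entry(), graph_entry_gate() and pick_entry() are hypothetical stand-ins for ftrace_ops_test(), __ftrace_graph_entry, ftrace_graph_entry_test() and update_function_graph_func() from the patch below.

	#include <stdio.h>

	typedef int (*graph_entry_t)(unsigned long ip);

	/* Pretend address that was written to set_ftrace_filter. */
	static unsigned long filtered_ip = 0xc0de;

	/* Stand-in for ftrace_ops_test(&global_ops, ip, NULL): nonzero
	 * when ip passes the filter hash and is absent from the
	 * notrace hash. */
	static int passes_global_filter(unsigned long ip)
	{
		return ip == filtered_ip;
	}

	/* Stand-in for __ftrace_graph_entry: the real entry callback. */
	static int real_graph_entry(unsigned long ip)
	{
		printf("graph-trace %#lx\n", ip);
		return 1;
	}

	/* Stand-in for ftrace_graph_entry_test(): gate each hit through
	 * the global_ops filters before calling the real callback. */
	static int graph_entry_gate(unsigned long ip)
	{
		if (!passes_global_filter(ip))
			return 0;
		return real_graph_entry(ip);
	}

	/* Stand-in for update_function_graph_func(): pay for the gate
	 * only when another ftrace user may have widened the set of
	 * traced functions. */
	static graph_entry_t pick_entry(int other_tracers_active)
	{
		return other_tracers_active ? graph_entry_gate
					    : real_graph_entry;
	}

	int main(void)
	{
		graph_entry_t entry = pick_entry(1); /* stack tracer on */

		entry(0xc0de);	/* in the filter: traced */
		entry(0xbeef);	/* outside the filter: dropped */
		return 0;
	}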
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	45
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0ffb811cbb1f..7f21b06648e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -297,6 +297,12 @@ static void ftrace_sync_ipi(void *data)
 	smp_rmb();
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -329,6 +335,8 @@ static void update_ftrace_function(void)
 	if (ftrace_trace_function == func)
 		return;
 
+	update_function_graph_func();
+
 	/*
 	 * If we are using the list function, it doesn't care
 	 * about the function_trace_ops.
@@ -4906,6 +4914,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -5047,6 +5056,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
 			FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+		return 0;
+	return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, and not just trace any function
+ * that any ftrace_ops has registered.
+ */
+static void update_function_graph_func(void)
+{
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list == &global_ops &&
+	     global_ops.next == &ftrace_list_end))
+		ftrace_graph_entry = __ftrace_graph_entry;
+	else
+		ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -5071,7 +5104,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	}
 
 	ftrace_graph_return = retfunc;
-	ftrace_graph_entry = entryfunc;
+
+	/*
+	 * Update the indirect function to the entryfunc, and the
+	 * function that gets called to the entry_test first. Then
+	 * call the update fgraph entry function to determine if
+	 * the entryfunc should be called directly or not.
+	 */
+	__ftrace_graph_entry = entryfunc;
+	ftrace_graph_entry = ftrace_graph_entry_test;
+	update_function_graph_func();
 
 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -5090,6 +5132,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
+	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
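With the patch applied, the changelog's reproduction steps double as a quick regression check (a hypothetical session mirroring the one above; exact output will vary by machine). After turning on the stack tracer, the graph output should still contain only schedule():

 # cd /sys/kernel/debug/tracing
 # echo schedule > set_ftrace_filter
 # echo function_graph > current_tracer
 # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 # cat trace

The page-fault and spinlock noise from the broken trace above should no longer appear, since ftrace_graph_entry_test() now rejects every function that does not pass the global_ops filters.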