author     Steven Rostedt <rostedt@goodmis.org>              2014-02-07 14:42:35 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-02-13 16:48:03 -0500
commit     1499a3eb0473cad61fb2ed8f2e5247f4568a5786 (patch)
tree       bd885c94971688dd3cb45e85a71c07f7705f1df5
parent     b6c5a8d32c2e2fea5a574365cef91bbfe75d861c (diff)
ftrace: Have function graph only trace based on global_ops filters
commit 23a8e8441a0a74dd612edf81dc89d1600bc0a3d1 upstream.

Doing some different tests, I discovered that function graph tracing,
when filtered via the set_ftrace_filter and set_ftrace_notrace files,
does not always honor those filters if another function ftrace_ops is
registered to trace functions.

The reason is that function graph just happens to trace all functions
that the function tracer enables. When there was only one user of
function tracing, the function graph tracer did not need to worry about
being called by functions that it did not want to trace. But now that
there are other users, this becomes a problem.

For example, one just needs to do the following:

 # cd /sys/kernel/debug/tracing
 # echo schedule > set_ftrace_filter
 # echo function_graph > current_tracer
 # cat trace
[..]
 0)               |  schedule() {
 ------------------------------------------
 0)    <idle>-0    =>   rcu_pre-7
 ------------------------------------------

 0) ! 2980.314 us |  }
 0)               |  schedule() {
 ------------------------------------------
 0)   rcu_pre-7    =>    <idle>-0
 ------------------------------------------

 0) + 20.701 us   |  }

 # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 # cat trace
[..]
 1) + 20.825 us   |        }
 1) + 21.651 us   |      }
 1) + 30.924 us   |    } /* SyS_ioctl */
 1)               |  do_page_fault() {
 1)               |    __do_page_fault() {
 1)   0.274 us    |      down_read_trylock();
 1)   0.098 us    |      find_vma();
 1)               |      handle_mm_fault() {
 1)               |        _raw_spin_lock() {
 1)   0.102 us    |          preempt_count_add();
 1)   0.097 us    |          do_raw_spin_lock();
 1)   2.173 us    |        }
 1)               |        do_wp_page() {
 1)   0.079 us    |          vm_normal_page();
 1)   0.086 us    |          reuse_swap_page();
 1)   0.076 us    |          page_move_anon_rmap();
 1)               |          unlock_page() {
 1)   0.082 us    |            page_waitqueue();
 1)   0.086 us    |            __wake_up_bit();
 1)   1.801 us    |          }
 1)   0.075 us    |          ptep_set_access_flags();
 1)               |          _raw_spin_unlock() {
 1)   0.098 us    |            do_raw_spin_unlock();
 1)   0.105 us    |            preempt_count_sub();
 1)   1.884 us    |          }
 1)   9.149 us    |        }
 1) + 13.083 us   |      }
 1)   0.146 us    |  up_read();

When the stack tracer was enabled, it enabled all functions to be
traced, which the function graph tracer now traces as well. This is a
side effect that should not occur.

To fix this, a test is added when the function tracing is changed, as
well as when the graph tracer is enabled, to see if anything other than
the ftrace global_ops function tracer is enabled. If so, then the graph
tracer calls a test trampoline that will look at the function that is
being traced and compare it with the filters defined by the global_ops.

As an optimization, if there are no other function tracers registered,
or if the only registered function tracers also use the global ops, the
function graph infrastructure will call the registered function graph
callback directly and not go through the test trampoline.

Fixes: d2d45c7a03a2 "tracing: Have stack_tracer use a separate list of functions"
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
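The fix described above is a gated-dispatch pattern: the real callback is
kept in a private pointer, and the public entry pointer is switched between
the callback itself and a filtering trampoline. Below is a minimal
userspace sketch of that pattern, not kernel code; every name in it
(graph_entry, real_entry, entry_test, other_tracers_active, filter_allows)
is made up for illustration:

	#include <stdio.h>
	#include <stdbool.h>

	typedef int (*entry_func_t)(const char *func_name);

	static bool other_tracers_active;  /* models "a non-global_ops tracer exists" */
	static entry_func_t real_entry;    /* models __ftrace_graph_entry */
	static entry_func_t graph_entry;   /* models ftrace_graph_entry */

	/* Stand-in for ftrace_ops_test(&global_ops, ...): only names
	 * starting with 's' (e.g. "schedule") pass the filter. */
	static bool filter_allows(const char *func_name)
	{
		return func_name[0] == 's';
	}

	/* Trampoline: re-check the filter before calling the real callback. */
	static int entry_test(const char *func_name)
	{
		if (!filter_allows(func_name))
			return 0;
		return real_entry(func_name);
	}

	/* Models update_function_graph_func(): direct dispatch when it is
	 * safe, gated dispatch otherwise. */
	static void update_graph_entry(void)
	{
		graph_entry = other_tracers_active ? entry_test : real_entry;
	}

	static int my_entry(const char *func_name)
	{
		printf("graph traced: %s\n", func_name);
		return 1;
	}

	int main(void)
	{
		real_entry = my_entry;
		update_graph_entry();          /* no other tracers: direct call */
		graph_entry("schedule");       /* traced */

		other_tracers_active = true;   /* e.g. the stack tracer registers */
		update_graph_entry();          /* every call is now filtered first */
		graph_entry("do_page_fault");  /* suppressed by the trampoline */
		graph_entry("schedule");       /* still traced */
		return 0;
	}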
-rw-r--r--	kernel/trace/ftrace.c	45
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 17b95a492948..4b93b8412252 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -297,6 +297,12 @@ static void ftrace_sync_ipi(void *data)
 	smp_rmb();
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -329,6 +335,8 @@ static void update_ftrace_function(void)
 	if (ftrace_trace_function == func)
 		return;
 
+	update_function_graph_func();
+
 	/*
 	 * If we are using the list function, it doesn't care
 	 * about the function_trace_ops.
@@ -4810,6 +4818,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -4951,6 +4960,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
 				FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+		return 0;
+	return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, rather than tracing any function
+ * that any ftrace_ops has registered.
+ */
+static void update_function_graph_func(void)
+{
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list == &global_ops &&
+	     global_ops.next == &ftrace_list_end))
+		ftrace_graph_entry = __ftrace_graph_entry;
+	else
+		ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
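The guard in update_function_graph_func() above is worth unpacking: ftrace
keeps its registered ops in a singly linked list terminated by the
ftrace_list_end sentinel, so "direct dispatch is safe" means the list is
either empty or contains exactly global_ops. A stand-alone model of that
check, with simplified stand-in types and names rather than the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct ops { struct ops *next; };

	static struct ops list_end;                /* models ftrace_list_end */
	static struct ops global_ops = { &list_end };
	static struct ops *ops_list = &list_end;   /* models ftrace_ops_list */

	/* True when direct dispatch is safe: no ops, or only global_ops. */
	static bool only_global_ops(void)
	{
		return ops_list == &list_end ||
		       (ops_list == &global_ops && global_ops.next == &list_end);
	}

	int main(void)
	{
		printf("%d\n", only_global_ops());  /* 1: empty list */

		ops_list = &global_ops;
		printf("%d\n", only_global_ops());  /* 1: only global_ops */

		struct ops stack_ops = { .next = &global_ops };
		ops_list = &stack_ops;              /* e.g. stack tracer registered */
		printf("%d\n", only_global_ops());  /* 0: gated dispatch needed */
		return 0;
	}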
@@ -4975,7 +5008,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	}
 
 	ftrace_graph_return = retfunc;
-	ftrace_graph_entry = entryfunc;
+
+	/*
+	 * Update the indirect function to the entryfunc, and the
+	 * function that gets called to the entry_test first. Then
+	 * call the update fgraph entry function to determine if
+	 * the entryfunc should be called directly or not.
+	 */
+	__ftrace_graph_entry = entryfunc;
+	ftrace_graph_entry = ftrace_graph_entry_test;
+	update_function_graph_func();
 
 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -4994,6 +5036,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
+	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
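For context, a sketch of how a caller of this era's two-pointer API hooks
in, mirroring what the built-in function_graph tracer does (the callback
names here are hypothetical, and register_ftrace_graph() is not exported
to modules, so this only applies to built-in code; later kernels replaced
this signature with a struct fgraph_ops argument):

	#include <linux/ftrace.h>
	#include <linux/init.h>

	/* Returning nonzero from the entry handler tells the graph
	 * tracer to trace this function and its children. */
	static int my_graph_entry(struct ftrace_graph_ent *trace)
	{
		return 1;
	}

	static void my_graph_return(struct ftrace_graph_ret *trace)
	{
	}

	static int __init my_tracer_init(void)
	{
		/*
		 * After this patch, my_graph_entry is saved in
		 * __ftrace_graph_entry, and dispatch is routed through
		 * ftrace_graph_entry_test whenever a tracer outside
		 * global_ops is also registered.
		 */
		return register_ftrace_graph(my_graph_return, my_graph_entry);
	}

	static void my_tracer_exit(void)
	{
		/* Also resets __ftrace_graph_entry back to the stub. */
		unregister_ftrace_graph();
	}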