aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2014-07-11 14:39:10 -0400
committerSteven Rostedt <rostedt@goodmis.org>2014-07-16 11:01:24 -0400
commit646d7043adf3d92de5d3db1244a82a12628303de (patch)
tree09d3ed9c379a06b0ba7ce9a2ec5173ba8a097b96
parentca65ef1ab6b498b77985b9a3f5ab12c09bbf764e (diff)
ftrace: Allow archs to specify if they need a separate function graph trampoline
Currently if an arch supports function graph tracing, the core code will just assign the function graph trampoline to the function graph addr that gets called. But as the old method for function graph tracing always calls the function trampoline first and that calls the function graph trampoline, some archs may have the function graph trampoline dependent on operations that were done in the function trampoline. This causes function graph tracer to break on those archs. Instead of having the default be to set the function graph ftrace_ops to the function graph trampoline, have it instead just set it to zero which will keep it from jumping to a trampoline that is not set up to be jumped directly to. Link: http://lkml.kernel.org/r/53BED155.9040607@nvidia.com Reported-by: Tuomas Tynkkynen <ttynkkynen@nvidia.com> Tested-by: Tuomas Tynkkynen <ttynkkynen@nvidia.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--include/linux/ftrace.h10
-rw-r--r--kernel/trace/ftrace.c6
2 files changed, 14 insertions, 2 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 11e18fd58b1a..4807a39e7ae1 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -453,6 +453,16 @@ void ftrace_modify_all_code(int command);
 #endif
 #endif
 
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address to jump to.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45aac1a742c5..1776153ea6e0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5366,7 +5366,8 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* Optimize function graph calling (if implemented by arch) */
-	global_ops.trampoline = FTRACE_GRAPH_ADDR;
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
 #endif
 
 	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
@@ -5390,7 +5391,8 @@ void unregister_ftrace_graph(void)
 	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
 	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
 #ifdef CONFIG_DYNAMIC_FTRACE
-	global_ops.trampoline = 0;
+	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+		global_ops.trampoline = 0;
 #endif
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);