author     Pratyush Anand <panand@redhat.com>      2015-03-06 13:28:06 -0500
committer  Steven Rostedt <rostedt@goodmis.org>    2015-03-09 10:50:51 -0400
commit     1619dc3f8f555ee1cdd3c75db3885d5715442b12 (patch)
tree       40c56f34c48ad68a4394583b9aa011884e74ca28
parent     b24d443b8f17d9776f5fc1f6c780a0a21eb02913 (diff)
ftrace: Fix en(dis)able graph caller when en(dis)abling record via sysctl
When ftrace is enabled globally through the proc interface, we must check if
ftrace_graph_active is set. If it is set, then we should also pass the
FTRACE_START_FUNC_RET command to ftrace_run_update_code(). Similarly, when
ftrace is disabled globally through the proc interface, we must check if
ftrace_graph_active is set. If it is set, then we should also pass the
FTRACE_STOP_FUNC_RET command to ftrace_run_update_code().

Consider the following situation.

 # echo 0 > /proc/sys/kernel/ftrace_enabled

After this ftrace_enabled = 0.

 # echo function_graph > /sys/kernel/debug/tracing/current_tracer

Since ftrace_enabled = 0, ftrace_enable_ftrace_graph_caller() is never called.

 # echo 1 > /proc/sys/kernel/ftrace_enabled

Now ftrace_enabled will be set to true, but
ftrace_enable_ftrace_graph_caller() will still not be called, which is not
desired.

Further, if we execute the following after this:

 # echo nop > /sys/kernel/debug/tracing/current_tracer

Now, since ftrace_enabled is set, it will call
ftrace_disable_ftrace_graph_caller(), which causes a kernel warning on the
ARM platform.

On the ARM platform, when ftrace_enable_ftrace_graph_caller() is called, it
checks whether the old instruction is a nop or not. If it is not a nop, it
returns an error. If it is a nop, it replaces the instruction at that address
with a branch to ftrace_graph_caller. ftrace_disable_ftrace_graph_caller()
behaves just the opposite. Therefore, if generic ftrace code ever calls
either ftrace_enable_ftrace_graph_caller() or
ftrace_disable_ftrace_graph_caller() twice in a row, the second call returns
an error, which causes the generic ftrace code to raise a warning.

Note, x86 does not have an issue with this because the architecture-specific
code for ftrace_enable_ftrace_graph_caller() and
ftrace_disable_ftrace_graph_caller() does not check the previous state, and
calling either of these functions twice in a row has no ill effect.

Link: http://lkml.kernel.org/r/e4fbe64cdac0dd0e86a3bf914b0f83c0b419f146.1425666454.git.panand@redhat.com

Cc: stable@vger.kernel.org # 2.6.31+
Signed-off-by: Pratyush Anand <panand@redhat.com>
[ removed extra if (ftrace_start_up) and defined ftrace_graph_active
  as 0 if CONFIG_FUNCTION_GRAPH_TRACER is not set. ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
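To make the ARM behaviour described above concrete, here is a small,
self-contained user-space sketch. It is not the actual arch code:
INSN_NOP, INSN_BRANCH, enable_graph_caller() and disable_graph_caller()
are illustrative stand-ins for the real helpers. It models a patch site
whose toggle helpers refuse to move to the state it is already in, so a
call in the wrong direction returns an error, which is what lets the
generic ftrace code detect the broken bookkeeping and warn.

#include <stdio.h>

#define INSN_NOP     0
#define INSN_BRANCH  1

static int patch_site = INSN_NOP;       /* stand-in for the mcount call site */

/* Rough analogue of ftrace_enable_ftrace_graph_caller() on ARM: only a
 * nop may be replaced by the branch to the graph caller. */
static int enable_graph_caller(void)
{
        if (patch_site != INSN_NOP)
                return -1;              /* generic code turns this into a warning */
        patch_site = INSN_BRANCH;
        return 0;
}

/* Rough analogue of ftrace_disable_ftrace_graph_caller(): only an
 * installed branch may be turned back into a nop. */
static int disable_graph_caller(void)
{
        if (patch_site != INSN_BRANCH)
                return -1;
        patch_site = INSN_NOP;
        return 0;
}

int main(void)
{
        /* Mirrors the changelog sequence: the enable was skipped while
         * ftrace_enabled was 0, so the later disable still finds a nop. */
        printf("disable without prior enable: %d\n", disable_graph_caller());
        printf("enable:                       %d\n", enable_graph_caller());
        printf("disable:                      %d\n", disable_graph_caller());
        return 0;
}

Built with any C compiler, the first disable fails, mirroring the
"echo nop" step above where the earlier enable had been skipped, while
the matched enable/disable pair afterwards succeeds.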
-rw-r--r--  kernel/trace/ftrace.c  28
1 file changed, 22 insertions, 6 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 14947e014b78..ea520bb54d44 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -2692,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 static void ftrace_startup_sysctl(void)
 {
+        int command;
+
         if (unlikely(ftrace_disabled))
                 return;
 
         /* Force update next time */
         saved_ftrace_func = NULL;
         /* ftrace_start_up is true if we want ftrace running */
-        if (ftrace_start_up)
-                ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+        if (ftrace_start_up) {
+                command = FTRACE_UPDATE_CALLS;
+                if (ftrace_graph_active)
+                        command |= FTRACE_START_FUNC_RET;
+                ftrace_run_update_code(command);
+        }
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+        int command;
+
         if (unlikely(ftrace_disabled))
                 return;
 
         /* ftrace_start_up is true if ftrace is running */
-        if (ftrace_start_up)
-                ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+        if (ftrace_start_up) {
+                command = FTRACE_DISABLE_CALLS;
+                if (ftrace_graph_active)
+                        command |= FTRACE_STOP_FUNC_RET;
+                ftrace_run_update_code(command);
+        }
 }
 
 static cycle_t ftrace_update_time;
@@ -5594,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
         ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
 
-static int ftrace_graph_active;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
         return 0;
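For reference, the core of the fix above is plain bitmask composition:
start from the base command and OR in the function-return bit only while
the graph tracer is active. A minimal standalone sketch of that pattern
follows; the CMD_* values are illustrative and do not reproduce the
kernel's internal FTRACE_* command flags.

#include <stdio.h>

/* Illustrative bit values only; the real command flags live in
 * kernel/trace/ftrace.c and are not reproduced here. */
#define CMD_UPDATE_CALLS        (1 << 0)
#define CMD_START_FUNC_RET      (1 << 3)

int main(void)
{
        int graph_active = 1;   /* pretend function_graph is the current tracer */
        int command = CMD_UPDATE_CALLS;

        /* Same shape as the patched ftrace_startup_sysctl(): the function
         * return bit is added only while graph tracing is active. */
        if (graph_active)
                command |= CMD_START_FUNC_RET;

        printf("command mask: 0x%x\n", command);
        return 0;
}

ftrace_shutdown_sysctl() applies the same shape with FTRACE_DISABLE_CALLS
and FTRACE_STOP_FUNC_RET.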