aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ftrace.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2012-06-05 09:44:25 -0400
committerSteven Rostedt <rostedt@goodmis.org>2012-07-19 13:18:22 -0400
commitccf3672d530170c98c734dfc5db07d64bcbad2ad (patch)
tree0cbe9a0856429412b9e026baf77cec9e117c6896 /kernel/trace/ftrace.c
parent2f5f6ad9390c1ebbf738d130dbfe80b60eaa167e (diff)
ftrace: Consolidate arch dependent functions with 'list' function
As the function tracer starts to get more features, the support for these features will spread out throughout the different architectures over time. These features boil down to what each arch does in the mcount trampoline (the ftrace_caller). Currently there are two features that are not the same throughout the archs. 1) Support to stop function tracing before the callback 2) passing of the ftrace ops Both of these require placing an indirect function to support the features if the mcount trampoline does not. On a side note, for all architectures, when more than one callback is registered to the function tracer, an intermediate 'list' function is called by the mcount trampoline to iterate through the callbacks that are registered. Instead of making a separate function for each of these features, and requiring several indirect calls, just use the single 'list' function as the intermediate, to handle all cases. If an arch does not support the 'stop function tracing' or the passing of ftrace ops, just force it to use the list function that will handle the features required. This makes the code cleaner and simpler and removes a lot of #ifdefs in the code. Link: http://lkml.kernel.org/r/20120612225424.495625483@goodmis.org Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--kernel/trace/ftrace.c45
1 files changed, 4 insertions, 41 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f2ab9352a68..4cbca2e6eb70 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -97,8 +97,6 @@ static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
97static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 97static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
98static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 98static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
99ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 99ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
100static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
101ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
102ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 100ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
103static struct ftrace_ops global_ops; 101static struct ftrace_ops global_ops;
104static struct ftrace_ops control_ops; 102static struct ftrace_ops control_ops;
@@ -162,26 +160,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
162void clear_ftrace_function(void) 160void clear_ftrace_function(void)
163{ 161{
164 ftrace_trace_function = ftrace_stub; 162 ftrace_trace_function = ftrace_stub;
165 __ftrace_trace_function = ftrace_stub;
166 __ftrace_trace_function_delay = ftrace_stub;
167 ftrace_pid_function = ftrace_stub; 163 ftrace_pid_function = ftrace_stub;
168} 164}
169 165
170#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
171/*
172 * For those archs that do not test ftrace_trace_stop in their
173 * mcount call site, we need to do it from C.
174 */
175static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
176 struct ftrace_ops *op)
177{
178 if (function_trace_stop)
179 return;
180
181 __ftrace_trace_function(ip, parent_ip, op);
182}
183#endif
184
185static void control_ops_disable_all(struct ftrace_ops *ops) 166static void control_ops_disable_all(struct ftrace_ops *ops)
186{ 167{
187 int cpu; 168 int cpu;
@@ -246,7 +227,7 @@ static void update_ftrace_function(void)
246 if (ftrace_ops_list == &ftrace_list_end || 227 if (ftrace_ops_list == &ftrace_list_end ||
247 (ftrace_ops_list->next == &ftrace_list_end && 228 (ftrace_ops_list->next == &ftrace_list_end &&
248 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) && 229 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
249 ARCH_SUPPORTS_FTRACE_OPS)) { 230 !FTRACE_FORCE_LIST_FUNC)) {
250 /* Set the ftrace_ops that the arch callback uses */ 231 /* Set the ftrace_ops that the arch callback uses */
251 if (ftrace_ops_list == &global_ops) 232 if (ftrace_ops_list == &global_ops)
252 function_trace_op = ftrace_global_list; 233 function_trace_op = ftrace_global_list;
@@ -259,18 +240,7 @@ static void update_ftrace_function(void)
259 func = ftrace_ops_list_func; 240 func = ftrace_ops_list_func;
260 } 241 }
261 242
262#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
263 ftrace_trace_function = func; 243 ftrace_trace_function = func;
264#else
265#ifdef CONFIG_DYNAMIC_FTRACE
266 /* do not update till all functions have been modified */
267 __ftrace_trace_function_delay = func;
268#else
269 __ftrace_trace_function = func;
270#endif
271 ftrace_trace_function =
272 (func == ftrace_stub) ? func : ftrace_test_stop_func;
273#endif
274} 244}
275 245
276static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) 246static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
@@ -1902,16 +1872,6 @@ static void ftrace_run_update_code(int command)
1902 */ 1872 */
1903 arch_ftrace_update_code(command); 1873 arch_ftrace_update_code(command);
1904 1874
1905#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1906 /*
1907 * For archs that call ftrace_test_stop_func(), we must
1908 * wait till after we update all the function callers
1909 * before we update the callback. This keeps different
1910 * ops that record different functions from corrupting
1911 * each other.
1912 */
1913 __ftrace_trace_function = __ftrace_trace_function_delay;
1914#endif
1915 function_trace_stop--; 1875 function_trace_stop--;
1916 1876
1917 ret = ftrace_arch_code_modify_post_process(); 1877 ret = ftrace_arch_code_modify_post_process();
@@ -3996,6 +3956,9 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
3996{ 3956{
3997 struct ftrace_ops *op; 3957 struct ftrace_ops *op;
3998 3958
3959 if (function_trace_stop)
3960 return;
3961
3999 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) 3962 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
4000 return; 3963 return;
4001 3964