author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2014-01-10 17:01:58 -0500
committer Steven Rostedt <rostedt@goodmis.org>              2014-04-21 13:59:25 -0400
commit    4104d326b670c2b66f575d2004daa28b2d1b4c8d
tree      9eb7a3084d9bc9d7d5b6eccb0d17d37481020c61 /kernel/trace/trace_functions.c
parent    a798c10faf62a505d24e5f6213fbaf904a39623f
ftrace: Remove global function list and call function directly
Since only one global function is allowed to be enabled at a time, there is no reason to keep a list of global functions that are called.

Instead, simply have all the users of the global ops use the global ops directly, rather than registering their own ftrace_ops. Just switch which function is used before enabling the function tracer.

This removes a lot of code as well as the complexity involved with it.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
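The pattern being adopted can be sketched outside the kernel. The following is a minimal userspace analogue, not kernel code: the struct ops, tracer_fn, and callback names below are invented for illustration. It shows one global ops structure whose function pointer is switched to the desired callback before tracing is enabled, in place of a list of global functions:

#include <stdio.h>

/* Stand-in for ftrace_func_t: the tracing callback type. */
typedef void (*tracer_fn)(unsigned long ip, unsigned long parent_ip);

/* A single global ops, analogous to the global ftrace_ops. */
struct ops {
	tracer_fn func;
};

static void plain_trace(unsigned long ip, unsigned long parent_ip)
{
	printf("trace: %lx <- %lx\n", ip, parent_ip);
}

static void stack_trace(unsigned long ip, unsigned long parent_ip)
{
	printf("trace+stack: %lx <- %lx\n", ip, parent_ip);
}

static struct ops global_ops;

/* Choose the callback *before* enabling, instead of walking a list. */
static void tracer_enable(int want_stack)
{
	global_ops.func = want_stack ? stack_trace : plain_trace;
}

int main(void)
{
	tracer_enable(0);
	global_ops.func(0x1000, 0x2000);	/* plain function tracing */

	tracer_enable(1);
	global_ops.func(0x1000, 0x2000);	/* with stack tracing */
	return 0;
}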
Diffstat (limited to 'kernel/trace/trace_functions.c')
 kernel/trace/trace_functions.c | 55 +++++++++++++++++++------------------------------------
 1 file changed, 19 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index ffd56351b521..2d9482b8f26a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *pt_regs);
-static struct ftrace_ops trace_ops;
-static struct ftrace_ops trace_stack_ops;
 static struct tracer_flags func_flags;
 
 /* Our option */
@@ -83,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-	struct ftrace_ops *ops;
-
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
-		/* There's only one global tr */
-		if (!trace_ops.private) {
-			trace_ops.private = tr;
-			trace_stack_ops.private = tr;
-		}
+	ftrace_func_t func;
 
-		if (func_flags.val & TRACE_FUNC_OPT_STACK)
-			ops = &trace_stack_ops;
-		else
-			ops = &trace_ops;
-		tr->ops = ops;
-	} else if (!tr->ops) {
-		/*
-		 * Instance trace_arrays get their ops allocated
-		 * at instance creation. Unless it failed
-		 * the allocation.
-		 */
+	/*
+	 * Instance trace_arrays get their ops allocated
+	 * at instance creation. Unless it failed
+	 * the allocation.
+	 */
+	if (!tr->ops)
 		return -ENOMEM;
-	}
+
+	/* Currently only the global instance can do stack tracing */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+	    func_flags.val & TRACE_FUNC_OPT_STACK)
+		func = function_stack_trace_call;
+	else
+		func = function_trace_call;
+
+	ftrace_init_array_ops(tr, func);
 
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
@@ -118,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
+	ftrace_reset_array_ops(tr);
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -199,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
-static struct ftrace_ops trace_stack_ops __read_mostly =
-{
-	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -248,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	unregister_ftrace_function(tr->ops);
 
 	if (set) {
-		tr->ops = &trace_stack_ops;
+		tr->ops->func = function_stack_trace_call;
 		register_ftrace_function(tr->ops);
 	} else {
-		tr->ops = &trace_ops;
+		tr->ops->func = function_trace_call;
 		register_ftrace_function(tr->ops);
 	}
 
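The func_set_flag() hunk above preserves a specific ordering: the ops is unregistered, its callback is swapped, and only then is it re-registered, so no trace caller can observe the ops mid-update. Below is a hedged userspace sketch of that toggle sequence, with invented register/unregister stubs standing in for register_ftrace_function()/unregister_ftrace_function():

typedef void (*tracer_fn)(unsigned long ip, unsigned long parent_ip);

struct ops {
	tracer_fn func;
};

/* Stubs for illustration only; the real functions hook into ftrace. */
static struct ops *active;
static void register_ops(struct ops *o)   { active = o; }
static void unregister_ops(struct ops *o) { if (active == o) active = 0; }

static void plain_trace(unsigned long ip, unsigned long pip) { (void)ip; (void)pip; }
static void stack_trace(unsigned long ip, unsigned long pip) { (void)ip; (void)pip; }

/* Mirrors the func_set_flag() flow: detach, swap, reattach. */
static void set_stack_flag(struct ops *o, int set)
{
	unregister_ops(o);	/* nothing can call o->func while we swap */
	o->func = set ? stack_trace : plain_trace;
	register_ops(o);	/* resume tracing with the new callback */
}

int main(void)
{
	struct ops o = { plain_trace };
	register_ops(&o);
	set_stack_flag(&o, 1);	/* switch the live ops to stack tracing */
	return 0;
}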