about summary refs log tree commit diff stats
path: root/kernel/trace/trace_functions.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c | 72
1 file changed, 29 insertions(+), 43 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5b781d2be383..57f0ec962d2c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs);
-static struct ftrace_ops trace_ops;
-static struct ftrace_ops trace_stack_ops;
 static struct tracer_flags func_flags;
 
 /* Our option */
@@ -58,12 +56,16 @@ int ftrace_create_function_files(struct trace_array *tr,
 {
 	int ret;
 
-	/* The top level array uses the "global_ops". */
-	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
-		ret = allocate_ftrace_ops(tr);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * The top level array uses the "global_ops", and the files are
+	 * created on boot up.
+	 */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return 0;
+
+	ret = allocate_ftrace_ops(tr);
+	if (ret)
+		return ret;
 
 	ftrace_create_filter_files(tr->ops, parent);
 
@@ -79,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-	struct ftrace_ops *ops;
-
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
-		/* There's only one global tr */
-		if (!trace_ops.private) {
-			trace_ops.private = tr;
-			trace_stack_ops.private = tr;
-		}
+	ftrace_func_t func;
 
-		if (func_flags.val & TRACE_FUNC_OPT_STACK)
-			ops = &trace_stack_ops;
-		else
-			ops = &trace_ops;
-		tr->ops = ops;
-	} else if (!tr->ops) {
-		/*
-		 * Instance trace_arrays get their ops allocated
-		 * at instance creation. Unless it failed
-		 * the allocation.
-		 */
+	/*
+	 * Instance trace_arrays get their ops allocated
+	 * at instance creation. Unless it failed
+	 * the allocation.
+	 */
+	if (!tr->ops)
 		return -ENOMEM;
-	}
+
+	/* Currently only the global instance can do stack tracing */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+	    func_flags.val & TRACE_FUNC_OPT_STACK)
+		func = function_stack_trace_call;
+	else
+		func = function_trace_call;
+
+	ftrace_init_array_ops(tr, func);
 
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
@@ -114,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
+	ftrace_reset_array_ops(tr);
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -195,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
-static struct ftrace_ops trace_stack_ops __read_mostly =
-{
-	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -244,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	unregister_ftrace_function(tr->ops);
 
 	if (set) {
-		tr->ops = &trace_stack_ops;
+		tr->ops->func = function_stack_trace_call;
 		register_ftrace_function(tr->ops);
 	} else {
-		tr->ops = &trace_ops;
+		tr->ops->func = function_trace_call;
 		register_ftrace_function(tr->ops);
 	}
 
@@ -265,7 +252,6 @@ static struct tracer function_trace __tracer_data =
 	.init = function_trace_init,
 	.reset = function_trace_reset,
 	.start = function_trace_start,
-	.wait_pipe = poll_wait_pipe,
 	.flags = &func_flags,
 	.set_flag = func_set_flag,
 	.allow_instances = true,