Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c | 155
1 file changed, 101 insertions(+), 54 deletions(-)
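
The patch below drops the function tracer's single global state (the `ftrace_function_enabled` flag, the `func_trace` pointer, and the static `trace_ops`/`trace_stack_ops` structures) and instead gives each `trace_array` its own dynamically allocated `ftrace_ops`. The ops carries a back-pointer to its owner in `->private`, so the shared callbacks recover their instance with `tr = op->private` and test the per-instance `tr->function_enabled`. The following standalone C sketch illustrates only that ownership pattern; it is userspace demo code compiled apart from the kernel, and every name in it (`demo_ops`, `trace_instance`, and so on) is invented for the example.

/*
 * Standalone sketch (not kernel code) of the pattern this patch adopts:
 * instead of one global enable flag and one global ops structure, each
 * tracing instance owns an ops struct whose ->private points back at it,
 * so a single callback can service any number of instances.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ops {                       /* stand-in for struct ftrace_ops */
        void (*func)(unsigned long ip, struct demo_ops *op);
        void *private;                  /* back-pointer to the owner */
};

struct trace_instance {                 /* stand-in for struct trace_array */
        const char *name;
        int function_enabled;           /* per-instance, not global */
        struct demo_ops *ops;
};

/* One callback shared by every instance; the owner comes from op->private. */
static void function_trace_call(unsigned long ip, struct demo_ops *op)
{
        struct trace_instance *tr = op->private;

        if (!tr->function_enabled)
                return;
        printf("[%s] hit %#lx\n", tr->name, ip);
}

/* Mirrors allocate_ftrace_ops(): allocate ops and link it to its owner. */
static int allocate_ops(struct trace_instance *tr)
{
        struct demo_ops *ops = calloc(1, sizeof(*ops));

        if (!ops)
                return -1;
        ops->func = function_trace_call;
        ops->private = tr;
        tr->ops = ops;
        return 0;
}

int main(void)
{
        struct trace_instance foo = { .name = "foo" };
        struct trace_instance bar = { .name = "bar" };

        if (allocate_ops(&foo) || allocate_ops(&bar))
                return 1;

        foo.function_enabled = 1;       /* only foo traces */
        foo.ops->func(0x1234, foo.ops);
        bar.ops->func(0x5678, bar.ops); /* suppressed: bar not enabled */

        free(foo.ops);
        free(bar.ops);
        return 0;
}

One callback servicing any number of instances through `op->private` is what makes the `.allow_instances = true` line at the end of the patch possible.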
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 38fe1483c508..57f0ec962d2c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,33 +13,106 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
-/* function tracing enabled */
-static int ftrace_function_enabled;
+static void tracing_start_function_trace(struct trace_array *tr);
+static void tracing_stop_function_trace(struct trace_array *tr);
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+                    struct ftrace_ops *op, struct pt_regs *pt_regs);
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+                          struct ftrace_ops *op, struct pt_regs *pt_regs);
+static struct tracer_flags func_flags;
+
+/* Our option */
+enum {
+        TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+        struct ftrace_ops *ops;
 
-static struct trace_array *func_trace;
+        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+        if (!ops)
+                return -ENOMEM;
+
+        /* Currently only the non stack verision is supported */
+        ops->func = function_trace_call;
+        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+        tr->ops = ops;
+        ops->private = tr;
+        return 0;
+}
 
-static void tracing_start_function_trace(void);
-static void tracing_stop_function_trace(void);
+
+int ftrace_create_function_files(struct trace_array *tr,
+                                 struct dentry *parent)
+{
+        int ret;
+
+        /*
+         * The top level array uses the "global_ops", and the files are
+         * created on boot up.
+         */
+        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+                return 0;
+
+        ret = allocate_ftrace_ops(tr);
+        if (ret)
+                return ret;
+
+        ftrace_create_filter_files(tr->ops, parent);
+
+        return 0;
+}
+
+void ftrace_destroy_function_files(struct trace_array *tr)
+{
+        ftrace_destroy_filter_files(tr->ops);
+        kfree(tr->ops);
+        tr->ops = NULL;
+}
 
 static int function_trace_init(struct trace_array *tr)
 {
-        func_trace = tr;
+        ftrace_func_t func;
+
+        /*
+         * Instance trace_arrays get their ops allocated
+         * at instance creation. Unless it failed
+         * the allocation.
+         */
+        if (!tr->ops)
+                return -ENOMEM;
+
+        /* Currently only the global instance can do stack tracing */
+        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+            func_flags.val & TRACE_FUNC_OPT_STACK)
+                func = function_stack_trace_call;
+        else
+                func = function_trace_call;
+
+        ftrace_init_array_ops(tr, func);
+
         tr->trace_buffer.cpu = get_cpu();
         put_cpu();
 
         tracing_start_cmdline_record();
-        tracing_start_function_trace();
+        tracing_start_function_trace(tr);
         return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-        tracing_stop_function_trace();
+        tracing_stop_function_trace(tr);
         tracing_stop_cmdline_record();
+        ftrace_reset_array_ops(tr);
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -47,25 +120,18 @@ static void function_trace_start(struct trace_array *tr)
         tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
-/* Our option */
-enum {
-        TRACE_FUNC_OPT_STACK = 0x1,
-};
-
-static struct tracer_flags func_flags;
-
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
                     struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-        struct trace_array *tr = func_trace;
+        struct trace_array *tr = op->private;
         struct trace_array_cpu *data;
         unsigned long flags;
         int bit;
         int cpu;
         int pc;
 
-        if (unlikely(!ftrace_function_enabled))
+        if (unlikely(!tr->function_enabled))
                 return;
 
         pc = preempt_count();
@@ -91,14 +157,14 @@ static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-        struct trace_array *tr = func_trace;
+        struct trace_array *tr = op->private;
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
         int cpu;
         int pc;
 
-        if (unlikely(!ftrace_function_enabled))
+        if (unlikely(!tr->function_enabled))
                 return;
 
         /*
@@ -128,19 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
         local_irq_restore(flags);
 }
 
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-        .func = function_trace_call,
-        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
-static struct ftrace_ops trace_stack_ops __read_mostly =
-{
-        .func = function_stack_trace_call,
-        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
         { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -153,29 +206,21 @@ static struct tracer_flags func_flags = {
         .opts = func_opts
 };
 
-static void tracing_start_function_trace(void)
+static void tracing_start_function_trace(struct trace_array *tr)
 {
-        ftrace_function_enabled = 0;
-
-        if (func_flags.val & TRACE_FUNC_OPT_STACK)
-                register_ftrace_function(&trace_stack_ops);
-        else
-                register_ftrace_function(&trace_ops);
-
-        ftrace_function_enabled = 1;
+        tr->function_enabled = 0;
+        register_ftrace_function(tr->ops);
+        tr->function_enabled = 1;
 }
 
-static void tracing_stop_function_trace(void)
+static void tracing_stop_function_trace(struct trace_array *tr)
 {
-        ftrace_function_enabled = 0;
-
-        if (func_flags.val & TRACE_FUNC_OPT_STACK)
-                unregister_ftrace_function(&trace_stack_ops);
-        else
-                unregister_ftrace_function(&trace_ops);
+        tr->function_enabled = 0;
+        unregister_ftrace_function(tr->ops);
 }
 
-static int func_set_flag(u32 old_flags, u32 bit, int set)
+static int
+func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
         switch (bit) {
         case TRACE_FUNC_OPT_STACK:
@@ -183,12 +228,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
                 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                         break;
 
+                unregister_ftrace_function(tr->ops);
+
                 if (set) {
-                        unregister_ftrace_function(&trace_ops);
-                        register_ftrace_function(&trace_stack_ops);
+                        tr->ops->func = function_stack_trace_call;
+                        register_ftrace_function(tr->ops);
                 } else {
-                        unregister_ftrace_function(&trace_stack_ops);
-                        register_ftrace_function(&trace_ops);
+                        tr->ops->func = function_trace_call;
+                        register_ftrace_function(tr->ops);
                 }
 
                 break;
@@ -205,9 +252,9 @@ static struct tracer function_trace __tracer_data =
         .init           = function_trace_init,
         .reset          = function_trace_reset,
         .start          = function_trace_start,
-        .wait_pipe      = poll_wait_pipe,
         .flags          = &func_flags,
         .set_flag       = func_set_flag,
+        .allow_instances = true,
 #ifdef CONFIG_FTRACE_SELFTEST
         .selftest       = trace_selftest_startup_function,
 #endif
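
With `.allow_instances = true` set, the function tracer should now be selectable from inside a tracefs instance as well, e.g. (assuming the usual debugfs mount point) `mkdir /sys/kernel/debug/tracing/instances/foo` followed by `echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer`.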