-rw-r--r--	include/linux/ftrace.h	 66
-rw-r--r--	kernel/trace/ftrace.c	111
-rw-r--r--	kernel/trace/trace.h	  2
3 files changed, 172 insertions, 7 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f33fb3b041c6..64a309d2fbd6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
+/*
+ * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops is part of the global tracers sharing the same
+ *           filter via the set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
+ *           allocated ftrace_ops which needs special care
+ * CONTROL - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops can be controlled by the following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
 enum {
 	FTRACE_OPS_FL_ENABLED	= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL	= 1 << 1,
 	FTRACE_OPS_FL_DYNAMIC	= 1 << 2,
+	FTRACE_OPS_FL_CONTROL	= 1 << 3,
 };
 
 struct ftrace_ops {
 	ftrace_func_t		func;
 	struct ftrace_ops	*next;
 	unsigned long		flags;
+	int __percpu		*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash	*notrace_hash;
 	struct ftrace_hash	*filter_hash;
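
Taken together, the new flag and the per-cpu counter let a client declare an ops whose callback can be gated cpu by cpu. A minimal sketch of such a declaration (the names are illustrative, not part of this patch; the ->disabled counter is allocated by register_ftrace_function() when the CONTROL flag is set):

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	/* runs for every traced function while enabled on this cpu */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_CONTROL,
	/* ->disabled is allocated at registration time */
};
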
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on the current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on the current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ *                                  on current cpu
+ *
+ * This function returns the value of ftrace_ops::disabled on the current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	return *this_cpu_ptr(ops->disabled);
+}
+
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
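
The helpers above form the control API: ->disabled is a per-cpu counter, so enable decrements, disable increments, and calls nest. A sketch of the intended calling pattern, assuming my_ops from the earlier sketch has been registered (the wrapper names are hypothetical):

#include <linux/ftrace.h>
#include <linux/preempt.h>

static void my_local_trace_on(struct ftrace_ops *ops)
{
	preempt_disable();			/* this_cpu_ptr() requirement */
	ftrace_function_local_enable(ops);	/* (*disabled)-- */
	preempt_enable();
}

static void my_local_trace_off(struct ftrace_ops *ops)
{
	preempt_disable();
	ftrace_function_local_disable(ops);	/* (*disabled)++ */
	preempt_enable();
}

The callback fires on a cpu only while that cpu's counter is zero, which is exactly what ftrace_function_local_disabled() reports.
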
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d1499e910fe8..f615f974d90e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,6 +62,8 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+	int __percpu *disabled;
+
+	disabled = alloc_percpu(int);
+	if (!disabled)
+		return -ENOMEM;
+
+	ops->disabled = disabled;
+	control_ops_disable_all(ops);
+	return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void update_global_ops(void)
 {
 	ftrace_func_t func;
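
Note that control_ops_alloc() deliberately starts every cpu at 1, so a freshly registered CONTROL ops is quiescent everywhere until enabled cpu by cpu. A hypothetical sketch of enabling it on all cpus; on_each_cpu() runs the handler with interrupts off, which satisfies the preemption-disabled requirement:

#include <linux/ftrace.h>
#include <linux/smp.h>

static void my_enable_one(void *info)
{
	ftrace_function_local_enable(info);	/* IPI context: preemption off */
}

static void my_enable_all(struct ftrace_ops *ops)
{
	on_each_cpu(my_enable_one, ops, 1);	/* wait for all cpus */
}
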
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+				struct ftrace_ops *main_ops,
+				struct ftrace_ops *ops)
+{
+	int first = *list == &ftrace_list_end;
+	add_ftrace_ops(list, ops);
+	if (first)
+		add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+				  struct ftrace_ops *main_ops,
+				  struct ftrace_ops *ops)
+{
+	int ret = remove_ftrace_ops(list, ops);
+	if (!ret && *list == &ftrace_list_end)
+		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+	return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ftrace_disabled)
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	/* We don't support both control and global flags set. */
+	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+		return -EINVAL;
+
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		int first = ftrace_global_list == &ftrace_list_end;
-		add_ftrace_ops(&ftrace_global_list, ops);
+		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
 		ops->flags |= FTRACE_OPS_FL_ENABLED;
-		if (first)
-			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		if (control_ops_alloc(ops))
+			return -ENOMEM;
+		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
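
With these checks in place, registering a control ops can fail in two new ways. A sketch of what a caller sees, assuming register_ftrace_function() propagates the return value of __register_ftrace_function() as the surrounding code does (wrapper name is hypothetical):

#include <linux/ftrace.h>

static int my_register(struct ftrace_ops *ops)
{
	int ret;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ret = register_ftrace_function(ops);
	/*
	 * -EINVAL: FTRACE_OPS_FL_GLOBAL was also set; the two flags
	 *          are mutually exclusive.
	 * -ENOMEM: the per-cpu ->disabled allocation failed.
	 */
	return ret;
}
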
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -EINVAL;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_ops(&ftrace_global_list, ops);
-		if (!ret && ftrace_global_list == &ftrace_list_end)
-			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		ret = remove_ftrace_list_ops(&ftrace_global_list,
+					     &global_ops, ops);
 		if (!ret)
 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		ret = remove_ftrace_list_ops(&ftrace_control_list,
+					     &control_ops, ops);
+		if (!ret) {
+			/*
+			 * The ftrace_ops is now removed from the list,
+			 * so there'll be no new users. We must ensure
+			 * all current users are done before we free
+			 * the control data.
+			 */
+			synchronize_sched();
+			control_ops_free(ops);
+		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
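
The synchronize_sched() here pairs with the preempt_disable_notrace() section in the dispatcher below: preempt-off regions are sched-RCU read-side critical sections, so once the grace period elapses no cpu can still hold a pointer into the ops or its per-cpu data. The same unpublish/wait/free pattern in miniature, with hypothetical names:

#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct my_item {
	int __percpu *counter;
};

static struct my_item *my_published;	/* read only under preempt_disable() */

static void my_item_teardown(struct my_item *item)
{
	my_published = NULL;		/* unpublish: no new readers find it */
	synchronize_sched();		/* wait out all preempt-off readers */
	free_percpu(item->counter);	/* safe: nobody references it now */
}
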
@@ -3874,6 +3941,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+		return;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	trace_recursion_set(TRACE_CONTROL_BIT);
+	op = rcu_dereference_raw(ftrace_control_list);
+	while (op != &ftrace_list_end) {
+		if (!ftrace_function_local_disabled(op) &&
+		    ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+
+		op = rcu_dereference_raw(op->next);
+	}
+	trace_recursion_clear(TRACE_CONTROL_BIT);
+	preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+	.func = ftrace_ops_control_func,
+};
+
+static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b93ecbadad6d..55c6ea00f28a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -288,6 +288,8 @@ struct tracer {
 /* for function tracing recursion */
 #define TRACE_INTERNAL_BIT		(1<<11)
 #define TRACE_GLOBAL_BIT		(1<<12)
+#define TRACE_CONTROL_BIT		(1<<13)
+
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
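
The new bit slots into the existing trace_recursion scheme: ftrace_ops_control_func() sets it on entry and clears it on exit, so if the dispatcher is itself traced, the nested call bails out immediately. The guard pattern in isolation (a sketch mirroring that function, not additional patch code):

/* Mirrors the recursion guard in ftrace_ops_control_func() above. */
static void my_guarded_dispatch(unsigned long ip, unsigned long parent_ip)
{
	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
		return;			/* re-entered while dispatching */

	trace_recursion_set(TRACE_CONTROL_BIT);
	/* ... walk the control list and call enabled callbacks ... */
	trace_recursion_clear(TRACE_CONTROL_BIT);
}
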