path: root/kernel/trace/ftrace.c
author		Jiri Olsa <jolsa@redhat.com>	2012-02-15 09:51:48 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2012-02-21 11:08:23 -0500
commit		e248491ac283b516958ca9ab62c8e74b6718bca8 (patch)
tree		77e5a6589cf55ebeabe01a321d792e216162efe6 /kernel/trace/ftrace.c
parent		5b34926114e39e12005031269613d2b13194aeba (diff)
ftrace: Add enable/disable ftrace_ops control interface
Add a way to temporarily enable/disable ftrace_ops. The change follows the same pattern already used for the 'global' ftrace_ops.

Two new global ftrace_ops are introduced - control_ops and ftrace_control_list - which take over all ftrace_ops registered with the FTRACE_OPS_FL_CONTROL flag. In addition, a new per-cpu flag called 'disabled' is added to ftrace_ops to carry the control information for each cpu.

When an ftrace_ops with FTRACE_OPS_FL_CONTROL is registered, it starts out disabled on all cpus.

The ftrace_control_list holds all the registered 'control' ftrace_ops. The control_ops provides the function that iterates ftrace_control_list and checks the 'disabled' flag on the current cpu.

Three inline helpers are added:
ftrace_function_local_enable/ftrace_function_local_disable
  - enable/disable the ftrace_ops on the current cpu
ftrace_function_local_disabled
  - read the ftrace_ops::disabled value for the current cpu

Link: http://lkml.kernel.org/r/1329317514-8131-2-git-send-email-jolsa@redhat.com
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
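As an illustration of the intended use, here is a minimal sketch of a client of the new interface. The ops name and callbacks are hypothetical; only FTRACE_OPS_FL_CONTROL, register_ftrace_function() and the ftrace_function_local_* helpers come from this patch, and the preempt_disable() pairing is an assumption about how a caller keeps "current cpu" stable:

/* Hypothetical tracer built on the control interface (sketch only). */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* Invoked only on cpus where my_ops has been locally enabled. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_CONTROL,
};

static int my_tracer_init(void)
{
	/* The callback is registered but starts disabled on every cpu. */
	return register_ftrace_function(&my_ops);
}

static void my_tracer_toggle_this_cpu(bool on)
{
	/* Disable preemption so "this cpu" cannot change under us. */
	preempt_disable();
	if (on)
		ftrace_function_local_enable(&my_ops);
	else
		ftrace_function_local_disable(&my_ops);
	preempt_enable();
}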
Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 111 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 104 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d1499e910fe8..f615f974d90e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,6 +62,8 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+	int __percpu *disabled;
+
+	disabled = alloc_percpu(int);
+	if (!disabled)
+		return -ENOMEM;
+
+	ops->disabled = disabled;
+	control_ops_disable_all(ops);
+	return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void update_global_ops(void)
 {
 	ftrace_func_t func;
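The control_ops_alloc()/control_ops_free() pair above gives each control ops a per-cpu 'disabled' integer, allocated with alloc_percpu() and initialized to 1 (disabled) on every cpu. The three inline helpers named in the changelog live in include/linux/ftrace.h, which is outside this diffstat; under that caveat, they plausibly reduce to plain this_cpu_ptr() accesses on ops->disabled, roughly:

/* Sketch of the ftrace.h helpers (not shown in this file's diff). */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled)) = 0;	/* enable on this cpu */
}

static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled)) = 1;	/* disable on this cpu */
}

static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}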
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+				struct ftrace_ops *main_ops,
+				struct ftrace_ops *ops)
+{
+	int first = *list == &ftrace_list_end;
+	add_ftrace_ops(list, ops);
+	if (first)
+		add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+				  struct ftrace_ops *main_ops,
+				  struct ftrace_ops *ops)
+{
+	int ret = remove_ftrace_ops(list, ops);
+	if (!ret && *list == &ftrace_list_end)
+		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+	return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ftrace_disabled)
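The helpers just added capture a sub-list pattern: ftrace_control_list (like ftrace_global_list) is a private list whose entries are multiplexed through one proxy entry on the main ftrace_ops_list. Registering the first control ops also hooks control_ops into ftrace_ops_list; registering a second one only grows the private list; unregistering the last one unhooks the proxy again. The main list therefore carries at most one entry on behalf of any number of control clients, which keeps the hot ftrace_ops_list walk short.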
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	/* We don't support both control and global flags set. */
+	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+		return -EINVAL;
+
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		int first = ftrace_global_list == &ftrace_list_end;
-		add_ftrace_ops(&ftrace_global_list, ops);
+		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
 		ops->flags |= FTRACE_OPS_FL_ENABLED;
-		if (first)
-			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		if (control_ops_alloc(ops))
+			return -ENOMEM;
+		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -EINVAL;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_ops(&ftrace_global_list, ops);
-		if (!ret && ftrace_global_list == &ftrace_list_end)
-			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		ret = remove_ftrace_list_ops(&ftrace_global_list,
+					     &global_ops, ops);
 		if (!ret)
 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		ret = remove_ftrace_list_ops(&ftrace_control_list,
+					     &control_ops, ops);
+		if (!ret) {
+			/*
+			 * The ftrace_ops is now removed from the list,
+			 * so there'll be no new users. We must ensure
+			 * all current users are done before we free
+			 * the control data.
+			 */
+			synchronize_sched();
+			control_ops_free(ops);
+		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
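The unregister path above is the classic remove-then-wait teardown: once the ops is off ftrace_control_list no new callers can find it, and synchronize_sched() does not return until every cpu has passed through a quiescent state, which cannot happen while a cpu is still inside the preempt-disabled callback loop (ftrace_ops_control_func, below). Only then is it safe to free the per-cpu data. In generic form, with hypothetical helper names, the pattern is:

/* Generic shape of the teardown, assuming all readers run preempt-off. */
remove_object_from_list(obj);	/* no new readers can find obj         */
synchronize_sched();		/* wait out all preempt-off readers    */
free_object_data(obj);		/* safe: nobody can still reference it */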
@@ -3874,6 +3941,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+		return;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	trace_recursion_set(TRACE_CONTROL_BIT);
+	op = rcu_dereference_raw(ftrace_control_list);
+	while (op != &ftrace_list_end) {
+		if (!ftrace_function_local_disabled(op) &&
+		    ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+
+		op = rcu_dereference_raw(op->next);
+	};
+	trace_recursion_clear(TRACE_CONTROL_BIT);
+	preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+	.func = ftrace_ops_control_func,
+};
+
+static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
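On the reader side, ftrace_ops_control_func() is the matching half of that scheme: preempt_disable_notrace() turns the list walk into an RCU-sched read-side section that synchronize_sched() in the unregister path must wait out, and the _notrace variants keep the tracer from tracing itself. The TRACE_CONTROL_BIT recursion flag bails out early if a control callback itself hits a traced function, and per-cpu disablement is checked with ftrace_function_local_disabled() before each op->func() call, so a control ops only fires on cpus where it was explicitly enabled.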