author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>   2014-01-10 17:01:58 -0500
committer  Steven Rostedt <rostedt@goodmis.org>             2014-04-21 13:59:25 -0400
commit     4104d326b670c2b66f575d2004daa28b2d1b4c8d
tree       9eb7a3084d9bc9d7d5b6eccb0d17d37481020c61
parent     a798c10faf62a505d24e5f6213fbaf904a39623f
ftrace: Remove global function list and call function directly
Since only one global function is allowed to be enabled at a time, there is
no reason to keep a list of global functions that are called. Instead,
simply have all the users of the global ops use the global ops directly,
rather than registering their own ftrace_ops; they just switch which
function is used before enabling the function tracer.

This removes a lot of code, as well as the complexity involved with it.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--  include/linux/ftrace.h             |  20
-rw-r--r--  kernel/trace/ftrace.c              | 152
-rw-r--r--  kernel/trace/trace.c               |   2
-rw-r--r--  kernel/trace/trace.h               |  19
-rw-r--r--  kernel/trace/trace_functions.c     |  55
-rw-r--r--  kernel/trace/trace_irqsoff.c       |  33
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  |  40
-rw-r--r--  kernel/trace/trace_selftest.c      |  33

8 files changed, 133 insertions(+), 221 deletions(-)
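The shape of the change is easiest to see before diving into the hunks. A
minimal sketch, condensed from the diff below rather than a compilable
excerpt (all identifiers are ones the patch itself touches): the old scheme
registered a per-tracer ftrace_ops flagged FTRACE_OPS_FL_GLOBAL onto a
shared list; the new scheme points the trace_array's ops at the callback
and registers that ops directly.

	/* Before: each tracer hooked in via the shared global list. */
	static struct ftrace_ops trace_ops __read_mostly = {
		.func	= irqsoff_tracer_call,
		.flags	= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
	};
	register_ftrace_function(&trace_ops);	/* walked by ftrace_global_list_func() */

	/* After: wire the per-trace_array ops to the callback, then register
	 * that ops directly; no list walk on every traced function.
	 */
	ftrace_init_array_ops(tr, irqsoff_tracer_call);	/* tr->ops->func = callback */
	register_ftrace_function(tr->ops);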
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9212b017bc72..f0ff2c2453e7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -62,9 +62,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * set in the flags member.
  *
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
- * GLOBAL - set manualy by ftrace_ops user to denote the ftrace_ops
- *            is part of the global tracers sharing the same filter
- *            via set_ftrace_* debugfs files.
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
  * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
@@ -96,15 +93,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
-	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
-	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
-	FTRACE_OPS_FL_CONTROL			= 1 << 3,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
-	FTRACE_OPS_FL_STUB			= 1 << 7,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
-	FTRACE_OPS_FL_DELETED			= 1 << 9,
+	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
+	FTRACE_OPS_FL_CONTROL			= 1 << 2,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
+	FTRACE_OPS_FL_STUB			= 1 << 6,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
+	FTRACE_OPS_FL_DELETED			= 1 << 8,
 };
 
 /*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..8f61ef70a297 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,7 +62,7 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define INIT_REGEX_LOCK(opsname)	\
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-static void
-ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-			struct ftrace_ops *op, struct pt_regs *regs)
-{
-	int bit;
-
-	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
-	if (bit < 0)
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_global_list) {
-		op->func(ip, parent_ip, op, regs);
-	} while_for_each_ftrace_op(op);
-
-	trace_clear_recursion(bit);
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void update_global_ops(void)
-{
-	ftrace_func_t func = ftrace_global_list_func;
-	void *private = NULL;
-
-	/* The list has its own recursion protection. */
-	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-
-	/*
-	 * If there's only one function registered, then call that
-	 * function directly. Otherwise, we need to iterate over the
-	 * registered callers.
-	 */
-	if (ftrace_global_list == &ftrace_list_end ||
-	    ftrace_global_list->next == &ftrace_list_end) {
-		func = ftrace_global_list->func;
-		private = ftrace_global_list->private;
-		/*
-		 * As we are calling the function directly.
-		 * If it does not have recursion protection,
-		 * the function_trace_op needs to be updated
-		 * accordingly.
-		 */
-		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
-			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	}
-
-	/* If we filter on pids, update to use the pid function */
-	if (!list_empty(&ftrace_pids)) {
-		set_ftrace_pid_function(func);
-		func = ftrace_pid_func;
-	}
-
-	global_ops.func = func;
-	global_ops.private = private;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
 	/*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
 
-	update_global_ops();
-
 	/*
 	 * If we are at the end of the list and this ops is
 	 * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,10 +257,7 @@ static void update_ftrace_function(void)
 	    (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
-		if (ftrace_ops_list == &global_ops)
-			set_function_trace_op = ftrace_global_list;
-		else
-			set_function_trace_op = ftrace_ops_list;
+		set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
@@ -434,16 +374,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (ops->flags & FTRACE_OPS_FL_DELETED)
 		return -EINVAL;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
-	/* We don't support both control and global flags set. */
-	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
-		return -EINVAL;
-
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 	/*
 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +394,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
-		ops->flags |= FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		if (control_ops_alloc(ops))
 			return -ENOMEM;
 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
@@ -484,15 +414,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_list_ops(&ftrace_global_list,
-					     &global_ops, ops);
-		if (!ret)
-			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
 	} else
@@ -2128,15 +2050,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	ftrace_start_up++;
 	command |= FTRACE_UPDATE_CALLS;
 
-	/* ops marked global share the filter hashes */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		/* Don't update hash if global is already set */
-		if (global_start_up)
-			hash_enable = false;
-		global_start_up++;
-	}
-
 	ops->flags |= FTRACE_OPS_FL_ENABLED;
 	if (hash_enable)
 		ftrace_hash_rec_enable(ops, 1);
@@ -2166,21 +2079,10 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 */
 	WARN_ON_ONCE(ftrace_start_up < 0);
 
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		global_start_up--;
-		WARN_ON_ONCE(global_start_up < 0);
-		/* Don't update hash if global still has users */
-		if (global_start_up) {
-			WARN_ON_ONCE(!ftrace_start_up);
-			hash_disable = false;
-		}
-	}
-
 	if (hash_disable)
 		ftrace_hash_rec_disable(ops, 1);
 
-	if (ops != &global_ops || !global_start_up)
+	if (!global_start_up)
 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
 	command |= FTRACE_UPDATE_CALLS;
@@ -3524,10 +3426,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	struct ftrace_hash *hash;
 	int ret;
 
-	/* All global ops uses the global ops filters */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
-		ops = &global_ops;
-
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
@@ -4462,6 +4360,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+__init void ftrace_init_global_array_ops(struct trace_array *tr)
+{
+	tr->ops = &global_ops;
+	tr->ops->private = tr;
+}
+
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+{
+	/* If we filter on pids, update to use the pid function */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		if (WARN_ON(tr->ops->func != ftrace_stub))
+			printk("ftrace ops had %pS for function\n",
+			       tr->ops->func);
+		/* Only the top level instance does pid tracing */
+		if (!list_empty(&ftrace_pids)) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+	}
+	tr->ops->func = func;
+	tr->ops->private = tr;
+}
+
+void ftrace_reset_array_ops(struct trace_array *tr)
+{
+	tr->ops->func = ftrace_stub;
+}
+
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
@@ -4520,9 +4446,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip, regs))
+		if (ftrace_ops_test(op, ip, regs)) {
+			if (WARN_ON(!op->func)) {
+				function_trace_stop = 1;
+				printk("op=%p %pS\n", op, op);
+				goto out;
+			}
 			op->func(ip, parent_ip, op, regs);
+		}
 	} while_for_each_ftrace_op(op);
+out:
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
 }
@@ -5076,8 +5009,7 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
 /* Just a place holder for function graph */
 static struct ftrace_ops fgraph_ops __read_mostly = {
 	.func		= ftrace_stub,
-	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
-				FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 737b0efa1a62..fdd33aacdf05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6629,6 +6629,8 @@ __init static int tracer_alloc_buffers(void)
 	 */
 	global_trace.current_trace = &nop_trace;
 
+	ftrace_init_global_array_ops(&global_trace);
+
 	register_tracer(&nop_trace);
 
 	/* All seems OK, enable tracing */
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2e29d7ba5a52..df5256be64cd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -416,13 +416,7 @@ enum {
 	TRACE_FTRACE_IRQ_BIT,
 	TRACE_FTRACE_SIRQ_BIT,
 
-	/* GLOBAL_BITs must be greater than FTRACE_BITs */
-	TRACE_GLOBAL_BIT,
-	TRACE_GLOBAL_NMI_BIT,
-	TRACE_GLOBAL_IRQ_BIT,
-	TRACE_GLOBAL_SIRQ_BIT,
-
-	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
+	/* INTERNAL_BITs must be greater than FTRACE_BITs */
 	TRACE_INTERNAL_BIT,
 	TRACE_INTERNAL_NMI_BIT,
 	TRACE_INTERNAL_IRQ_BIT,
@@ -449,9 +443,6 @@ enum {
 #define TRACE_FTRACE_START	TRACE_FTRACE_BIT
 #define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 
-#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
-#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
-
 #define TRACE_LIST_START	TRACE_INTERNAL_BIT
 #define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 
@@ -823,6 +814,9 @@ extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
 				 struct dentry *parent);
 void ftrace_destroy_function_files(struct trace_array *tr);
+void ftrace_init_global_array_ops(struct trace_array *tr);
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
+void ftrace_reset_array_ops(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
@@ -836,6 +830,11 @@ ftrace_create_function_files(struct trace_array *tr,
 	return 0;
 }
 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+static inline __init void
+ftrace_init_global_array_ops(struct trace_array *tr) { }
+static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
+/* ftace_func_t type is not defined, use macro instead of static inline */
+#define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index ffd56351b521..2d9482b8f26a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs);
-static struct ftrace_ops trace_ops;
-static struct ftrace_ops trace_stack_ops;
 static struct tracer_flags func_flags;
 
 /* Our option */
@@ -83,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-	struct ftrace_ops *ops;
-
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
-		/* There's only one global tr */
-		if (!trace_ops.private) {
-			trace_ops.private = tr;
-			trace_stack_ops.private = tr;
-		}
-
-		if (func_flags.val & TRACE_FUNC_OPT_STACK)
-			ops = &trace_stack_ops;
-		else
-			ops = &trace_ops;
-		tr->ops = ops;
-	} else if (!tr->ops) {
-		/*
-		 * Instance trace_arrays get their ops allocated
-		 * at instance creation. Unless it failed
-		 * the allocation.
-		 */
-		return -ENOMEM;
-	}
+	ftrace_func_t func;
+
+	/*
+	 * Instance trace_arrays get their ops allocated
+	 * at instance creation. Unless it failed
+	 * the allocation.
+	 */
+	if (!tr->ops)
+		return -ENOMEM;
+
+	/* Currently only the global instance can do stack tracing */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+	    func_flags.val & TRACE_FUNC_OPT_STACK)
+		func = function_stack_trace_call;
+	else
+		func = function_trace_call;
+
+	ftrace_init_array_ops(tr, func);
 
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
@@ -118,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
+	ftrace_reset_array_ops(tr);
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -199,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
-static struct ftrace_ops trace_stack_ops __read_mostly =
-{
-	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -248,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	unregister_ftrace_function(tr->ops);
 
 	if (set) {
-		tr->ops = &trace_stack_ops;
+		tr->ops->func = function_stack_trace_call;
 		register_ftrace_function(tr->ops);
 	} else {
-		tr->ops = &trace_ops;
+		tr->ops->func = function_trace_call;
 		register_ftrace_function(tr->ops);
 	}
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 8ff02cbb892f..b5cb047df3e9 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -151,12 +151,6 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 
 	atomic_dec(&data->disabled);
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = irqsoff_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -531,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-static int register_irqsoff_function(int graph, int set)
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
@@ -543,7 +537,7 @@ static int register_irqsoff_function(int graph, int set)
 		ret = register_ftrace_graph(&irqsoff_graph_return,
 					    &irqsoff_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);
 
 	if (!ret)
 		function_enabled = true;
@@ -551,7 +545,7 @@ static int register_irqsoff_function(int graph, int set)
 	return ret;
 }
 
-static void unregister_irqsoff_function(int graph)
+static void unregister_irqsoff_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -559,17 +553,17 @@ static void unregister_irqsoff_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);
 
 	function_enabled = false;
 }
 
-static void irqsoff_function_set(int set)
+static void irqsoff_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_irqsoff_function(is_graph(), 1);
+		register_irqsoff_function(tr, is_graph(), 1);
 	else
-		unregister_irqsoff_function(is_graph());
+		unregister_irqsoff_function(tr, is_graph());
 }
 
 static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -577,7 +571,7 @@ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 	struct tracer *tracer = tr->current_trace;
 
 	if (mask & TRACE_ITER_FUNCTION)
-		irqsoff_function_set(set);
+		irqsoff_function_set(tr, set);
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
@@ -586,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	int ret;
 
-	ret = register_irqsoff_function(graph, 0);
+	ret = register_irqsoff_function(tr, graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -600,7 +594,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
 
-	unregister_irqsoff_function(graph);
+	unregister_irqsoff_function(tr, graph);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -617,7 +611,11 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 	smp_wmb();
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
-	if (start_irqsoff_tracer(tr, is_graph()))
+	ftrace_init_array_ops(tr, irqsoff_tracer_call);
+
+	/* Only toplevel instance supports graph tracing */
+	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+				      is_graph())))
 		printk(KERN_ERR "failed to start irqsoff tracer\n");
 }
 
@@ -630,6 +628,7 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	ftrace_reset_array_ops(tr);
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e14da5e97a69..4dd986defa60 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -130,15 +130,9 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = wakeup_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */
 
-static int register_wakeup_function(int graph, int set)
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
@@ -150,7 +144,7 @@ static int register_wakeup_function(int graph, int set)
 		ret = register_ftrace_graph(&wakeup_graph_return,
 					    &wakeup_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);
 
 	if (!ret)
 		function_enabled = true;
@@ -158,7 +152,7 @@ static int register_wakeup_function(int graph, int set)
 	return ret;
 }
 
-static void unregister_wakeup_function(int graph)
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -166,17 +160,17 @@ static void unregister_wakeup_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);
 
 	function_enabled = false;
 }
 
-static void wakeup_function_set(int set)
+static void wakeup_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_wakeup_function(is_graph(), 1);
+		register_wakeup_function(tr, is_graph(), 1);
 	else
-		unregister_wakeup_function(is_graph());
+		unregister_wakeup_function(tr, is_graph());
 }
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -184,16 +178,16 @@ static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 	struct tracer *tracer = tr->current_trace;
 
 	if (mask & TRACE_ITER_FUNCTION)
-		wakeup_function_set(set);
+		wakeup_function_set(tr, set);
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
 
-static int start_func_tracer(int graph)
+static int start_func_tracer(struct trace_array *tr, int graph)
 {
 	int ret;
 
-	ret = register_wakeup_function(graph, 0);
+	ret = register_wakeup_function(tr, graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -203,11 +197,11 @@ static int start_func_tracer(int graph)
 	return ret;
 }
 
-static void stop_func_tracer(int graph)
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
 
-	unregister_wakeup_function(graph);
+	unregister_wakeup_function(tr, graph);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -221,12 +215,12 @@ wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (!(is_graph() ^ set))
 		return 0;
 
-	stop_func_tracer(!set);
+	stop_func_tracer(tr, !set);
 
 	wakeup_reset(wakeup_trace);
 	tracing_max_latency = 0;
 
-	return start_func_tracer(set);
+	return start_func_tracer(tr, set);
 }
 
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
@@ -587,7 +581,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	if (start_func_tracer(is_graph()))
+	if (start_func_tracer(tr, is_graph()))
 		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
@@ -600,7 +594,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	stop_func_tracer(is_graph());
+	stop_func_tracer(tr, is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -617,6 +611,7 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
+	ftrace_init_array_ops(tr, wakeup_tracer_call);
 	start_wakeup_tracer(tr);
 	return 0;
 }
@@ -653,6 +648,7 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	ftrace_reset_array_ops(tr);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index e98fca60974f..519d04affe38 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -161,11 +161,6 @@ static struct ftrace_ops test_probe3 = {
 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-static struct ftrace_ops test_global = {
-	.func		= trace_selftest_test_global_func,
-	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static void print_counts(void)
 {
 	printk("(%d %d %d %d %d) ",
@@ -185,7 +180,7 @@ static void reset_counts(void)
 	trace_selftest_test_dyn_cnt = 0;
 }
 
-static int trace_selftest_ops(int cnt)
+static int trace_selftest_ops(struct trace_array *tr, int cnt)
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	struct ftrace_ops *dyn_ops;
@@ -220,7 +215,11 @@ static int trace_selftest_ops(int cnt)
 	register_ftrace_function(&test_probe1);
 	register_ftrace_function(&test_probe2);
 	register_ftrace_function(&test_probe3);
-	register_ftrace_function(&test_global);
+	/* First time we are running with main function */
+	if (cnt > 1) {
+		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
+		register_ftrace_function(tr->ops);
+	}
 
 	DYN_FTRACE_TEST_NAME();
 
@@ -232,8 +231,10 @@ static int trace_selftest_ops(int cnt)
 		goto out;
 	if (trace_selftest_test_probe3_cnt != 1)
 		goto out;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
 
 	DYN_FTRACE_TEST_NAME2();
 
@@ -269,8 +270,10 @@ static int trace_selftest_ops(int cnt)
 		goto out_free;
 	if (trace_selftest_test_probe3_cnt != 3)
 		goto out_free;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
 
@@ -295,7 +298,9 @@ static int trace_selftest_ops(int cnt)
 	unregister_ftrace_function(&test_probe1);
 	unregister_ftrace_function(&test_probe2);
 	unregister_ftrace_function(&test_probe3);
-	unregister_ftrace_function(&test_global);
+	if (cnt > 1)
+		unregister_ftrace_function(tr->ops);
+	ftrace_reset_array_ops(tr);
 
 	/* Make sure everything is off */
 	reset_counts();
@@ -388,7 +393,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	}
 
 	/* Test the ops with global tracing running */
-	ret = trace_selftest_ops(1);
+	ret = trace_selftest_ops(tr, 1);
 	trace->reset(tr);
 
  out:
@@ -399,7 +404,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 
 	/* Test the ops with global tracing off */
 	if (!ret)
-		ret = trace_selftest_ops(2);
+		ret = trace_selftest_ops(tr, 2);
 
 	return ret;
 }
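Taken together, the converted tracers now share one lifecycle. A condensed
sketch of that pattern, using the wakeup tracer's functions from the hunks
above (elided lines marked with ...):

	static int __wakeup_tracer_init(struct trace_array *tr)
	{
		...
		ftrace_init_array_ops(tr, wakeup_tracer_call);	/* wire the callback */
		start_wakeup_tracer(tr);	/* registers tr->ops with ftrace */
		return 0;
	}

	static void wakeup_tracer_reset(struct trace_array *tr)
	{
		stop_wakeup_tracer(tr);		/* unregisters tr->ops */
		...
		ftrace_reset_array_ops(tr);	/* tr->ops->func = ftrace_stub */
	}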