path: root/kernel/trace/ftrace.c
author	Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2014-08-15 17:23:02 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2014-08-22 13:18:48 -0400
commit	33b7f99cf003ca6c1d31c42b50e1100ad71aaec0 (patch)
tree	476509236acaa87a4aa729244c703a214a5558e3 /kernel/trace/ftrace.c
parent	7d1311b93e58ed55f3a31cc8f94c4b8fe988a2b9 (diff)
ftrace: Allow ftrace_ops to use the hashes from other ops
Currently the top level debug file system function tracer shares its ftrace_ops with the function graph tracer. This was thought to be fine because the tracers are not used together, as one can only enable function or function_graph tracer in the current_tracer file. But that assumption proved to be incorrect. The function profiler can use the function graph tracer when function tracing is enabled. Since all function graph users use the function tracing ftrace_ops, this causes a conflict, and when a user enables both function profiling and the function tracer it will crash ftrace and disable it.

The quick solution so far is to move them back to separate ftrace_ops, as it was earlier. The problem, though, is synchronizing the functions that are traced, because both the function and function_graph tracers are limited by the selections made in the set_ftrace_filter and set_ftrace_notrace files.

To handle this, a new structure is made called ftrace_ops_hash. This structure will now hold the filter_hash and notrace_hash, and the ftrace_ops will point to this structure. That will allow two ftrace_ops to share the same hashes.

Since most ftrace_ops do not share the hashes, and to keep allocation simple, the ftrace_ops structure will include both a pointer to the ftrace_ops_hash called func_hash, as well as the structure itself, called local_hash. When the ops are registered, the func_hash pointer will be initialized to point to the local_hash within the ftrace_ops structure. Some of the ftrace internal ftrace_ops will be initialized statically. This will allow the function and function_graph tracers to have separate ops but still share the same hash tables that determine which functions they trace.

Cc: stable@vger.kernel.org # 3.16 (apply after 3.17-rc4 is out)
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
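[ Illustration, not part of the commit: a minimal sketch of the arrangement described above, using simplified, hypothetical field types and helper names (ops_init, share_hashes) rather than the real kernel definitions. The idea is that each ftrace_ops carries its own local_hash plus a func_hash pointer that normally refers to it, while two ops can be made to point at the same ftrace_ops_hash. ]

/* Simplified sketch -- not the actual kernel structures. */
struct ftrace_hash;				/* opaque here */

struct ftrace_ops_hash {
	struct ftrace_hash	*notrace_hash;
	struct ftrace_hash	*filter_hash;
	/* the real structure also holds the regex_lock mutex */
};

struct ftrace_ops {
	void			(*func)(void);	/* placeholder callback type */
	struct ftrace_ops_hash	local_hash;	/* this ops' own hashes */
	struct ftrace_ops_hash	*func_hash;	/* normally &local_hash */
};

/* At registration time each ops starts out using its own hashes ... */
static void ops_init(struct ftrace_ops *ops)
{
	ops->func_hash = &ops->local_hash;
}

/* ... but two ops (e.g. function and function_graph) can share one set: */
static void share_hashes(struct ftrace_ops *a, struct ftrace_ops *b)
{
	b->func_hash = a->func_hash;
}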
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	100
1 file changed, 52 insertions(+), 48 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1654b12c891a..c92757adba79 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -65,15 +65,17 @@
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)	\
-	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname)	\
+	.func_hash		= &opsname.local_hash,			\
+	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+	INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -140,7 +142,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-		mutex_init(&ops->regex_lock);
+		mutex_init(&ops->local_hash.regex_lock);
+		ops->func_hash = &ops->local_hash;
 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 	}
 #endif
@@ -899,7 +902,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(ftrace_profile_ops)
+	INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1081,11 +1084,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
 	.func				= ftrace_stub,
-	.notrace_hash			= EMPTY_HASH,
-	.filter_hash			= EMPTY_HASH,
-	.flags				= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
+	.local_hash.notrace_hash	= EMPTY_HASH,
+	.local_hash.filter_hash		= EMPTY_HASH,
+	INIT_OPS_HASH(global_ops)
+	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
+					  FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
@@ -1226,8 +1230,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
 	ftrace_ops_init(ops);
-	free_ftrace_hash(ops->filter_hash);
-	free_ftrace_hash(ops->notrace_hash);
+	free_ftrace_hash(ops->func_hash->filter_hash);
+	free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1382,8 +1386,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 		return 0;
 #endif
 
-	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1554,14 +1558,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 	 * gets inversed.
 	 */
 	if (filter_hash) {
-		hash = ops->filter_hash;
-		other_hash = ops->notrace_hash;
+		hash = ops->func_hash->filter_hash;
+		other_hash = ops->func_hash->notrace_hash;
 		if (ftrace_hash_empty(hash))
 			all = 1;
 	} else {
 		inc = !inc;
-		hash = ops->notrace_hash;
-		other_hash = ops->filter_hash;
+		hash = ops->func_hash->notrace_hash;
+		other_hash = ops->func_hash->filter_hash;
 		/*
 		 * If the notrace hash has no items,
 		 * then there's nothing to do.
@@ -2436,8 +2440,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
 	 * Filter_hash being empty will default to trace module.
 	 * But notrace hash requires a test of individual module functions.
 	 */
-	return ftrace_hash_empty(ops->filter_hash) &&
-		ftrace_hash_empty(ops->notrace_hash);
+	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+		ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
@@ -2459,12 +2463,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 		return 0;
 
 	/* The function must be in the filter */
-	if (!ftrace_hash_empty(ops->filter_hash) &&
-	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 		return 0;
 
 	/* If in notrace hash, we ignore it too */
-	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
 		return 0;
 
 	return 1;
@@ -2785,10 +2789,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	} else {
 		rec = &iter->pg->records[iter->idx++];
 		if (((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
 		     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2841,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * functions are enabled.
 	 */
 	if ((iter->flags & FTRACE_ITER_FILTER &&
-	     ftrace_hash_empty(ops->filter_hash)) ||
+	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
 	    (iter->flags & FTRACE_ITER_NOTRACE &&
-	     ftrace_hash_empty(ops->notrace_hash))) {
+	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3005,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	iter->ops = ops;
 	iter->flags = flag;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (flag & FTRACE_ITER_NOTRACE)
-		hash = ops->notrace_hash;
+		hash = ops->func_hash->notrace_hash;
 	else
-		hash = ops->filter_hash;
+		hash = ops->func_hash->filter_hash;
 
 	if (file->f_mode & FMODE_WRITE) {
 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3045,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		file->private_data = iter;
 
  out_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	return ret;
 }
@@ -3279,7 +3283,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 	.func		= function_trace_probe_call,
 	.flags		= FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(trace_probe_ops)
+	INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -3342,7 +3346,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			      void *data)
 {
 	struct ftrace_func_probe *entry;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct ftrace_hash *hash;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
@@ -3359,7 +3363,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
@@ -3428,7 +3432,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  out_unlock:
 	mutex_unlock(&ftrace_lock);
  out:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3446,7 +3450,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct list_head free_list;
 	struct ftrace_hash *hash;
 	struct hlist_node *tmp;
@@ -3468,7 +3472,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		return;
 	}
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3521,7 +3525,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 }
 
@@ -3717,12 +3721,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (enable)
-		orig_hash = &ops->filter_hash;
+		orig_hash = &ops->func_hash->filter_hash;
 	else
-		orig_hash = &ops->notrace_hash;
+		orig_hash = &ops->func_hash->notrace_hash;
 
 	if (reset)
 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3756,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3975,15 +3979,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	trace_parser_put(parser);
 
-	mutex_lock(&iter->ops->regex_lock);
+	mutex_lock(&iter->ops->func_hash->regex_lock);
 
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
 		if (filter_hash)
-			orig_hash = &iter->ops->filter_hash;
+			orig_hash = &iter->ops->func_hash->filter_hash;
 		else
-			orig_hash = &iter->ops->notrace_hash;
+			orig_hash = &iter->ops->func_hash->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +3998,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_unlock(&ftrace_lock);
 	}
 
-	mutex_unlock(&iter->ops->regex_lock);
+	mutex_unlock(&iter->ops->func_hash->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
@@ -4611,7 +4615,7 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
+	INIT_OPS_HASH(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4717,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
 	.func	= ftrace_ops_control_func,
 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(control_ops)
+	INIT_OPS_HASH(control_ops)
 };
 
 static inline void