-rw-r--r--  include/linux/ftrace.h        |   4
-rw-r--r--  include/linux/ftrace_event.h  |   1
-rw-r--r--  kernel/trace/Kconfig          |   2
-rw-r--r--  kernel/trace/ftrace.c         | 126
-rw-r--r--  kernel/trace/trace_events.c   |  54
-rw-r--r--  kernel/trace/trace_kprobe.c   | 289
6 files changed, 368 insertions(+), 108 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f83e17a40e8b..99d0fbcbaf79 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * not set this, then the ftrace infrastructure will add recursion
  * protection for the caller.
  * STUB - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (first use time
+ *               register_ftrace_function() is called, it will initialize the ops)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -100,6 +102,7 @@ enum {
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 	FTRACE_OPS_FL_STUB			= 1 << 7,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct mutex			regex_lock;
 #endif
 };
 
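The regex_lock added above replaces the single file-scope ftrace_regex_lock (removed from ftrace.c below), so filter updates on one ftrace_ops no longer serialize against every other ops. A minimal userspace sketch of the same move, embedding the lock in the object it guards; pthreads stand in for kernel mutexes and all names are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for struct ftrace_ops: the lock now lives
 * next to the data it guards instead of in a file-scope global. */
struct ops {
	const char *filter;		/* guarded by regex_lock */
	pthread_mutex_t regex_lock;
};

/* Static objects can initialize the embedded lock at build time,
 * which is what INIT_REGEX_LOCK()/__MUTEX_INITIALIZER do in the patch. */
static struct ops global_ops = {
	.filter = "*",
	.regex_lock = PTHREAD_MUTEX_INITIALIZER,
};

static void set_filter(struct ops *o, const char *f)
{
	pthread_mutex_lock(&o->regex_lock);	/* serializes only this ops */
	o->filter = f;
	pthread_mutex_unlock(&o->regex_lock);
}

int main(void)
{
	set_filter(&global_ops, "do_fork");
	printf("%s\n", global_ops.filter);
	return 0;
}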
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 34e00fb49bec..4372658c73ae 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@ struct ftrace_event_file {
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long		flags;
+	atomic_t		sm_ref;	/* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)				\
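sm_ref counts how many sources have put the event into soft mode, so SOFT_MODE is only set on the 0->1 transition and cleared on the 1->0 transition (see the trace_events.c hunks below). The same transition logic as a standalone C11 sketch; atomic_fetch_add/sub plus the delta reproduce the kernel's atomic_inc_return()/atomic_dec_return() semantics, and all names are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sm_ref;	/* counterpart of file->sm_ref */

/* Returns 1 when this caller is the first enabler (set SOFT_MODE). */
static int soft_enable(void)
{
	/* atomic_fetch_add returns the old value; +1 gives inc_return */
	return atomic_fetch_add(&sm_ref, 1) + 1 == 1;
}

/* Returns 1 when this caller is the last disabler (clear SOFT_MODE). */
static int soft_disable(void)
{
	return atomic_fetch_sub(&sm_ref, 1) - 1 == 0;
}

int main(void)
{
	printf("first enable:  %d\n", soft_enable());	/* 1 */
	printf("second enable: %d\n", soft_enable());	/* 0 */
	printf("first disable: %d\n", soft_disable());	/* 0 */
	printf("last disable:  %d\n", soft_disable());	/* 1 */
	return 0;
}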
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e9efd4b83a4..015f85aaca08 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
 	bool
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config FTRACE_NMI_ENTER
 	bool
@@ -107,7 +108,6 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
-	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8a5c017bb50c..b549b0f5b977 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,6 +64,13 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)	\
+	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
 	       unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+		mutex_init(&ops->regex_lock);
+		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+	}
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
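ftrace_ops_init() gives a dynamically allocated (or simply uninitialized) ops the same guarantee the static INIT_REGEX_LOCK() initializer gives built-in ones: the mutex is set up exactly once before first use. Note it has no locking of its own; like the kernel helper, the sketch below assumes the owner of the ops does not race two first uses of the same ops against each other. Userspace analogue, names illustrative:

#include <pthread.h>
#include <stdio.h>

#define OPS_FL_INITIALIZED	(1 << 8)

struct ops {
	unsigned long flags;
	pthread_mutex_t regex_lock;
};

/* Mirror of ftrace_ops_init(): idempotent, but not itself thread-safe. */
static void ops_init(struct ops *o)
{
	if (!(o->flags & OPS_FL_INITIALIZED)) {
		pthread_mutex_init(&o->regex_lock, NULL);
		o->flags |= OPS_FL_INITIALIZED;
	}
}

int main(void)
{
	struct ops dynamic_ops = { .flags = 0 };

	ops_init(&dynamic_ops);	/* first use: initializes the mutex */
	ops_init(&dynamic_ops);	/* later calls are no-ops */
	pthread_mutex_lock(&dynamic_ops.regex_lock);
	pthread_mutex_unlock(&dynamic_ops.regex_lock);
	puts("ok");
	return 0;
}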
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
-	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.notrace_hash		= EMPTY_HASH,
 	.filter_hash		= EMPTY_HASH,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
 	struct ftrace_page	*next;
 	struct dyn_ftrace	*records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+	ftrace_ops_init(ops);
 	free_ftrace_hash(ops->filter_hash);
 	free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
 	    ((iter->flags & FTRACE_ITER_ENABLED) &&
-	     !(rec->flags & ~FTRACE_FL_MASK))) {
+	     !(rec->flags & FTRACE_FL_ENABLED))) {
 
 		rec = NULL;
 		goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	struct ftrace_hash *hash;
 	int ret = 0;
 
+	ftrace_ops_init(ops);
+
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		return -ENOMEM;
 	}
 
+	iter->ops = ops;
+	iter->flags = flag;
+
+	mutex_lock(&ops->regex_lock);
+
 	if (flag & FTRACE_ITER_NOTRACE)
 		hash = ops->notrace_hash;
 	else
 		hash = ops->filter_hash;
 
-	iter->ops = ops;
-	iter->flags = flag;
-
 	if (file->f_mode & FMODE_WRITE) {
-		mutex_lock(&ftrace_lock);
 		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-		mutex_unlock(&ftrace_lock);
-
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
 			kfree(iter);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out_unlock;
 		}
 	}
 
-	mutex_lock(&ftrace_regex_lock);
-
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		}
 	} else
 		file->private_data = iter;
-	mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+	mutex_unlock(&ops->regex_lock);
 
 	return ret;
 }
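Two things changed in ftrace_regex_open(): iter->ops and iter->flags are assigned before any lock is taken, and the writable hash is duplicated while holding ops->regex_lock so a concurrent filter write cannot mutate it mid-copy; every failure path now funnels through out_unlock. A hedged userspace sketch of that copy-under-lock-with-goto shape (illustrative types, not the kernel's):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct hash { size_t n; int *slots; };
struct ops { struct hash *filter_hash; pthread_mutex_t regex_lock; };

/* Duplicate ops->filter_hash under ops->regex_lock, as the reworked
 * ftrace_regex_open() does; every error path exits via out_unlock. */
static struct hash *copy_hash_locked(struct ops *o)
{
	struct hash *copy;

	pthread_mutex_lock(&o->regex_lock);

	copy = malloc(sizeof(*copy));
	if (!copy)
		goto out_unlock;

	copy->n = o->filter_hash->n;
	copy->slots = malloc(copy->n * sizeof(int));
	if (!copy->slots) {
		free(copy);
		copy = NULL;
		goto out_unlock;
	}
	memcpy(copy->slots, o->filter_hash->slots, copy->n * sizeof(int));

 out_unlock:
	pthread_mutex_unlock(&o->regex_lock);
	return copy;
}

int main(void)
{
	int slots[2] = { 1, 2 };
	struct hash h = { 2, slots };
	struct ops o = { &h, PTHREAD_MUTEX_INITIALIZER };
	struct hash *c = copy_hash_locked(&o);

	if (c) { free(c->slots); free(c); }
	return 0;
}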
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 	.func		= function_trace_probe_call,
+	.flags		= FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
 	int ret;
 	int i;
 
-	if (ftrace_probe_registered)
+	if (ftrace_probe_registered) {
+		/* still need to update the function call sites */
+		if (ftrace_enabled)
+			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 		return;
+	}
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
 		count = -ENOMEM;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (unlikely(ftrace_disabled)) {
 		count = -ENODEV;
-		goto out_unlock;
+		goto out;
 	}
 
+	mutex_lock(&ftrace_lock);
+
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+ out:
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		return;
 	}
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			list_add(&entry->free_list, &free_list);
 		}
 	}
+	mutex_lock(&ftrace_lock);
 	__disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		list_del(&entry->free_list);
 		ftrace_free_entry(entry);
 	}
+	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&ftrace_lock);
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&ftrace_regex_lock);
-
-	ret = -ENODEV;
-	if (unlikely(ftrace_disabled))
-		goto out_unlock;
-
 	if (file->f_mode & FMODE_READ) {
 		struct seq_file *m = file->private_data;
 		iter = m->private;
 	} else
 		iter = file->private_data;
 
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	/* iter->hash is a local copy, so we don't need regex_lock */
+
 	parser = &iter->parser;
 	read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		ret = ftrace_process_regex(iter->hash, parser->buffer,
 					   parser->idx, enable);
 		trace_parser_clear(parser);
-		if (ret)
-			goto out_unlock;
+		if (ret < 0)
+			goto out;
 	}
 
 	ret = read;
-out_unlock:
-	mutex_unlock(&ftrace_regex_lock);
-
+ out:
 	return ret;
 }
 
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	mutex_lock(&ops->regex_lock);
+
 	if (enable)
 		orig_hash = &ops->filter_hash;
 	else
 		orig_hash = &ops->notrace_hash;
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-	if (!hash)
-		return -ENOMEM;
+	if (!hash) {
+		ret = -ENOMEM;
+		goto out_regex_unlock;
+	}
 
-	mutex_lock(&ftrace_regex_lock);
 	if (reset)
 		ftrace_filter_reset(hash);
 	if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ftrace_regex_lock);
+	mutex_unlock(&ops->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 			 int remove, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		      int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 		       int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
 	char *func;
 
+	ftrace_ops_init(ops);
+
 	while (buf) {
 		func = strsep(&buf, ",");
 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 	int filter_hash;
 	int ret;
 
-	mutex_lock(&ftrace_regex_lock);
 	if (file->f_mode & FMODE_READ) {
 		iter = m->private;
-
 		seq_release(inode, file);
 	} else
 		iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	trace_parser_put(parser);
 
+	mutex_lock(&iter->ops->regex_lock);
+
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 		mutex_unlock(&ftrace_lock);
 	}
+
+	mutex_unlock(&iter->ops->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
-	mutex_unlock(&ftrace_regex_lock);
 	return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
 	.func	= ftrace_ops_control_func,
-	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret = -1;
 
+	ftrace_ops_init(ops);
+
 	mutex_lock(&ftrace_lock);
 
 	ret = __register_ftrace_function(ops);
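For out-of-tree users, the practical effect of this file's changes is that a plain, statically zeroed ftrace_ops now just works: ftrace_set_filter(), ftrace_set_filter_ip(), ftrace_set_notrace() and register_ftrace_function() all run ftrace_ops_init() before touching its hashes. A sketch of a minimal client module against a kernel with this patch applied; the four-argument callback signature of kernels of this vintage is assumed, and "do_fork" is only an example target:

/* hypothetical module, not part of the patch */
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called on every hit of the filtered functions */
}

static unsigned char func_buf[] = "do_fork";

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	/* no INIT_REGEX_LOCK() available out of tree: first use initializes */
};

static int __init my_init(void)
{
	ftrace_set_filter(&my_ops, func_buf, strlen(func_buf), 1);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
	ftrace_free_filter(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");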
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53582e982e51..7a0cf68027cc 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
263 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 264 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
264 */ 265 */
265 if (soft_disable) { 266 if (soft_disable) {
267 if (atomic_dec_return(&file->sm_ref) > 0)
268 break;
266 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; 269 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
267 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 270 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
268 } else 271 } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}
 
 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
 		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 			buf = "0*\n";
+		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+			buf = "1*\n";
 		else
 			buf = "1\n";
 	} else
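With the extra branch, the per-event `enable` file now reports four states instead of three: "0", "1", "0*" (soft-disabled) and the new "1*" (enabled and also held in soft mode). The same flag decoding as a tiny standalone sketch; the int parameters stand in for the kernel's flag bits and are purely illustrative:

#include <stdio.h>

/* Mirrors the hunk above: enabled + SOFT_DISABLED -> "0*",
 * enabled + SOFT_MODE -> "1*", otherwise plain "1"/"0". */
static const char *enable_state(int enabled, int soft_disabled, int soft_mode)
{
	if (enabled) {
		if (soft_disabled)
			return "0*";
		else if (soft_mode)
			return "1*";
		return "1";
	}
	return "0";
}

int main(void)
{
	printf("%s %s %s %s\n",
	       enable_state(0, 0, 0), enable_state(1, 0, 0),
	       enable_state(1, 1, 0), enable_state(1, 0, 1));
	return 0;
}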
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
 	return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+		       struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	if (!file)
+		return NULL;
+
+	file->event_call = call;
+	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
+	list_add(&file->list, &tr->events);
+
+	return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return 0;
 }
 
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
 	if (ret < 0)
 		goto out_put;
 	ret = register_ftrace_function_probe(glob, ops, data);
-	if (!ret)
+	/*
+	 * The above returns on success the # of functions enabled,
+	 * but if it didn't find any functions it returns zero.
+	 * Consider no functions a failure too.
+	 */
+	if (!ret) {
+		ret = -ENOENT;
+		goto out_disable;
+	} else if (ret < 0)
 		goto out_disable;
+	/* Just return zero, not the number of enabled functions */
+	ret = 0;
  out:
 	mutex_unlock(&event_mutex);
 	return ret;
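register_ftrace_function_probe() returns a count of matched functions, so event_enable_func() has to fold three cases into one errno-style result. The same normalization as a standalone helper (a sketch; the -ENOENT choice for "glob matched nothing" mirrors the hunk above):

#include <errno.h>
#include <stdio.h>

/* ret > 0: N functions attached; ret == 0: glob matched nothing;
 * ret < 0: a real error. Map onto 0-on-success/negative-on-failure. */
static int normalize_probe_ret(int ret)
{
	if (ret == 0)
		return -ENOENT;	/* matched no functions: treat as failure */
	if (ret < 0)
		return ret;	/* propagate real errors */
	return 0;		/* success: hide the match count */
}

int main(void)
{
	printf("%d %d %d\n", normalize_probe_ret(3),
	       normalize_probe_ret(0), normalize_probe_ret(-EINVAL));
	return 0;
}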
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1865d5f76538..636d45fe69b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
 	struct list_head	list;
 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
+	struct ftrace_event_file	**files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
@@ -46,7 +46,7 @@ struct trace_probe {
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+	struct ftrace_event_file **file = tp->files;
+	int ret = 0;
+
+	if (file)
+		while (*(file++))
+			ret++;
+
+	return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * if the file is NULL, enable "perf" handler, or enable "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	tp->flags |= flag;
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+
+		/* 1 is for new one and 1 is for stopper */
+		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+			      GFP_KERNEL);
+		if (!new) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+		new[n] = file;
+		/* The last one keeps a NULL */
+
+		rcu_assign_pointer(tp->files, new);
+		tp->flags |= TP_FLAG_TRACE;
+
+		if (old) {
+			/* Make sure the probe is done with old files */
+			synchronize_sched();
+			kfree(old);
+		}
+	} else
+		tp->flags |= TP_FLAG_PROFILE;
+
 	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
 	    !trace_probe_has_gone(tp)) {
 		if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
 			ret = enable_kprobe(&tp->rp.kp);
 	}
 
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
 	return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+	int i;
+
+	if (tp->files) {
+		for (i = 0; tp->files[i]; i++)
+			if (tp->files[i] == file)
+				return i;
+	}
+
+	return -1;
+}
+
+/*
+ * Disable trace_probe
+ * if the file is NULL, disable "perf" handler, or disable "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	tp->flags &= ~flag;
+	int ret = 0;
+
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+		int i, j;
+
+		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		if (n == 1) {	/* Remove the last file */
+			tp->flags &= ~TP_FLAG_TRACE;
+			new = NULL;
+		} else {
+			new = kzalloc(n * sizeof(struct ftrace_event_file *),
+				      GFP_KERNEL);
+			if (!new) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+
+			/* This copy & check loop copies the NULL stopper too */
+			for (i = 0, j = 0; j < n && i < n + 1; i++)
+				if (old[i] != file)
+					new[j++] = old[i];
+		}
+
+		rcu_assign_pointer(tp->files, new);
+
+		/* Make sure the probe is done with old files */
+		synchronize_sched();
+		kfree(old);
+	} else
+		tp->flags &= ~TP_FLAG_PROFILE;
+
 	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
 		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
 	}
+
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
+	return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
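tp->files is a NULL-terminated array that the kprobe handlers walk locklessly; enable/disable never edit it in place, but build a new array, publish it with rcu_assign_pointer(), and free the old one only after synchronize_sched() guarantees no handler still walks it. The sketch below shows just the copy-add and copy-remove array logic in userspace C; the RCU publication and grace period are elided, so assume a single writer (the probe_enable_lock role) and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* NULL-terminated pointer array, as in trace_probe::files. */
static int count(void **arr)
{
	int n = 0;

	if (arr)
		while (arr[n])
			n++;
	return n;
}

/* Add: allocate n+2 slots (new entry + NULL stopper), copy, append. */
static void **files_add(void **old, void *file)
{
	int n = count(old);
	void **new = calloc(n + 2, sizeof(void *));

	if (!new)
		return NULL;
	memcpy(new, old, n * sizeof(void *));
	new[n] = file;		/* new[n + 1] stays NULL */
	return new;
}

/* Remove: copy everything except @file; the loop bounds also copy the
 * NULL stopper, exactly like the hunk's "copy & check" loop. */
static void **files_del(void **old, void *file)
{
	int n = count(old), i, j;
	void **new;

	if (n == 1)
		return NULL;	/* removing the last file: empty array */
	new = calloc(n, sizeof(void *));
	if (!new)
		return NULL;
	for (i = 0, j = 0; j < n && i < n + 1; i++)
		if (old[i] != file)
			new[j++] = old[i];
	return new;
}

int main(void)
{
	int a, b;
	void **v = files_add(NULL, &a);
	void **w = files_add(v, &b);
	void **x = files_del(w, &a);

	printf("%d %d %d\n", count(v), count(w), count(x));	/* 1 2 1 */
	free(v); free(w); free(x);
	return 0;
}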
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+		    struct ftrace_event_file *ftrace_file)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
-	tp->nhit++;
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
 	entry = ring_buffer_event_data(event);
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 						irq_flags, pc, regs);
 }
 
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kprobe_trace_func(tp, regs, *file);
+		file++;
+	}
+}
+
 /* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-					   struct pt_regs *regs)
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		       struct pt_regs *regs,
+		       struct ftrace_event_file *ftrace_file)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct kretprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
+
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 					 irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		     struct pt_regs *regs)
+{
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kretprobe_trace_func(tp, ri, regs, *file);
+		file++;
+	}
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-				       struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	if (!entry)
 		return;
 
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-					  struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		    struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
 		  enum trace_reg type, void *data)
 {
 	struct trace_probe *tp = (struct trace_probe *)event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_TRACE);
+		return enable_trace_probe(tp, file);
 	case TRACE_REG_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_TRACE);
-		return 0;
+		return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_PROFILE);
+		return enable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_PROFILE);
-		return 0;
+		return disable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_OPEN:
 	case TRACE_REG_PERF_CLOSE:
 	case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kprobe_trace_func(kp, regs);
+		kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_perf_func(kp, regs);
+		kprobe_perf_func(tp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kretprobe_trace_func(ri, regs);
+		kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_perf_func(ri, regs);
+		kretprobe_perf_func(tp, ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
 	return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	list_for_each_entry(file, &tr->events, list)
+		if (file->event_call == &tp->call)
+			return file;
+
+	return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
 	int ret, warn = 0;
 	int (*target)(int, int, int, int, int, int);
 	struct trace_probe *tp;
+	struct ftrace_event_file *file;
 
 	target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
1203 "$stack $stack0 +0($stack)", 1364 "$stack $stack0 +0($stack)",
1204 create_trace_probe); 1365 create_trace_probe);
1205 if (WARN_ON_ONCE(ret)) { 1366 if (WARN_ON_ONCE(ret)) {
1206 pr_warning("error on probing function entry.\n"); 1367 pr_warn("error on probing function entry.\n");
1207 warn++; 1368 warn++;
1208 } else { 1369 } else {
1209 /* Enable trace point */ 1370 /* Enable trace point */
1210 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); 1371 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1211 if (WARN_ON_ONCE(tp == NULL)) { 1372 if (WARN_ON_ONCE(tp == NULL)) {
1212 pr_warning("error on getting new probe.\n"); 1373 pr_warn("error on getting new probe.\n");
1213 warn++; 1374 warn++;
1214 } else 1375 } else {
1215 enable_trace_probe(tp, TP_FLAG_TRACE); 1376 file = find_trace_probe_file(tp, top_trace_array());
1377 if (WARN_ON_ONCE(file == NULL)) {
1378 pr_warn("error on getting probe file.\n");
1379 warn++;
1380 } else
1381 enable_trace_probe(tp, file);
1382 }
1216 } 1383 }
1217 1384
1218 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " 1385 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1219 "$retval", create_trace_probe); 1386 "$retval", create_trace_probe);
1220 if (WARN_ON_ONCE(ret)) { 1387 if (WARN_ON_ONCE(ret)) {
1221 pr_warning("error on probing function return.\n"); 1388 pr_warn("error on probing function return.\n");
1222 warn++; 1389 warn++;
1223 } else { 1390 } else {
1224 /* Enable trace point */ 1391 /* Enable trace point */
1225 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); 1392 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1226 if (WARN_ON_ONCE(tp == NULL)) { 1393 if (WARN_ON_ONCE(tp == NULL)) {
1227 pr_warning("error on getting new probe.\n"); 1394 pr_warn("error on getting 2nd new probe.\n");
1228 warn++; 1395 warn++;
1229 } else 1396 } else {
1230 enable_trace_probe(tp, TP_FLAG_TRACE); 1397 file = find_trace_probe_file(tp, top_trace_array());
1398 if (WARN_ON_ONCE(file == NULL)) {
1399 pr_warn("error on getting probe file.\n");
1400 warn++;
1401 } else
1402 enable_trace_probe(tp, file);
1403 }
1231 } 1404 }
1232 1405
1233 if (warn) 1406 if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
 	/* Disable trace points before removing it */
 	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting test probe.\n");
+		pr_warn("error on getting test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting 2nd test probe.\n");
+		pr_warn("error on getting 2nd test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	ret = traceprobe_command("-:testprobe", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}
 
 	ret = traceprobe_command("-:testprobe2", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}
 
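What the series buys at the user level: a kprobe event defined once in kprobe_events can be enabled per trace instance, each instance getting its own buffer and its own soft-mode accounting in its ftrace_event_file. A hedged userspace illustration; the paths assume debugfs mounted at /sys/kernel/debug and a kernel of this vintage, "do_fork" and "foo" are example names, and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, s, strlen(s)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	const char *tr = "/sys/kernel/debug/tracing";
	char path[256];

	/* define the probe once, globally */
	snprintf(path, sizeof(path), "%s/kprobe_events", tr);
	write_str(path, "p:myprobe do_fork\n");

	/* create an instance; it gets its own events/.../enable files */
	snprintf(path, sizeof(path), "%s/instances/foo", tr);
	mkdir(path, 0755);

	/* enable the event only in this instance */
	snprintf(path, sizeof(path),
		 "%s/instances/foo/events/kprobes/myprobe/enable", tr);
	write_str(path, "1\n");
	return 0;
}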