Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	996
1 file changed, 632 insertions, 364 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dd3e91d68dc7..00077a57b746 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -36,6 +36,7 @@
 
 #include <trace/events/sched.h>
 
+#include <asm/sections.h>
 #include <asm/setup.h>
 
 #include "trace_output.h"
@@ -1095,22 +1096,20 @@ static bool update_all_ops;
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
-static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-
-struct ftrace_func_probe {
-	struct hlist_node	node;
-	struct ftrace_probe_ops	*ops;
-	unsigned long		flags;
-	unsigned long		ip;
-	void			*data;
-	struct list_head	free_list;
-};
-
 struct ftrace_func_entry {
 	struct hlist_node hlist;
 	unsigned long ip;
 };
 
+struct ftrace_func_probe {
+	struct ftrace_probe_ops	*probe_ops;
+	struct ftrace_ops	ops;
+	struct trace_array	*tr;
+	struct list_head	list;
+	void			*data;
+	int			ref;
+};
+
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -1271,7 +1270,7 @@ static void
 remove_hash_entry(struct ftrace_hash *hash,
 		  struct ftrace_func_entry *entry)
 {
-	hlist_del(&entry->hlist);
+	hlist_del_rcu(&entry->hlist);
 	hash->count--;
 }
 
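Editor's note (not part of the patch): the switch to hlist_del_rcu() matters because hash entries are now traversed locklessly while writers unlink them under ftrace_lock. A minimal sketch of that general pattern using the stock RCU hlist API (illustrative names only, not the ftrace code itself):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct hlist_node node;
	unsigned long key;
	struct rcu_head rcu;
};

static HLIST_HEAD(items);

/* Writer side: unlink with the RCU-aware helper, free after a grace period. */
static void remove_item(struct item *it)
{
	hlist_del_rcu(&it->node);	/* concurrent readers may still see it */
	kfree_rcu(it, rcu);		/* freed only once all readers are done */
}

/* Reader side: traversal is safe against concurrent hlist_del_rcu(). */
static bool item_exists(unsigned long key)
{
	struct item *it;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, &items, node) {
		if (it->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}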
@@ -2807,18 +2806,28 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * callers are done before leaving this function.
 	 * The same goes for freeing the per_cpu data of the per_cpu
 	 * ops.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
-	 * We need to do a hard force of sched synchronization.
-	 * This is because we use preempt_disable() to do RCU, but
-	 * the function tracers can be called where RCU is not watching
-	 * (like before user_exit()). We can not rely on the RCU
-	 * infrastructure to do the synchronization, thus we must do it
-	 * ourselves.
 	 */
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+		/*
+		 * We need to do a hard force of sched synchronization.
+		 * This is because we use preempt_disable() to do RCU, but
+		 * the function tracers can be called where RCU is not watching
+		 * (like before user_exit()). We can not rely on the RCU
+		 * infrastructure to do the synchronization, thus we must do it
+		 * ourselves.
+		 */
 		schedule_on_each_cpu(ftrace_sync);
 
+		/*
+		 * When the kernel is preemptive, tasks can be preempted
+		 * while on an ftrace trampoline. Just scheduling a task on
+		 * a CPU is not good enough to flush them. Calling
+		 * synchronize_rcu_tasks() will wait for those tasks to
+		 * execute and either schedule voluntarily or enter user space.
+		 */
+		if (IS_ENABLED(CONFIG_PREEMPT))
+			synchronize_rcu_tasks();
+
 		arch_ftrace_trampoline_free(ops);
 
 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
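Editor's note (not part of the patch): the synchronization above is what makes it safe for a caller to free a dynamically allocated ftrace_ops (and for the core to free its trampoline) once the ops is unregistered. A hedged sketch of that caller-side lifecycle; my_callback/start_tracing/stop_tracing are hypothetical module code, only register_ftrace_function()/unregister_ftrace_function() are real APIs:

#include <linux/ftrace.h>
#include <linux/slab.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs at every traced function entry; must be extremely careful */
}

static int start_tracing(struct ftrace_ops **opsp)
{
	struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

	if (!ops)
		return -ENOMEM;
	ops->func = my_callback;
	*opsp = ops;
	return register_ftrace_function(ops);
}

static void stop_tracing(struct ftrace_ops *ops)
{
	/*
	 * unregister_ftrace_function() ends up in ftrace_shutdown(),
	 * which performs the sched / RCU-tasks synchronization shown
	 * above; when it returns, no CPU can still be inside
	 * my_callback or on a trampoline that calls it, so kfree()
	 * is safe.
	 */
	unregister_ftrace_function(ops);
	kfree(ops);
}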
@@ -3055,34 +3064,63 @@ struct ftrace_iterator {
 	struct ftrace_page		*pg;
 	struct dyn_ftrace		*func;
 	struct ftrace_func_probe	*probe;
+	struct ftrace_func_entry	*probe_entry;
 	struct trace_parser		parser;
 	struct ftrace_hash		*hash;
 	struct ftrace_ops		*ops;
-	int				hidx;
+	int				pidx;
 	int				idx;
 	unsigned			flags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, loff_t *pos)
+t_probe_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
+	struct trace_array *tr = iter->ops->private;
+	struct list_head *func_probes;
+	struct ftrace_hash *hash;
+	struct list_head *next;
 	struct hlist_node *hnd = NULL;
 	struct hlist_head *hhd;
+	int size;
 
 	(*pos)++;
 	iter->pos = *pos;
 
-	if (iter->probe)
-		hnd = &iter->probe->node;
- retry:
-	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+	if (!tr)
 		return NULL;
 
-	hhd = &ftrace_func_hash[iter->hidx];
+	func_probes = &tr->func_probes;
+	if (list_empty(func_probes))
+		return NULL;
+
+	if (!iter->probe) {
+		next = func_probes->next;
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
+	}
+
+	if (iter->probe_entry)
+		hnd = &iter->probe_entry->hlist;
+
+	hash = iter->probe->ops.func_hash->filter_hash;
+	size = 1 << hash->size_bits;
+
+ retry:
+	if (iter->pidx >= size) {
+		if (iter->probe->list.next == func_probes)
+			return NULL;
+		next = iter->probe->list.next;
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
+		hash = iter->probe->ops.func_hash->filter_hash;
+		size = 1 << hash->size_bits;
+		iter->pidx = 0;
+	}
+
+	hhd = &hash->buckets[iter->pidx];
 
 	if (hlist_empty(hhd)) {
-		iter->hidx++;
+		iter->pidx++;
 		hnd = NULL;
 		goto retry;
 	}
@@ -3092,7 +3130,7 @@ t_hash_next(struct seq_file *m, loff_t *pos)
 	else {
 		hnd = hnd->next;
 		if (!hnd) {
-			iter->hidx++;
+			iter->pidx++;
 			goto retry;
 		}
 	}
@@ -3100,26 +3138,28 @@ t_hash_next(struct seq_file *m, loff_t *pos)
 	if (WARN_ON_ONCE(!hnd))
 		return NULL;
 
-	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
 
 	return iter;
 }
 
-static void *t_hash_start(struct seq_file *m, loff_t *pos)
+static void *t_probe_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
 	loff_t l;
 
-	if (!(iter->flags & FTRACE_ITER_DO_HASH))
+	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
 		return NULL;
 
 	if (iter->func_pos > *pos)
 		return NULL;
 
-	iter->hidx = 0;
+	iter->probe = NULL;
+	iter->probe_entry = NULL;
+	iter->pidx = 0;
 	for (l = 0; l <= (*pos - iter->func_pos); ) {
-		p = t_hash_next(m, &l);
+		p = t_probe_next(m, &l);
 		if (!p)
 			break;
 	}
@@ -3127,50 +3167,42 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 		return NULL;
 
 	/* Only set this if we have an item */
-	iter->flags |= FTRACE_ITER_HASH;
+	iter->flags |= FTRACE_ITER_PROBE;
 
 	return iter;
 }
 
 static int
-t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
+t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
-	struct ftrace_func_probe *rec;
+	struct ftrace_func_entry *probe_entry;
+	struct ftrace_probe_ops *probe_ops;
+	struct ftrace_func_probe *probe;
+
+	probe = iter->probe;
+	probe_entry = iter->probe_entry;
 
-	rec = iter->probe;
-	if (WARN_ON_ONCE(!rec))
+	if (WARN_ON_ONCE(!probe || !probe_entry))
 		return -EIO;
 
-	if (rec->ops->print)
-		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+	probe_ops = probe->probe_ops;
 
-	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
+	if (probe_ops->print)
+		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
 
-	if (rec->data)
-		seq_printf(m, ":%p", rec->data);
-	seq_putc(m, '\n');
+	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
+		   (void *)probe_ops->func);
 
 	return 0;
 }
 
 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_func_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct ftrace_ops *ops = iter->ops;
 	struct dyn_ftrace *rec = NULL;
 
-	if (unlikely(ftrace_disabled))
-		return NULL;
-
-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_next(m, pos);
-
 	(*pos)++;
-	iter->pos = iter->func_pos = *pos;
-
-	if (iter->flags & FTRACE_ITER_PRINTALL)
-		return t_hash_start(m, pos);
 
  retry:
 	if (iter->idx >= iter->pg->index) {
@@ -3181,11 +3213,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
-		if (((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
-
-		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
+		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
+		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
 		     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -3196,24 +3225,51 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}
 
 	if (!rec)
-		return t_hash_start(m, pos);
+		return NULL;
 
+	iter->pos = iter->func_pos = *pos;
 	iter->func = rec;
 
 	return iter;
 }
 
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_iterator *iter = m->private;
+	loff_t l = *pos; /* t_probe_start() must use original pos */
+	void *ret;
+
+	if (unlikely(ftrace_disabled))
+		return NULL;
+
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_next(m, pos);
+
+	if (iter->flags & FTRACE_ITER_PRINTALL) {
+		/* next must increment pos, and t_probe_start does not */
+		(*pos)++;
+		return t_probe_start(m, &l);
+	}
+
+	ret = t_func_next(m, pos);
+
+	if (!ret)
+		return t_probe_start(m, &l);
+
+	return ret;
+}
+
 static void reset_iter_read(struct ftrace_iterator *iter)
 {
 	iter->pos = 0;
 	iter->func_pos = 0;
-	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
+	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
-	struct ftrace_ops *ops = iter->ops;
 	void *p = NULL;
 	loff_t l;
 
@@ -3233,20 +3289,19 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * off, we can short cut and just print out that all
 	 * functions are enabled.
 	 */
-	if ((iter->flags & FTRACE_ITER_FILTER &&
-	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
-	    (iter->flags & FTRACE_ITER_NOTRACE &&
-	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
+	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
+	    ftrace_hash_empty(iter->hash)) {
+		iter->func_pos = 1; /* Account for the message */
 		if (*pos > 0)
-			return t_hash_start(m, pos);
+			return t_probe_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
 		/* reset in case of seek/pread */
-		iter->flags &= ~FTRACE_ITER_HASH;
+		iter->flags &= ~FTRACE_ITER_PROBE;
 		return iter;
 	}
 
-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_start(m, pos);
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_start(m, pos);
 
 	/*
 	 * Unfortunately, we need to restart at ftrace_pages_start
@@ -3256,13 +3311,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	iter->pg = ftrace_pages_start;
 	iter->idx = 0;
 	for (l = 0; l <= *pos; ) {
-		p = t_next(m, p, &l);
+		p = t_func_next(m, &l);
 		if (!p)
 			break;
 	}
 
 	if (!p)
-		return t_hash_start(m, pos);
+		return t_probe_start(m, pos);
 
 	return iter;
 }
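Editor's note (not part of the patch): the t_start()/t_next()/t_probe_start() split above implements the standard seq_file contract: start() must be able to reposition from an arbitrary *pos after a seek or pread, and next() must always advance *pos, even on the transition into the probe list. A self-contained sketch of that contract on a plain array (illustrative only, not the ftrace iterator itself):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int my_values[] = { 10, 20, 30 };
#define MY_COUNT ARRAY_SIZE(my_values)

/* start() may be called repeatedly with an arbitrary *pos (seek/pread). */
static void *my_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= MY_COUNT)
		return NULL;
	return &my_values[*pos];
}

/* next() must increment *pos even when it is about to return NULL. */
static void *my_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= MY_COUNT)
		return NULL;
	return &my_values[*pos];
}

static void my_stop(struct seq_file *m, void *v)
{
	/* release any locks taken in my_start() */
}

static int my_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_start,
	.next  = my_next,
	.stop  = my_stop,
	.show  = my_show,
};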
@@ -3293,8 +3348,8 @@ static int t_show(struct seq_file *m, void *v)
 	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec;
 
-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_show(m, iter);
+	if (iter->flags & FTRACE_ITER_PROBE)
+		return t_probe_show(m, iter);
 
 	if (iter->flags & FTRACE_ITER_PRINTALL) {
 		if (iter->flags & FTRACE_ITER_NOTRACE)
@@ -3355,12 +3410,13 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
-	if (iter) {
-		iter->pg = ftrace_pages_start;
-		iter->ops = &global_ops;
-	}
+	if (!iter)
+		return -ENOMEM;
 
-	return iter ? 0 : -ENOMEM;
+	iter->pg = ftrace_pages_start;
+	iter->ops = &global_ops;
+
+	return 0;
 }
 
 static int
@@ -3369,13 +3425,14 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 	struct ftrace_iterator *iter;
 
 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
-	if (iter) {
-		iter->pg = ftrace_pages_start;
-		iter->flags = FTRACE_ITER_ENABLED;
-		iter->ops = &global_ops;
-	}
+	if (!iter)
+		return -ENOMEM;
 
-	return iter ? 0 : -ENOMEM;
+	iter->pg = ftrace_pages_start;
+	iter->flags = FTRACE_ITER_ENABLED;
+	iter->ops = &global_ops;
+
+	return 0;
 }
 
 /**
@@ -3440,7 +3497,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 			ret = -ENOMEM;
 			goto out_unlock;
 		}
-	}
+	} else
+		iter->hash = hash;
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
@@ -3470,7 +3528,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 	struct ftrace_ops *ops = inode->i_private;
 
 	return ftrace_regex_open(ops,
-			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
+			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
 			inode, file);
 }
 
@@ -3654,6 +3712,56 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
 	return match_records(hash, buff, len, NULL);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+				   struct ftrace_ops_hash *old_hash)
+{
+	struct ftrace_ops *op;
+
+	if (!ftrace_enabled)
+		return;
+
+	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+		return;
+	}
+
+	/*
+	 * If this is the shared global_ops filter, then we need to
+	 * check if another ops that shares it is enabled.
+	 * If so, we still need to run the modify code.
+	 */
+	if (ops->func_hash != &global_ops.local_hash)
+		return;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->func_hash == &global_ops.local_hash &&
+		    op->flags & FTRACE_OPS_FL_ENABLED) {
+			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+			/* Only need to do this once */
+			return;
+		}
+	} while_for_each_ftrace_op(op);
+}
+
+static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
+					   struct ftrace_hash **orig_hash,
+					   struct ftrace_hash *hash,
+					   int enable)
+{
+	struct ftrace_ops_hash old_hash_ops;
+	struct ftrace_hash *old_hash;
+	int ret;
+
+	old_hash = *orig_hash;
+	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret) {
+		ftrace_ops_update_code(ops, &old_hash_ops);
+		free_ftrace_hash_rcu(old_hash);
+	}
+	return ret;
+}
 
 /*
  * We register the module command as a template to show others how
@@ -3661,7 +3769,7 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
  */
 
 static int
-ftrace_mod_callback(struct ftrace_hash *hash,
+ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
 		    char *func, char *cmd, char *module, int enable)
 {
 	int ret;
@@ -3695,16 +3803,11 @@ core_initcall(ftrace_mod_cmd_init);
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct ftrace_func_probe *entry;
-	struct hlist_head *hhd;
-	unsigned long key;
+	struct ftrace_probe_ops *probe_ops;
+	struct ftrace_func_probe *probe;
 
-	key = hash_long(ip, FTRACE_HASH_BITS);
-
-	hhd = &ftrace_func_hash[key];
-
-	if (hlist_empty(hhd))
-		return;
+	probe = container_of(op, struct ftrace_func_probe, ops);
+	probe_ops = probe->probe_ops;
 
 	/*
 	 * Disable preemption for these calls to prevent a RCU grace
@@ -3712,209 +3815,336 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
-		if (entry->ip == ip)
-			entry->ops->func(ip, parent_ip, &entry->data);
-	}
+	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
 	preempt_enable_notrace();
 }
 
-static struct ftrace_ops trace_probe_ops __read_mostly =
-{
-	.func		= function_trace_probe_call,
-	.flags		= FTRACE_OPS_FL_INITIALIZED,
-	INIT_OPS_HASH(trace_probe_ops)
+struct ftrace_func_map {
+	struct ftrace_func_entry	entry;
+	void				*data;
 };
 
-static int ftrace_probe_registered;
+struct ftrace_func_mapper {
+	struct ftrace_hash		hash;
+};
 
-static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
+/**
+ * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
+ *
+ * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
+ */
+struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
 {
-	int ret;
-	int i;
+	struct ftrace_hash *hash;
 
-	if (ftrace_probe_registered) {
-		/* still need to update the function call sites */
-		if (ftrace_enabled)
-			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
-					       old_hash);
-		return;
-	}
+	/*
+	 * The mapper is simply a ftrace_hash, but since the entries
+	 * in the hash are not ftrace_func_entry type, we define it
+	 * as a separate structure.
+	 */
+	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	return (struct ftrace_func_mapper *)hash;
+}
 
-	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
-		struct hlist_head *hhd = &ftrace_func_hash[i];
-		if (hhd->first)
-			break;
-	}
-	/* Nothing registered? */
-	if (i == FTRACE_FUNC_HASHSIZE)
-		return;
+/**
+ * ftrace_func_mapper_find_ip - Find some data mapped to an ip
+ * @mapper: The mapper that has the ip maps
+ * @ip: the instruction pointer to find the data for
+ *
+ * Returns the data mapped to @ip if found otherwise NULL. The return
+ * is actually the address of the mapper data pointer. The address is
+ * returned for use cases where the data is no bigger than a long, and
+ * the user can use the data pointer as its data instead of having to
+ * allocate more memory for the reference.
+ */
+void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
+				  unsigned long ip)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
 
-	ret = ftrace_startup(&trace_probe_ops, 0);
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (!entry)
+		return NULL;
 
-	ftrace_probe_registered = 1;
+	map = (struct ftrace_func_map *)entry;
+	return &map->data;
 }
 
-static bool __disable_ftrace_function_probe(void)
+/**
+ * ftrace_func_mapper_add_ip - Map some data to an ip
+ * @mapper: The mapper that has the ip maps
+ * @ip: The instruction pointer address to map @data to
+ * @data: The data to map to @ip
+ *
+ * Returns 0 on success otherwise an error.
+ */
+int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
+			      unsigned long ip, void *data)
 {
-	int i;
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
 
-	if (!ftrace_probe_registered)
-		return false;
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (entry)
+		return -EBUSY;
 
-	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
-		struct hlist_head *hhd = &ftrace_func_hash[i];
-		if (hhd->first)
-			return false;
-	}
+	map = kmalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
 
-	/* no more funcs left */
-	ftrace_shutdown(&trace_probe_ops, 0);
+	map->entry.ip = ip;
+	map->data = data;
 
-	ftrace_probe_registered = 0;
-	return true;
-}
+	__add_hash_entry(&mapper->hash, &map->entry);
 
+	return 0;
+}
 
-static void ftrace_free_entry(struct ftrace_func_probe *entry)
+/**
+ * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
+ * @mapper: The mapper that has the ip maps
+ * @ip: The instruction pointer address to remove the data from
+ *
+ * Returns the data if it is found, otherwise NULL.
+ * Note, if the data pointer is used as the data itself (see
+ * ftrace_func_mapper_find_ip()), then the return value may be meaningless,
+ * if the data pointer was set to zero.
+ */
+void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
+				   unsigned long ip)
 {
-	if (entry->ops->free)
-		entry->ops->free(entry->ops, entry->ip, &entry->data);
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+	void *data;
+
+	entry = ftrace_lookup_ip(&mapper->hash, ip);
+	if (!entry)
+		return NULL;
+
+	map = (struct ftrace_func_map *)entry;
+	data = map->data;
+
+	remove_hash_entry(&mapper->hash, entry);
 	kfree(entry);
+
+	return data;
+}
+
+/**
+ * free_ftrace_func_mapper - free a mapping of ips and data
+ * @mapper: The mapper that has the ip maps
+ * @free_func: A function to be called on each data item.
+ *
+ * This is used to free the function mapper. The @free_func is optional
+ * and can be used if the data needs to be freed as well.
+ */
+void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
+			     ftrace_mapper_func free_func)
+{
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_map *map;
+	struct hlist_head *hhd;
+	int size = 1 << mapper->hash.size_bits;
+	int i;
+
+	if (free_func && mapper->hash.count) {
+		for (i = 0; i < size; i++) {
+			hhd = &mapper->hash.buckets[i];
+			hlist_for_each_entry(entry, hhd, hlist) {
+				map = (struct ftrace_func_map *)entry;
+				free_func(map);
+			}
+		}
+	}
+	free_ftrace_hash(&mapper->hash);
+}
+
+static void release_probe(struct ftrace_func_probe *probe)
+{
+	struct ftrace_probe_ops *probe_ops;
+
+	mutex_lock(&ftrace_lock);
+
+	WARN_ON(probe->ref <= 0);
+
+	/* Subtract the ref that was used to protect this instance */
+	probe->ref--;
+
+	if (!probe->ref) {
+		probe_ops = probe->probe_ops;
+		/*
+		 * Sending zero as ip tells probe_ops to free
+		 * the probe->data itself
+		 */
+		if (probe_ops->free)
+			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
+		list_del(&probe->list);
+		kfree(probe);
+	}
+	mutex_unlock(&ftrace_lock);
+}
+
+static void acquire_probe_locked(struct ftrace_func_probe *probe)
+{
+	/*
+	 * Add one ref to keep it from being freed when releasing the
+	 * ftrace_lock mutex.
+	 */
+	probe->ref++;
 }
 
 int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-			       void *data)
+register_ftrace_function_probe(char *glob, struct trace_array *tr,
+			       struct ftrace_probe_ops *probe_ops,
+			       void *data)
 {
-	struct ftrace_ops_hash old_hash_ops;
-	struct ftrace_func_probe *entry;
-	struct ftrace_glob func_g;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
-	struct ftrace_hash *old_hash = *orig_hash;
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_probe *probe;
+	struct ftrace_hash **orig_hash;
+	struct ftrace_hash *old_hash;
 	struct ftrace_hash *hash;
-	struct ftrace_page *pg;
-	struct dyn_ftrace *rec;
-	int not;
-	unsigned long key;
 	int count = 0;
+	int size;
 	int ret;
+	int i;
 
-	func_g.type = filter_parse_regex(glob, strlen(glob),
-					 &func_g.search, &not);
-	func_g.len = strlen(func_g.search);
-
-	/* we do not support '!' for function probes */
-	if (WARN_ON(not))
+	if (WARN_ON(!tr))
 		return -EINVAL;
 
-	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+	/* We do not support '!' for function probes */
+	if (WARN_ON(glob[0] == '!'))
+		return -EINVAL;
 
-	old_hash_ops.filter_hash = old_hash;
-	/* Probes only have filters */
-	old_hash_ops.notrace_hash = NULL;
 
-	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
-	if (!hash) {
-		count = -ENOMEM;
-		goto out;
+	mutex_lock(&ftrace_lock);
+	/* Check if the probe_ops is already registered */
+	list_for_each_entry(probe, &tr->func_probes, list) {
+		if (probe->probe_ops == probe_ops)
+			break;
 	}
-
-	if (unlikely(ftrace_disabled)) {
-		count = -ENODEV;
-		goto out;
+	if (&probe->list == &tr->func_probes) {
+		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
+		if (!probe) {
+			mutex_unlock(&ftrace_lock);
+			return -ENOMEM;
+		}
+		probe->probe_ops = probe_ops;
+		probe->ops.func = function_trace_probe_call;
+		probe->tr = tr;
+		ftrace_ops_init(&probe->ops);
+		list_add(&probe->list, &tr->func_probes);
 	}
 
-	mutex_lock(&ftrace_lock);
+	acquire_probe_locked(probe);
 
-	do_for_each_ftrace_rec(pg, rec) {
+	mutex_unlock(&ftrace_lock);
 
-		if (rec->flags & FTRACE_FL_DISABLED)
-			continue;
+	mutex_lock(&probe->ops.func_hash->regex_lock);
 
-		if (!ftrace_match_record(rec, &func_g, NULL, 0))
-			continue;
+	orig_hash = &probe->ops.func_hash->filter_hash;
+	old_hash = *orig_hash;
+	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
-		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-		if (!entry) {
-			/* If we did not process any, then return error */
-			if (!count)
-				count = -ENOMEM;
-			goto out_unlock;
-		}
+	ret = ftrace_match_records(hash, glob, strlen(glob));
 
-		count++;
+	/* Nothing found? */
+	if (!ret)
+		ret = -EINVAL;
 
-		entry->data = data;
+	if (ret < 0)
+		goto out;
 
-		/*
-		 * The caller might want to do something special
-		 * for each function we find. We call the callback
-		 * to give the caller an opportunity to do so.
-		 */
-		if (ops->init) {
-			if (ops->init(ops, rec->ip, &entry->data) < 0) {
-				/* caller does not like this func */
-				kfree(entry);
+	size = 1 << hash->size_bits;
+	for (i = 0; i < size; i++) {
+		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+			if (ftrace_lookup_ip(old_hash, entry->ip))
 				continue;
+			/*
+			 * The caller might want to do something special
+			 * for each function we find. We call the callback
+			 * to give the caller an opportunity to do so.
+			 */
+			if (probe_ops->init) {
+				ret = probe_ops->init(probe_ops, tr,
+						      entry->ip, data,
+						      &probe->data);
+				if (ret < 0) {
+					if (probe_ops->free && count)
+						probe_ops->free(probe_ops, tr,
+								0, probe->data);
+					probe->data = NULL;
+					goto out;
+				}
 			}
+			count++;
 		}
+	}
 
-		ret = enter_record(hash, rec, 0);
-		if (ret < 0) {
-			kfree(entry);
-			count = ret;
-			goto out_unlock;
-		}
-
-		entry->ops = ops;
-		entry->ip = rec->ip;
-
-		key = hash_long(entry->ip, FTRACE_HASH_BITS);
-		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+	mutex_lock(&ftrace_lock);
 
-	} while_for_each_ftrace_rec();
+	if (!count) {
+		/* Nothing was added? */
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
-	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
+					      hash, 1);
+	if (ret < 0)
+		goto err_unlock;
 
-	__enable_ftrace_function_probe(&old_hash_ops);
+	/* One ref for each new function traced */
+	probe->ref += count;
 
-	if (!ret)
-		free_ftrace_hash_rcu(old_hash);
-	else
-		count = ret;
+	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
+		ret = ftrace_startup(&probe->ops, 0);
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+
+	if (!ret)
+		ret = count;
  out:
-	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
+	mutex_unlock(&probe->ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 
-	return count;
-}
+	release_probe(probe);
 
-enum {
-	PROBE_TEST_FUNC		= 1,
-	PROBE_TEST_DATA		= 2
-};
+	return ret;
 
-static void
-__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-				  void *data, int flags)
+ err_unlock:
+	if (!probe_ops->free || !count)
+		goto out_unlock;
+
+	/* Failed to do the move, need to call the free functions */
+	for (i = 0; i < size; i++) {
+		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+			if (ftrace_lookup_ip(old_hash, entry->ip))
+				continue;
+			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
+		}
+	}
+	goto out_unlock;
+}
+
+int
+unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
+				      struct ftrace_probe_ops *probe_ops)
 {
 	struct ftrace_ops_hash old_hash_ops;
-	struct ftrace_func_entry *rec_entry;
-	struct ftrace_func_probe *entry;
-	struct ftrace_func_probe *p;
+	struct ftrace_func_entry *entry;
+	struct ftrace_func_probe *probe;
 	struct ftrace_glob func_g;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
-	struct ftrace_hash *old_hash = *orig_hash;
-	struct list_head free_list;
-	struct ftrace_hash *hash;
+	struct ftrace_hash **orig_hash;
+	struct ftrace_hash *old_hash;
+	struct ftrace_hash *hash = NULL;
 	struct hlist_node *tmp;
+	struct hlist_head hhd;
 	char str[KSYM_SYMBOL_LEN];
-	int i, ret;
-	bool disabled;
+	int count = 0;
+	int i, ret = -ENODEV;
+	int size;
 
 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		func_g.search = NULL;
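Editor's note (not part of the patch): the mapper functions introduced above give probe implementations a ready-made ip-to-data store. A hedged sketch of how a caller might use them; my_mapper and the helper names are hypothetical, but the signatures are the ones added in this hunk:

/* Sketch only: assumes kernel context and the mapper API above. */
static struct ftrace_func_mapper *my_mapper;

static int my_attach(unsigned long ip, void *data)
{
	if (!my_mapper) {
		my_mapper = allocate_ftrace_func_mapper();
		if (!my_mapper)
			return -ENOMEM;
	}

	/* Returns -EBUSY if @ip is already mapped. */
	return ftrace_func_mapper_add_ip(my_mapper, ip, data);
}

static void *my_lookup(unsigned long ip)
{
	void **pdata;

	/*
	 * find_ip returns the address of the stored pointer, so a value
	 * no bigger than a long can live directly in the slot without
	 * a separate allocation.
	 */
	pdata = ftrace_func_mapper_find_ip(my_mapper, ip);
	return pdata ? *pdata : NULL;
}

static void my_detach(unsigned long ip)
{
	/* remove_ip hands back the stored data so the caller can free it. */
	void *data = ftrace_func_mapper_remove_ip(my_mapper, ip);

	kfree(data);
}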
@@ -3928,95 +4158,104 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 		/* we do not support '!' for function probes */
 		if (WARN_ON(not))
-			return;
+			return -EINVAL;
+	}
+
+	mutex_lock(&ftrace_lock);
+	/* Check if the probe_ops is already registered */
+	list_for_each_entry(probe, &tr->func_probes, list) {
+		if (probe->probe_ops == probe_ops)
+			break;
 	}
+	if (&probe->list == &tr->func_probes)
+		goto err_unlock_ftrace;
+
+	ret = -EINVAL;
+	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
+		goto err_unlock_ftrace;
+
+	acquire_probe_locked(probe);
+
+	mutex_unlock(&ftrace_lock);
 
-	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+	mutex_lock(&probe->ops.func_hash->regex_lock);
+
+	orig_hash = &probe->ops.func_hash->filter_hash;
+	old_hash = *orig_hash;
+
+	if (ftrace_hash_empty(old_hash))
+		goto out_unlock;
 
 	old_hash_ops.filter_hash = old_hash;
 	/* Probes only have filters */
 	old_hash_ops.notrace_hash = NULL;
 
-	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	ret = -ENOMEM;
+	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 	if (!hash)
-		/* Hmm, should report this somehow */
 		goto out_unlock;
 
-	INIT_LIST_HEAD(&free_list);
-
-	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
-		struct hlist_head *hhd = &ftrace_func_hash[i];
+	INIT_HLIST_HEAD(&hhd);
 
-		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
-
-			/* break up if statements for readability */
-			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
-				continue;
-
-			if ((flags & PROBE_TEST_DATA) && entry->data != data)
-				continue;
+	size = 1 << hash->size_bits;
+	for (i = 0; i < size; i++) {
+		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
 
-			/* do this last, since it is the most expensive */
 			if (func_g.search) {
 				kallsyms_lookup(entry->ip, NULL, NULL,
 						NULL, str);
 				if (!ftrace_match(str, &func_g))
 					continue;
 			}
-
-			rec_entry = ftrace_lookup_ip(hash, entry->ip);
-			/* It is possible more than one entry had this ip */
-			if (rec_entry)
-				free_hash_entry(hash, rec_entry);
-
-			hlist_del_rcu(&entry->node);
-			list_add(&entry->free_list, &free_list);
+			count++;
+			remove_hash_entry(hash, entry);
+			hlist_add_head(&entry->hlist, &hhd);
 		}
 	}
+
+	/* Nothing found? */
+	if (!count) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	mutex_lock(&ftrace_lock);
-	disabled = __disable_ftrace_function_probe();
-	/*
-	 * Remove after the disable is called. Otherwise, if the last
-	 * probe is removed, a null hash means *all enabled*.
-	 */
-	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	WARN_ON(probe->ref < count);
+
+	probe->ref -= count;
+
+	if (ftrace_hash_empty(hash))
+		ftrace_shutdown(&probe->ops, 0);
+
+	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
+					      hash, 1);
 
 	/* still need to update the function call sites */
-	if (ftrace_enabled && !disabled)
-		ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+	if (ftrace_enabled && !ftrace_hash_empty(hash))
+		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
 				       &old_hash_ops);
 	synchronize_sched();
-	if (!ret)
-		free_ftrace_hash_rcu(old_hash);
 
-	list_for_each_entry_safe(entry, p, &free_list, free_list) {
-		list_del(&entry->free_list);
-		ftrace_free_entry(entry);
+	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
+		hlist_del(&entry->hlist);
+		if (probe_ops->free)
+			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
+		kfree(entry);
 	}
 	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
+	mutex_unlock(&probe->ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
-}
 
-void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-				 void *data)
-{
-	__unregister_ftrace_function_probe(glob, ops, data,
-					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
-}
+	release_probe(probe);
 
-void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
-{
-	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
-}
+	return ret;
 
-void unregister_ftrace_function_probe_all(char *glob)
-{
-	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
-}
+ err_unlock_ftrace:
+	mutex_unlock(&ftrace_lock);
+	return ret;
+}
 
 static LIST_HEAD(ftrace_commands);
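Editor's note (not part of the patch): with registration now per trace_array and reference counted, a probe implementation supplies a ftrace_probe_ops and lets the core manage the embedded ftrace_ops. A sketch of the shape of such an implementation; the my_probe_* names are hypothetical, and the callback signatures are inferred from the call sites visible in this diff:

static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
	      struct trace_array *tr, struct ftrace_probe_ops *ops,
	      void *data)
{
	/* called from function_trace_probe_call() for each matched ip */
}

static int
my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *init_data, void **data)
{
	/* *data is the per-probe storage the core keeps in probe->data */
	return 0;
}

static void
my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *data)
{
	/* ip == 0 means "free the probe-wide data itself" */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
	.init	= my_probe_init,
	.free	= my_probe_free,
};

/*
 * Registration is per trace_array and returns the number of functions
 * matched (or a negative error):
 *
 *	ret = register_ftrace_function_probe("sched_*", tr,
 *					     &my_probe_ops, NULL);
 *	...
 *	ret = unregister_ftrace_function_probe_func("sched_*", tr,
 *						    &my_probe_ops);
 */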
@@ -4068,9 +4307,11 @@ __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 	return ret;
 }
 
-static int ftrace_process_regex(struct ftrace_hash *hash,
+static int ftrace_process_regex(struct ftrace_iterator *iter,
 				char *buff, int len, int enable)
 {
+	struct ftrace_hash *hash = iter->hash;
+	struct trace_array *tr = iter->ops->private;
 	char *func, *command, *next = buff;
 	struct ftrace_func_command *p;
 	int ret = -EINVAL;
@@ -4090,10 +4331,13 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
 
 	command = strsep(&next, ":");
 
+	if (WARN_ON_ONCE(!tr))
+		return -EINVAL;
+
 	mutex_lock(&ftrace_cmd_mutex);
 	list_for_each_entry(p, &ftrace_commands, list) {
 		if (strcmp(p->name, command) == 0) {
-			ret = p->func(hash, func, command, next, enable);
+			ret = p->func(tr, hash, func, command, next, enable);
 			goto out_unlock;
 		}
 	}
@@ -4130,7 +4374,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 
 	if (read >= 0 && trace_parser_loaded(parser) &&
 	    !trace_parser_cont(parser)) {
-		ret = ftrace_process_regex(iter->hash, parser->buffer,
+		ret = ftrace_process_regex(iter, parser->buffer,
 					   parser->idx, enable);
 		trace_parser_clear(parser);
 		if (ret < 0)
@@ -4175,44 +4419,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 	return add_hash_entry(hash, ip);
 }
 
-static void ftrace_ops_update_code(struct ftrace_ops *ops,
-				   struct ftrace_ops_hash *old_hash)
-{
-	struct ftrace_ops *op;
-
-	if (!ftrace_enabled)
-		return;
-
-	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
-		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
-		return;
-	}
-
-	/*
-	 * If this is the shared global_ops filter, then we need to
-	 * check if there is another ops that shares it, is enabled.
-	 * If so, we still need to run the modify code.
-	 */
-	if (ops->func_hash != &global_ops.local_hash)
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (op->func_hash == &global_ops.local_hash &&
-		    op->flags & FTRACE_OPS_FL_ENABLED) {
-			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
-			/* Only need to do this once */
-			return;
-		}
-	} while_for_each_ftrace_op(op);
-}
-
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 		unsigned long ip, int remove, int reset, int enable)
 {
 	struct ftrace_hash **orig_hash;
-	struct ftrace_ops_hash old_hash_ops;
-	struct ftrace_hash *old_hash;
 	struct ftrace_hash *hash;
 	int ret;
 
@@ -4247,14 +4458,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	}
 
 	mutex_lock(&ftrace_lock);
-	old_hash = *orig_hash;
-	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
-	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
-	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret) {
-		ftrace_ops_update_code(ops, &old_hash_ops);
-		free_ftrace_hash_rcu(old_hash);
-	}
+	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
@@ -4493,10 +4697,8 @@ static void __init set_ftrace_early_filters(void)
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
-	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_iterator *iter;
 	struct ftrace_hash **orig_hash;
-	struct ftrace_hash *old_hash;
 	struct trace_parser *parser;
 	int filter_hash;
 	int ret;
@@ -4526,16 +4728,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		orig_hash = &iter->ops->func_hash->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		old_hash = *orig_hash;
-		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
-		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
-		ret = ftrace_hash_move(iter->ops, filter_hash,
-				       orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_ops_update_code(iter->ops, &old_hash_ops);
-			free_ftrace_hash_rcu(old_hash);
-		}
+		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
+						      iter->hash, filter_hash);
 		mutex_unlock(&ftrace_lock);
+	} else {
+		/* For read only, the hash is the ops hash */
+		iter->hash = NULL;
 	}
 
 	mutex_unlock(&iter->ops->func_hash->regex_lock);
@@ -5274,6 +5472,50 @@ void ftrace_module_init(struct module *mod)
 }
 #endif /* CONFIG_MODULES */
 
+void __init ftrace_free_init_mem(void)
+{
+	unsigned long start = (unsigned long)(&__init_begin);
+	unsigned long end = (unsigned long)(&__init_end);
+	struct ftrace_page **last_pg = &ftrace_pages_start;
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	struct dyn_ftrace key;
+	int order;
+
+	key.ip = start;
+	key.flags = end;	/* overload flags, as it is unsigned long */
+
+	mutex_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
+		if (end < pg->records[0].ip ||
+		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
+			continue;
+ again:
+		rec = bsearch(&key, pg->records, pg->index,
+			      sizeof(struct dyn_ftrace),
+			      ftrace_cmp_recs);
+		if (!rec)
+			continue;
+		pg->index--;
+		if (!pg->index) {
+			*last_pg = pg->next;
+			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+			free_pages((unsigned long)pg->records, order);
+			kfree(pg);
+			pg = container_of(last_pg, struct ftrace_page, next);
+			if (!(*last_pg))
+				ftrace_pages = pg;
+			continue;
+		}
+		memmove(rec, rec + 1,
+			(pg->index - (rec - pg->records)) * sizeof(*rec));
+		/* More than one function may be in this block */
+		goto again;
+	}
+	mutex_unlock(&ftrace_lock);
+}
+
 void __init ftrace_init(void)
 {
 	extern unsigned long __start_mcount_loc[];
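Editor's note (not part of the patch): ftrace_free_init_mem() leans on a range-keyed bsearch(): key.ip holds the start of init text and key.flags is overloaded to carry the end, so the comparator (ftrace_cmp_recs, not shown in this hunk) can report a match for any record inside the range. A standalone userspace sketch of that pattern, under the assumption that the comparator behaves as the overloading comment describes:

#include <stdio.h>
#include <stdlib.h>

struct rec {
	unsigned long ip;
	unsigned long flags;	/* in the key, overloaded as the range end */
};

/* Returns 0 when the element's ip falls inside the key's [ip, flags) range. */
static int cmp_range(const void *a, const void *b)
{
	const struct rec *key = a;
	const struct rec *elem = b;

	if (key->flags <= elem->ip)	/* range ends before this element */
		return -1;
	if (key->ip > elem->ip)		/* range starts after this element */
		return 1;
	return 0;
}

int main(void)
{
	struct rec recs[] = { { 100 }, { 200 }, { 300 }, { 400 } };
	struct rec key = { .ip = 150, .flags = 350 };	/* [150, 350) */
	struct rec *hit;

	/*
	 * bsearch() finds *a* matching record (200 or 300 here), not
	 * necessarily the first; the kernel loops ("goto again") to
	 * remove the rest one at a time.
	 */
	hit = bsearch(&key, recs, 4, sizeof(recs[0]), cmp_range);
	if (hit)
		printf("matched ip %lu\n", hit->ip);
	return 0;
}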
@@ -5316,25 +5558,13 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
-
-/*
- * Currently there's no safe way to free a trampoline when the kernel
- * is configured with PREEMPT. That is because a task could be preempted
- * when it jumped to the trampoline, it may be preempted for a long time
- * depending on the system load, and currently there's no way to know
- * when it will be off the trampoline. If the trampoline is freed
- * too early, when the task runs again, it will be executing on freed
- * memory and crash.
- */
-#ifdef CONFIG_PREEMPT
-	/* Currently, only non dynamic ops can have a trampoline */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		return;
-#endif
-
 	arch_ftrace_update_trampoline(ops);
 }
 
+void ftrace_init_trace_array(struct trace_array *tr)
+{
+	INIT_LIST_HEAD(&tr->func_probes);
+}
 #else
 
 static struct ftrace_ops global_ops = {
@@ -5389,6 +5619,7 @@ __init void ftrace_init_global_array_ops(struct trace_array *tr)
 {
 	tr->ops = &global_ops;
 	tr->ops->private = tr;
+	ftrace_init_trace_array(tr);
 }
 
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
@@ -5543,6 +5774,43 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
 			       trace_ignore_this_task(pid_list, next));
 }
 
+static void
+ftrace_pid_follow_sched_process_fork(void *data,
+				     struct task_struct *self,
+				     struct task_struct *task)
+{
+	struct trace_pid_list *pid_list;
+	struct trace_array *tr = data;
+
+	pid_list = rcu_dereference_sched(tr->function_pids);
+	trace_filter_add_remove_task(pid_list, self, task);
+}
+
+static void
+ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
+{
+	struct trace_pid_list *pid_list;
+	struct trace_array *tr = data;
+
+	pid_list = rcu_dereference_sched(tr->function_pids);
+	trace_filter_add_remove_task(pid_list, NULL, task);
+}
+
+void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
+{
+	if (enable) {
+		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
+						  tr);
+		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+						  tr);
+	} else {
+		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
+						    tr);
+		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+						    tr);
+	}
+}
+
 static void clear_ftrace_pids(struct trace_array *tr)
 {
 	struct trace_pid_list *pid_list;