about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorSteven Rostedt (VMware) <rostedt@goodmis.org>2017-04-04 21:31:28 -0400
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2017-04-20 22:06:43 -0400
commiteee8ded131f15e0f5b1897c9c4a7687fabd28822 (patch)
tree20ade2877030156bf2624556ed1d9f950d50c335 /kernel
parent1ec3a81a0cf4236b644282794932c4eda9c1714a (diff)
ftrace: Have the function probes call their own function
Now that the function probes have their own ftrace_ops, there's no reason to continue using the ftrace_func_hash to find which probe to call in the function callback. The ops that is passed in to the function callback is part of the probe_ops to call.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/ftrace.c225
-rw-r--r--kernel/trace/trace.h1
2 files changed, 99 insertions, 127 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cf6b7263199a..493c7ff7e860 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1096,14 +1096,7 @@ static bool update_all_ops;
1096# error Dynamic ftrace depends on MCOUNT_RECORD 1096# error Dynamic ftrace depends on MCOUNT_RECORD
1097#endif 1097#endif
1098 1098
1099static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; 1099static LIST_HEAD(ftrace_func_probes);
1100
1101struct ftrace_func_probe {
1102 struct hlist_node node;
1103 struct ftrace_probe_ops *ops;
1104 unsigned long ip;
1105 struct list_head free_list;
1106};
1107 1100
1108struct ftrace_func_entry { 1101struct ftrace_func_entry {
1109 struct hlist_node hlist; 1102 struct hlist_node hlist;
@@ -1270,7 +1263,7 @@ static void
1270remove_hash_entry(struct ftrace_hash *hash, 1263remove_hash_entry(struct ftrace_hash *hash,
1271 struct ftrace_func_entry *entry) 1264 struct ftrace_func_entry *entry)
1272{ 1265{
1273 hlist_del(&entry->hlist); 1266 hlist_del_rcu(&entry->hlist);
1274 hash->count--; 1267 hash->count--;
1275} 1268}
1276 1269
@@ -3063,35 +3056,58 @@ struct ftrace_iterator {
3063 loff_t func_pos; 3056 loff_t func_pos;
3064 struct ftrace_page *pg; 3057 struct ftrace_page *pg;
3065 struct dyn_ftrace *func; 3058 struct dyn_ftrace *func;
3066 struct ftrace_func_probe *probe; 3059 struct ftrace_probe_ops *probe;
3060 struct ftrace_func_entry *probe_entry;
3067 struct trace_parser parser; 3061 struct trace_parser parser;
3068 struct ftrace_hash *hash; 3062 struct ftrace_hash *hash;
3069 struct ftrace_ops *ops; 3063 struct ftrace_ops *ops;
3070 int hidx; 3064 int pidx;
3071 int idx; 3065 int idx;
3072 unsigned flags; 3066 unsigned flags;
3073}; 3067};
3074 3068
3075static void * 3069static void *
3076t_hash_next(struct seq_file *m, loff_t *pos) 3070t_probe_next(struct seq_file *m, loff_t *pos)
3077{ 3071{
3078 struct ftrace_iterator *iter = m->private; 3072 struct ftrace_iterator *iter = m->private;
3073 struct ftrace_hash *hash;
3074 struct list_head *next;
3079 struct hlist_node *hnd = NULL; 3075 struct hlist_node *hnd = NULL;
3080 struct hlist_head *hhd; 3076 struct hlist_head *hhd;
3077 int size;
3081 3078
3082 (*pos)++; 3079 (*pos)++;
3083 iter->pos = *pos; 3080 iter->pos = *pos;
3084 3081
3085 if (iter->probe) 3082 if (list_empty(&ftrace_func_probes))
3086 hnd = &iter->probe->node;
3087 retry:
3088 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
3089 return NULL; 3083 return NULL;
3090 3084
3091 hhd = &ftrace_func_hash[iter->hidx]; 3085 if (!iter->probe) {
3086 next = ftrace_func_probes.next;
3087 iter->probe = list_entry(next, struct ftrace_probe_ops, list);
3088 }
3089
3090 if (iter->probe_entry)
3091 hnd = &iter->probe_entry->hlist;
3092
3093 hash = iter->probe->ops.func_hash->filter_hash;
3094 size = 1 << hash->size_bits;
3095
3096 retry:
3097 if (iter->pidx >= size) {
3098 if (iter->probe->list.next == &ftrace_func_probes)
3099 return NULL;
3100 next = iter->probe->list.next;
3101 iter->probe = list_entry(next, struct ftrace_probe_ops, list);
3102 hash = iter->probe->ops.func_hash->filter_hash;
3103 size = 1 << hash->size_bits;
3104 iter->pidx = 0;
3105 }
3106
3107 hhd = &hash->buckets[iter->pidx];
3092 3108
3093 if (hlist_empty(hhd)) { 3109 if (hlist_empty(hhd)) {
3094 iter->hidx++; 3110 iter->pidx++;
3095 hnd = NULL; 3111 hnd = NULL;
3096 goto retry; 3112 goto retry;
3097 } 3113 }
@@ -3101,7 +3117,7 @@ t_hash_next(struct seq_file *m, loff_t *pos)
3101 else { 3117 else {
3102 hnd = hnd->next; 3118 hnd = hnd->next;
3103 if (!hnd) { 3119 if (!hnd) {
3104 iter->hidx++; 3120 iter->pidx++;
3105 goto retry; 3121 goto retry;
3106 } 3122 }
3107 } 3123 }
@@ -3109,26 +3125,28 @@ t_hash_next(struct seq_file *m, loff_t *pos)
3109 if (WARN_ON_ONCE(!hnd)) 3125 if (WARN_ON_ONCE(!hnd))
3110 return NULL; 3126 return NULL;
3111 3127
3112 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); 3128 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3113 3129
3114 return iter; 3130 return iter;
3115} 3131}
3116 3132
3117static void *t_hash_start(struct seq_file *m, loff_t *pos) 3133static void *t_probe_start(struct seq_file *m, loff_t *pos)
3118{ 3134{
3119 struct ftrace_iterator *iter = m->private; 3135 struct ftrace_iterator *iter = m->private;
3120 void *p = NULL; 3136 void *p = NULL;
3121 loff_t l; 3137 loff_t l;
3122 3138
3123 if (!(iter->flags & FTRACE_ITER_DO_HASH)) 3139 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3124 return NULL; 3140 return NULL;
3125 3141
3126 if (iter->func_pos > *pos) 3142 if (iter->func_pos > *pos)
3127 return NULL; 3143 return NULL;
3128 3144
3129 iter->hidx = 0; 3145 iter->probe = NULL;
3146 iter->probe_entry = NULL;
3147 iter->pidx = 0;
3130 for (l = 0; l <= (*pos - iter->func_pos); ) { 3148 for (l = 0; l <= (*pos - iter->func_pos); ) {
3131 p = t_hash_next(m, &l); 3149 p = t_probe_next(m, &l);
3132 if (!p) 3150 if (!p)
3133 break; 3151 break;
3134 } 3152 }
@@ -3136,24 +3154,27 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
3136 return NULL; 3154 return NULL;
3137 3155
3138 /* Only set this if we have an item */ 3156 /* Only set this if we have an item */
3139 iter->flags |= FTRACE_ITER_HASH; 3157 iter->flags |= FTRACE_ITER_PROBE;
3140 3158
3141 return iter; 3159 return iter;
3142} 3160}
3143 3161
3144static int 3162static int
3145t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) 3163t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3146{ 3164{
3147 struct ftrace_func_probe *rec; 3165 struct ftrace_probe_ops *probe;
3166 struct ftrace_func_entry *probe_entry;
3148 3167
3149 rec = iter->probe; 3168 probe = iter->probe;
3150 if (WARN_ON_ONCE(!rec)) 3169 probe_entry = iter->probe_entry;
3170
3171 if (WARN_ON_ONCE(!probe || !probe_entry))
3151 return -EIO; 3172 return -EIO;
3152 3173
3153 if (rec->ops->print) 3174 if (probe->print)
3154 return rec->ops->print(m, rec->ip, rec->ops, NULL); 3175 return probe->print(m, probe_entry->ip, probe, NULL);
3155 3176
3156 seq_printf(m, "%ps:%ps\n", (void *)rec->ip, (void *)rec->ops->func); 3177 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, (void *)probe->func);
3157 3178
3158 return 0; 3179 return 0;
3159} 3180}
@@ -3205,19 +3226,19 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
3205 if (unlikely(ftrace_disabled)) 3226 if (unlikely(ftrace_disabled))
3206 return NULL; 3227 return NULL;
3207 3228
3208 if (iter->flags & FTRACE_ITER_HASH) 3229 if (iter->flags & FTRACE_ITER_PROBE)
3209 return t_hash_next(m, pos); 3230 return t_probe_next(m, pos);
3210 3231
3211 if (iter->flags & FTRACE_ITER_PRINTALL) { 3232 if (iter->flags & FTRACE_ITER_PRINTALL) {
3212 /* next must increment pos, and t_hash_start does not */ 3233 /* next must increment pos, and t_probe_start does not */
3213 (*pos)++; 3234 (*pos)++;
3214 return t_hash_start(m, &l); 3235 return t_probe_start(m, &l);
3215 } 3236 }
3216 3237
3217 ret = t_func_next(m, pos); 3238 ret = t_func_next(m, pos);
3218 3239
3219 if (!ret) 3240 if (!ret)
3220 return t_hash_start(m, &l); 3241 return t_probe_start(m, &l);
3221 3242
3222 return ret; 3243 return ret;
3223} 3244}
@@ -3226,7 +3247,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
3226{ 3247{
3227 iter->pos = 0; 3248 iter->pos = 0;
3228 iter->func_pos = 0; 3249 iter->func_pos = 0;
3229 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); 3250 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE);
3230} 3251}
3231 3252
3232static void *t_start(struct seq_file *m, loff_t *pos) 3253static void *t_start(struct seq_file *m, loff_t *pos)
@@ -3255,15 +3276,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
3255 ftrace_hash_empty(iter->hash)) { 3276 ftrace_hash_empty(iter->hash)) {
3256 iter->func_pos = 1; /* Account for the message */ 3277 iter->func_pos = 1; /* Account for the message */
3257 if (*pos > 0) 3278 if (*pos > 0)
3258 return t_hash_start(m, pos); 3279 return t_probe_start(m, pos);
3259 iter->flags |= FTRACE_ITER_PRINTALL; 3280 iter->flags |= FTRACE_ITER_PRINTALL;
3260 /* reset in case of seek/pread */ 3281 /* reset in case of seek/pread */
3261 iter->flags &= ~FTRACE_ITER_HASH; 3282 iter->flags &= ~FTRACE_ITER_PROBE;
3262 return iter; 3283 return iter;
3263 } 3284 }
3264 3285
3265 if (iter->flags & FTRACE_ITER_HASH) 3286 if (iter->flags & FTRACE_ITER_PROBE)
3266 return t_hash_start(m, pos); 3287 return t_probe_start(m, pos);
3267 3288
3268 /* 3289 /*
3269 * Unfortunately, we need to restart at ftrace_pages_start 3290 * Unfortunately, we need to restart at ftrace_pages_start
@@ -3279,7 +3300,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
3279 } 3300 }
3280 3301
3281 if (!p) 3302 if (!p)
3282 return t_hash_start(m, pos); 3303 return t_probe_start(m, pos);
3283 3304
3284 return iter; 3305 return iter;
3285} 3306}
@@ -3310,8 +3331,8 @@ static int t_show(struct seq_file *m, void *v)
3310 struct ftrace_iterator *iter = m->private; 3331 struct ftrace_iterator *iter = m->private;
3311 struct dyn_ftrace *rec; 3332 struct dyn_ftrace *rec;
3312 3333
3313 if (iter->flags & FTRACE_ITER_HASH) 3334 if (iter->flags & FTRACE_ITER_PROBE)
3314 return t_hash_show(m, iter); 3335 return t_probe_show(m, iter);
3315 3336
3316 if (iter->flags & FTRACE_ITER_PRINTALL) { 3337 if (iter->flags & FTRACE_ITER_PRINTALL) {
3317 if (iter->flags & FTRACE_ITER_NOTRACE) 3338 if (iter->flags & FTRACE_ITER_NOTRACE)
@@ -3490,7 +3511,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
3490 struct ftrace_ops *ops = inode->i_private; 3511 struct ftrace_ops *ops = inode->i_private;
3491 3512
3492 return ftrace_regex_open(ops, 3513 return ftrace_regex_open(ops,
3493 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, 3514 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3494 inode, file); 3515 inode, file);
3495} 3516}
3496 3517
@@ -3765,16 +3786,9 @@ core_initcall(ftrace_mod_cmd_init);
3765static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 3786static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3766 struct ftrace_ops *op, struct pt_regs *pt_regs) 3787 struct ftrace_ops *op, struct pt_regs *pt_regs)
3767{ 3788{
3768 struct ftrace_func_probe *entry; 3789 struct ftrace_probe_ops *probe_ops;
3769 struct hlist_head *hhd;
3770 unsigned long key;
3771
3772 key = hash_long(ip, FTRACE_HASH_BITS);
3773 3790
3774 hhd = &ftrace_func_hash[key]; 3791 probe_ops = container_of(op, struct ftrace_probe_ops, ops);
3775
3776 if (hlist_empty(hhd))
3777 return;
3778 3792
3779 /* 3793 /*
3780 * Disable preemption for these calls to prevent a RCU grace 3794 * Disable preemption for these calls to prevent a RCU grace
@@ -3782,20 +3796,10 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3782 * on the hash. rcu_read_lock is too dangerous here. 3796 * on the hash. rcu_read_lock is too dangerous here.
3783 */ 3797 */
3784 preempt_disable_notrace(); 3798 preempt_disable_notrace();
3785 hlist_for_each_entry_rcu_notrace(entry, hhd, node) { 3799 probe_ops->func(ip, parent_ip, probe_ops, NULL);
3786 if (entry->ip == ip)
3787 entry->ops->func(ip, parent_ip, entry->ops, NULL);
3788 }
3789 preempt_enable_notrace(); 3800 preempt_enable_notrace();
3790} 3801}
3791 3802
3792static void ftrace_free_entry(struct ftrace_func_probe *entry)
3793{
3794 if (entry->ops->free)
3795 entry->ops->free(entry->ops, entry->ip, NULL);
3796 kfree(entry);
3797}
3798
3799struct ftrace_func_map { 3803struct ftrace_func_map {
3800 struct ftrace_func_entry entry; 3804 struct ftrace_func_entry entry;
3801 void *data; 3805 void *data;
@@ -3942,13 +3946,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3942 void *data) 3946 void *data)
3943{ 3947{
3944 struct ftrace_func_entry *entry; 3948 struct ftrace_func_entry *entry;
3945 struct ftrace_func_probe *probe;
3946 struct ftrace_hash **orig_hash; 3949 struct ftrace_hash **orig_hash;
3947 struct ftrace_hash *old_hash; 3950 struct ftrace_hash *old_hash;
3948 struct ftrace_hash *hash; 3951 struct ftrace_hash *hash;
3949 struct hlist_head hl;
3950 struct hlist_node *n;
3951 unsigned long key;
3952 int count = 0; 3952 int count = 0;
3953 int size; 3953 int size;
3954 int ret; 3954 int ret;
@@ -3961,6 +3961,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3961 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) { 3961 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) {
3962 ops->ops.func = function_trace_probe_call; 3962 ops->ops.func = function_trace_probe_call;
3963 ftrace_ops_init(&ops->ops); 3963 ftrace_ops_init(&ops->ops);
3964 INIT_LIST_HEAD(&ops->list);
3964 } 3965 }
3965 3966
3966 mutex_lock(&ops->ops.func_hash->regex_lock); 3967 mutex_lock(&ops->ops.func_hash->regex_lock);
@@ -3978,31 +3979,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3978 if (ret < 0) 3979 if (ret < 0)
3979 goto out; 3980 goto out;
3980 3981
3981 INIT_HLIST_HEAD(&hl);
3982
3983 size = 1 << hash->size_bits; 3982 size = 1 << hash->size_bits;
3984 for (i = 0; i < size; i++) { 3983 for (i = 0; i < size; i++) {
3985 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 3984 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
3986 if (ftrace_lookup_ip(old_hash, entry->ip)) 3985 if (ftrace_lookup_ip(old_hash, entry->ip))
3987 continue; 3986 continue;
3988 probe = kmalloc(sizeof(*probe), GFP_KERNEL);
3989 if (!probe) {
3990 count = -ENOMEM;
3991 goto err_free;
3992 }
3993 probe->ops = ops;
3994 probe->ip = entry->ip;
3995 /* 3987 /*
3996 * The caller might want to do something special 3988 * The caller might want to do something special
3997 * for each function we find. We call the callback 3989 * for each function we find. We call the callback
3998 * to give the caller an opportunity to do so. 3990 * to give the caller an opportunity to do so.
3999 */ 3991 */
4000 if (ops->init && ops->init(ops, entry->ip, data) < 0) { 3992 if (ops->init) {
4001 kfree(probe); 3993 ret = ops->init(ops, entry->ip, data);
4002 goto err_free; 3994 if (ret < 0)
3995 goto out;
4003 } 3996 }
4004 hlist_add_head(&probe->node, &hl);
4005
4006 count++; 3997 count++;
4007 } 3998 }
4008 } 3999 }
@@ -4012,17 +4003,15 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
4012 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, 4003 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash,
4013 hash, 1); 4004 hash, 1);
4014 if (ret < 0) 4005 if (ret < 0)
4015 goto err_free_unlock; 4006 goto out_unlock;
4016 4007
4017 hlist_for_each_entry_safe(probe, n, &hl, node) { 4008 if (list_empty(&ops->list))
4018 hlist_del(&probe->node); 4009 list_add(&ops->list, &ftrace_func_probes);
4019 key = hash_long(probe->ip, FTRACE_HASH_BITS);
4020 hlist_add_head_rcu(&probe->node, &ftrace_func_hash[key]);
4021 }
4022 4010
4023 if (!(ops->ops.flags & FTRACE_OPS_FL_ENABLED)) 4011 if (!(ops->ops.flags & FTRACE_OPS_FL_ENABLED))
4024 ret = ftrace_startup(&ops->ops, 0); 4012 ret = ftrace_startup(&ops->ops, 0);
4025 4013
4014 out_unlock:
4026 mutex_unlock(&ftrace_lock); 4015 mutex_unlock(&ftrace_lock);
4027 4016
4028 if (!ret) 4017 if (!ret)
@@ -4032,34 +4021,22 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
4032 free_ftrace_hash(hash); 4021 free_ftrace_hash(hash);
4033 4022
4034 return ret; 4023 return ret;
4035
4036 err_free_unlock:
4037 mutex_unlock(&ftrace_lock);
4038 err_free:
4039 hlist_for_each_entry_safe(probe, n, &hl, node) {
4040 hlist_del(&probe->node);
4041 if (ops->free)
4042 ops->free(ops, probe->ip, NULL);
4043 kfree(probe);
4044 }
4045 goto out;
4046} 4024}
4047 4025
4048int 4026int
4049unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) 4027unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4050{ 4028{
4051 struct ftrace_ops_hash old_hash_ops; 4029 struct ftrace_ops_hash old_hash_ops;
4052 struct ftrace_func_entry *rec_entry; 4030 struct ftrace_func_entry *entry;
4053 struct ftrace_func_probe *entry;
4054 struct ftrace_func_probe *p;
4055 struct ftrace_glob func_g; 4031 struct ftrace_glob func_g;
4056 struct ftrace_hash **orig_hash; 4032 struct ftrace_hash **orig_hash;
4057 struct ftrace_hash *old_hash; 4033 struct ftrace_hash *old_hash;
4058 struct list_head free_list;
4059 struct ftrace_hash *hash = NULL; 4034 struct ftrace_hash *hash = NULL;
4060 struct hlist_node *tmp; 4035 struct hlist_node *tmp;
4036 struct hlist_head hhd;
4061 char str[KSYM_SYMBOL_LEN]; 4037 char str[KSYM_SYMBOL_LEN];
4062 int i, ret; 4038 int i, ret;
4039 int size;
4063 4040
4064 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4041 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4065 return -EINVAL; 4042 return -EINVAL;
@@ -4097,18 +4074,12 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4097 if (!hash) 4074 if (!hash)
4098 goto out_unlock; 4075 goto out_unlock;
4099 4076
4100 INIT_LIST_HEAD(&free_list); 4077 INIT_HLIST_HEAD(&hhd);
4101
4102 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
4103 struct hlist_head *hhd = &ftrace_func_hash[i];
4104 4078
4105 hlist_for_each_entry_safe(entry, tmp, hhd, node) { 4079 size = 1 << hash->size_bits;
4106 4080 for (i = 0; i < size; i++) {
4107 /* break up if statements for readability */ 4081 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4108 if (entry->ops != ops)
4109 continue;
4110 4082
4111 /* do this last, since it is the most expensive */
4112 if (func_g.search) { 4083 if (func_g.search) {
4113 kallsyms_lookup(entry->ip, NULL, NULL, 4084 kallsyms_lookup(entry->ip, NULL, NULL,
4114 NULL, str); 4085 NULL, str);
@@ -4116,26 +4087,24 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4116 continue; 4087 continue;
4117 } 4088 }
4118 4089
4119 rec_entry = ftrace_lookup_ip(hash, entry->ip); 4090 remove_hash_entry(hash, entry);
4120 /* It is possible more than one entry had this ip */ 4091 hlist_add_head(&entry->hlist, &hhd);
4121 if (rec_entry)
4122 free_hash_entry(hash, rec_entry);
4123
4124 hlist_del_rcu(&entry->node);
4125 list_add(&entry->free_list, &free_list);
4126 } 4092 }
4127 } 4093 }
4128 4094
4129 /* Nothing found? */ 4095 /* Nothing found? */
4130 if (list_empty(&free_list)) { 4096 if (hlist_empty(&hhd)) {
4131 ret = -EINVAL; 4097 ret = -EINVAL;
4132 goto out_unlock; 4098 goto out_unlock;
4133 } 4099 }
4134 4100
4135 mutex_lock(&ftrace_lock); 4101 mutex_lock(&ftrace_lock);
4136 4102
4137 if (ftrace_hash_empty(hash)) 4103 if (ftrace_hash_empty(hash)) {
4138 ftrace_shutdown(&ops->ops, 0); 4104 ftrace_shutdown(&ops->ops, 0);
4105 list_del_init(&ops->list);
4106 }
4107
4139 4108
4140 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, 4109 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash,
4141 hash, 1); 4110 hash, 1);
@@ -4146,9 +4115,11 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4146 &old_hash_ops); 4115 &old_hash_ops);
4147 synchronize_sched(); 4116 synchronize_sched();
4148 4117
4149 list_for_each_entry_safe(entry, p, &free_list, free_list) { 4118 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4150 list_del(&entry->free_list); 4119 hlist_del(&entry->hlist);
4151 ftrace_free_entry(entry); 4120 if (ops->free)
4121 ops->free(ops, entry->ip, NULL);
4122 kfree(entry);
4152 } 4123 }
4153 mutex_unlock(&ftrace_lock); 4124 mutex_unlock(&ftrace_lock);
4154 4125
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e16c67c49de4..d457addcc224 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -933,6 +933,7 @@ static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) {
933 933
934struct ftrace_probe_ops { 934struct ftrace_probe_ops {
935 struct ftrace_ops ops; 935 struct ftrace_ops ops;
936 struct list_head list;
936 void (*func)(unsigned long ip, 937 void (*func)(unsigned long ip,
937 unsigned long parent_ip, 938 unsigned long parent_ip,
938 struct ftrace_probe_ops *ops, 939 struct ftrace_probe_ops *ops,