aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteven Rostedt (VMware) <rostedt@goodmis.org>2017-04-18 14:50:39 -0400
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2017-04-20 22:06:46 -0400
commit7b60f3d8761561d95d7e962522d6338143fc2329 (patch)
treec129464e646cabd898b8c2ddde08f52f1afd6053
parentb5f081b563a6cdcb85a543df8c851951a8978275 (diff)
ftrace: Dynamically create the probe ftrace_ops for the trace_array
In order to eventually have each trace_array instance have its own unique set of function probes (triggers), the trace array needs to hold the ops and the filters for the probes. This is the first step toward accomplishing that. Instead of having the private data of the probe ops point to the trace_array, create a separate list that the trace_array holds. There's only one private_data for a probe, but we need one per trace_array. The probe ftrace_ops will be dynamically created for each instance, instead of being static. Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--kernel/trace/ftrace.c192
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace.h5
-rw-r--r--kernel/trace/trace_events.c2
-rw-r--r--kernel/trace/trace_functions.c2
5 files changed, 146 insertions, 57 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e51cd6b51253..8fdc18500c61 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1101,6 +1101,14 @@ struct ftrace_func_entry {
1101 unsigned long ip; 1101 unsigned long ip;
1102}; 1102};
1103 1103
1104struct ftrace_func_probe {
1105 struct ftrace_probe_ops *probe_ops;
1106 struct ftrace_ops ops;
1107 struct trace_array *tr;
1108 struct list_head list;
1109 int ref;
1110};
1111
1104/* 1112/*
1105 * We make these constant because no one should touch them, 1113 * We make these constant because no one should touch them,
1106 * but they are used as the default "empty hash", to avoid allocating 1114 * but they are used as the default "empty hash", to avoid allocating
@@ -3054,7 +3062,7 @@ struct ftrace_iterator {
3054 loff_t func_pos; 3062 loff_t func_pos;
3055 struct ftrace_page *pg; 3063 struct ftrace_page *pg;
3056 struct dyn_ftrace *func; 3064 struct dyn_ftrace *func;
3057 struct ftrace_probe_ops *probe; 3065 struct ftrace_func_probe *probe;
3058 struct ftrace_func_entry *probe_entry; 3066 struct ftrace_func_entry *probe_entry;
3059 struct trace_parser parser; 3067 struct trace_parser parser;
3060 struct ftrace_hash *hash; 3068 struct ftrace_hash *hash;
@@ -3088,7 +3096,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3088 3096
3089 if (!iter->probe) { 3097 if (!iter->probe) {
3090 next = func_probes->next; 3098 next = func_probes->next;
3091 iter->probe = list_entry(next, struct ftrace_probe_ops, list); 3099 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3092 } 3100 }
3093 3101
3094 if (iter->probe_entry) 3102 if (iter->probe_entry)
@@ -3102,7 +3110,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3102 if (iter->probe->list.next == func_probes) 3110 if (iter->probe->list.next == func_probes)
3103 return NULL; 3111 return NULL;
3104 next = iter->probe->list.next; 3112 next = iter->probe->list.next;
3105 iter->probe = list_entry(next, struct ftrace_probe_ops, list); 3113 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3106 hash = iter->probe->ops.func_hash->filter_hash; 3114 hash = iter->probe->ops.func_hash->filter_hash;
3107 size = 1 << hash->size_bits; 3115 size = 1 << hash->size_bits;
3108 iter->pidx = 0; 3116 iter->pidx = 0;
@@ -3166,8 +3174,9 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos)
3166static int 3174static int
3167t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3175t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3168{ 3176{
3169 struct ftrace_probe_ops *probe;
3170 struct ftrace_func_entry *probe_entry; 3177 struct ftrace_func_entry *probe_entry;
3178 struct ftrace_probe_ops *probe_ops;
3179 struct ftrace_func_probe *probe;
3171 3180
3172 probe = iter->probe; 3181 probe = iter->probe;
3173 probe_entry = iter->probe_entry; 3182 probe_entry = iter->probe_entry;
@@ -3175,10 +3184,13 @@ t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3175 if (WARN_ON_ONCE(!probe || !probe_entry)) 3184 if (WARN_ON_ONCE(!probe || !probe_entry))
3176 return -EIO; 3185 return -EIO;
3177 3186
3178 if (probe->print) 3187 probe_ops = probe->probe_ops;
3179 return probe->print(m, probe_entry->ip, probe, NULL); 3188
3189 if (probe_ops->print)
3190 return probe_ops->print(m, probe_entry->ip, probe_ops, NULL);
3180 3191
3181 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, (void *)probe->func); 3192 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3193 (void *)probe_ops->func);
3182 3194
3183 return 0; 3195 return 0;
3184} 3196}
@@ -3791,9 +3803,10 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3791 struct ftrace_ops *op, struct pt_regs *pt_regs) 3803 struct ftrace_ops *op, struct pt_regs *pt_regs)
3792{ 3804{
3793 struct ftrace_probe_ops *probe_ops; 3805 struct ftrace_probe_ops *probe_ops;
3794 struct trace_array *tr = op->private; 3806 struct ftrace_func_probe *probe;
3795 3807
3796 probe_ops = container_of(op, struct ftrace_probe_ops, ops); 3808 probe = container_of(op, struct ftrace_func_probe, ops);
3809 probe_ops = probe->probe_ops;
3797 3810
3798 /* 3811 /*
3799 * Disable preemption for these calls to prevent a RCU grace 3812 * Disable preemption for these calls to prevent a RCU grace
@@ -3801,7 +3814,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3801 * on the hash. rcu_read_lock is too dangerous here. 3814 * on the hash. rcu_read_lock is too dangerous here.
3802 */ 3815 */
3803 preempt_disable_notrace(); 3816 preempt_disable_notrace();
3804 probe_ops->func(ip, parent_ip, tr, probe_ops, NULL); 3817 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, NULL);
3805 preempt_enable_notrace(); 3818 preempt_enable_notrace();
3806} 3819}
3807 3820
@@ -3946,11 +3959,41 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
3946 free_ftrace_hash(&mapper->hash); 3959 free_ftrace_hash(&mapper->hash);
3947} 3960}
3948 3961
3962static void release_probe(struct ftrace_func_probe *probe)
3963{
3964 struct ftrace_probe_ops *probe_ops;
3965
3966 mutex_lock(&ftrace_lock);
3967
3968 WARN_ON(probe->ref <= 0);
3969
3970 /* Subtract the ref that was used to protect this instance */
3971 probe->ref--;
3972
3973 if (!probe->ref) {
3974 probe_ops = probe->probe_ops;
3975 list_del(&probe->list);
3976 kfree(probe);
3977 }
3978 mutex_unlock(&ftrace_lock);
3979}
3980
3981static void acquire_probe_locked(struct ftrace_func_probe *probe)
3982{
3983 /*
3984 * Add one ref to keep it from being freed when releasing the
3985 * ftrace_lock mutex.
3986 */
3987 probe->ref++;
3988}
3989
3949int 3990int
3950register_ftrace_function_probe(char *glob, struct trace_array *tr, 3991register_ftrace_function_probe(char *glob, struct trace_array *tr,
3951 struct ftrace_probe_ops *ops, void *data) 3992 struct ftrace_probe_ops *probe_ops,
3993 void *data)
3952{ 3994{
3953 struct ftrace_func_entry *entry; 3995 struct ftrace_func_entry *entry;
3996 struct ftrace_func_probe *probe;
3954 struct ftrace_hash **orig_hash; 3997 struct ftrace_hash **orig_hash;
3955 struct ftrace_hash *old_hash; 3998 struct ftrace_hash *old_hash;
3956 struct ftrace_hash *hash; 3999 struct ftrace_hash *hash;
@@ -3966,16 +4009,33 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
3966 if (WARN_ON(glob[0] == '!')) 4009 if (WARN_ON(glob[0] == '!'))
3967 return -EINVAL; 4010 return -EINVAL;
3968 4011
3969 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) { 4012
3970 ops->ops.func = function_trace_probe_call; 4013 mutex_lock(&ftrace_lock);
3971 ftrace_ops_init(&ops->ops); 4014 /* Check if the probe_ops is already registered */
3972 INIT_LIST_HEAD(&ops->list); 4015 list_for_each_entry(probe, &tr->func_probes, list) {
3973 ops->ops.private = tr; 4016 if (probe->probe_ops == probe_ops)
4017 break;
3974 } 4018 }
4019 if (&probe->list == &tr->func_probes) {
4020 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4021 if (!probe) {
4022 mutex_unlock(&ftrace_lock);
4023 return -ENOMEM;
4024 }
4025 probe->probe_ops = probe_ops;
4026 probe->ops.func = function_trace_probe_call;
4027 probe->tr = tr;
4028 ftrace_ops_init(&probe->ops);
4029 list_add(&probe->list, &tr->func_probes);
4030 }
4031
4032 acquire_probe_locked(probe);
3975 4033
3976 mutex_lock(&ops->ops.func_hash->regex_lock); 4034 mutex_unlock(&ftrace_lock);
4035
4036 mutex_lock(&probe->ops.func_hash->regex_lock);
3977 4037
3978 orig_hash = &ops->ops.func_hash->filter_hash; 4038 orig_hash = &probe->ops.func_hash->filter_hash;
3979 old_hash = *orig_hash; 4039 old_hash = *orig_hash;
3980 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4040 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3981 4041
@@ -3998,8 +4058,9 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
3998 * for each function we find. We call the callback 4058 * for each function we find. We call the callback
3999 * to give the caller an opportunity to do so. 4059 * to give the caller an opportunity to do so.
4000 */ 4060 */
4001 if (ops->init) { 4061 if (probe_ops->init) {
4002 ret = ops->init(ops, tr, entry->ip, data); 4062 ret = probe_ops->init(probe_ops, tr,
4063 entry->ip, data);
4003 if (ret < 0) 4064 if (ret < 0)
4004 goto out; 4065 goto out;
4005 } 4066 }
@@ -4009,16 +4070,22 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4009 4070
4010 mutex_lock(&ftrace_lock); 4071 mutex_lock(&ftrace_lock);
4011 4072
4012 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, 4073 if (!count) {
4013 hash, 1); 4074 /* Nothing was added? */
4075 ret = -EINVAL;
4076 goto out_unlock;
4077 }
4078
4079 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4080 hash, 1);
4014 if (ret < 0) 4081 if (ret < 0)
4015 goto err_unlock; 4082 goto err_unlock;
4016 4083
4017 if (list_empty(&ops->list)) 4084 /* One ref for each new function traced */
4018 list_add(&ops->list, &tr->func_probes); 4085 probe->ref += count;
4019 4086
4020 if (!(ops->ops.flags & FTRACE_OPS_FL_ENABLED)) 4087 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4021 ret = ftrace_startup(&ops->ops, 0); 4088 ret = ftrace_startup(&probe->ops, 0);
4022 4089
4023 out_unlock: 4090 out_unlock:
4024 mutex_unlock(&ftrace_lock); 4091 mutex_unlock(&ftrace_lock);
@@ -4026,13 +4093,15 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4026 if (!ret) 4093 if (!ret)
4027 ret = count; 4094 ret = count;
4028 out: 4095 out:
4029 mutex_unlock(&ops->ops.func_hash->regex_lock); 4096 mutex_unlock(&probe->ops.func_hash->regex_lock);
4030 free_ftrace_hash(hash); 4097 free_ftrace_hash(hash);
4031 4098
4099 release_probe(probe);
4100
4032 return ret; 4101 return ret;
4033 4102
4034 err_unlock: 4103 err_unlock:
4035 if (!ops->free) 4104 if (!probe_ops->free || !count)
4036 goto out_unlock; 4105 goto out_unlock;
4037 4106
4038 /* Failed to do the move, need to call the free functions */ 4107 /* Failed to do the move, need to call the free functions */
@@ -4040,33 +4109,30 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4040 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4109 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4041 if (ftrace_lookup_ip(old_hash, entry->ip)) 4110 if (ftrace_lookup_ip(old_hash, entry->ip))
4042 continue; 4111 continue;
4043 ops->free(ops, tr, entry->ip, NULL); 4112 probe_ops->free(probe_ops, tr, entry->ip, NULL);
4044 } 4113 }
4045 } 4114 }
4046 goto out_unlock; 4115 goto out_unlock;
4047} 4116}
4048 4117
4049int 4118int
4050unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) 4119unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4120 struct ftrace_probe_ops *probe_ops)
4051{ 4121{
4052 struct ftrace_ops_hash old_hash_ops; 4122 struct ftrace_ops_hash old_hash_ops;
4053 struct ftrace_func_entry *entry; 4123 struct ftrace_func_entry *entry;
4124 struct ftrace_func_probe *probe;
4054 struct ftrace_glob func_g; 4125 struct ftrace_glob func_g;
4055 struct ftrace_hash **orig_hash; 4126 struct ftrace_hash **orig_hash;
4056 struct ftrace_hash *old_hash; 4127 struct ftrace_hash *old_hash;
4057 struct ftrace_hash *hash = NULL; 4128 struct ftrace_hash *hash = NULL;
4058 struct hlist_node *tmp; 4129 struct hlist_node *tmp;
4059 struct hlist_head hhd; 4130 struct hlist_head hhd;
4060 struct trace_array *tr;
4061 char str[KSYM_SYMBOL_LEN]; 4131 char str[KSYM_SYMBOL_LEN];
4062 int i, ret; 4132 int count = 0;
4133 int i, ret = -ENODEV;
4063 int size; 4134 int size;
4064 4135
4065 if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4066 return -EINVAL;
4067
4068 tr = ops->ops.private;
4069
4070 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 4136 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
4071 func_g.search = NULL; 4137 func_g.search = NULL;
4072 else if (glob) { 4138 else if (glob) {
@@ -4082,12 +4148,28 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4082 return -EINVAL; 4148 return -EINVAL;
4083 } 4149 }
4084 4150
4085 mutex_lock(&ops->ops.func_hash->regex_lock); 4151 mutex_lock(&ftrace_lock);
4152 /* Check if the probe_ops is already registered */
4153 list_for_each_entry(probe, &tr->func_probes, list) {
4154 if (probe->probe_ops == probe_ops)
4155 break;
4156 }
4157 if (&probe->list == &tr->func_probes)
4158 goto err_unlock_ftrace;
4159
4160 ret = -EINVAL;
4161 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4162 goto err_unlock_ftrace;
4163
4164 acquire_probe_locked(probe);
4086 4165
4087 orig_hash = &ops->ops.func_hash->filter_hash; 4166 mutex_unlock(&ftrace_lock);
4167
4168 mutex_lock(&probe->ops.func_hash->regex_lock);
4169
4170 orig_hash = &probe->ops.func_hash->filter_hash;
4088 old_hash = *orig_hash; 4171 old_hash = *orig_hash;
4089 4172
4090 ret = -EINVAL;
4091 if (ftrace_hash_empty(old_hash)) 4173 if (ftrace_hash_empty(old_hash))
4092 goto out_unlock; 4174 goto out_unlock;
4093 4175
@@ -4112,46 +4194,54 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
4112 if (!ftrace_match(str, &func_g)) 4194 if (!ftrace_match(str, &func_g))
4113 continue; 4195 continue;
4114 } 4196 }
4115 4197 count++;
4116 remove_hash_entry(hash, entry); 4198 remove_hash_entry(hash, entry);
4117 hlist_add_head(&entry->hlist, &hhd); 4199 hlist_add_head(&entry->hlist, &hhd);
4118 } 4200 }
4119 } 4201 }
4120 4202
4121 /* Nothing found? */ 4203 /* Nothing found? */
4122 if (hlist_empty(&hhd)) { 4204 if (!count) {
4123 ret = -EINVAL; 4205 ret = -EINVAL;
4124 goto out_unlock; 4206 goto out_unlock;
4125 } 4207 }
4126 4208
4127 mutex_lock(&ftrace_lock); 4209 mutex_lock(&ftrace_lock);
4128 4210
4129 if (ftrace_hash_empty(hash)) { 4211 WARN_ON(probe->ref < count);
4130 ftrace_shutdown(&ops->ops, 0);
4131 list_del_init(&ops->list);
4132 }
4133 4212
4213 probe->ref -= count;
4134 4214
4135 ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, 4215 if (ftrace_hash_empty(hash))
4216 ftrace_shutdown(&probe->ops, 0);
4217
4218 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4136 hash, 1); 4219 hash, 1);
4137 4220
4138 /* still need to update the function call sites */ 4221 /* still need to update the function call sites */
4139 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4222 if (ftrace_enabled && !ftrace_hash_empty(hash))
4140 ftrace_run_modify_code(&ops->ops, FTRACE_UPDATE_CALLS, 4223 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4141 &old_hash_ops); 4224 &old_hash_ops);
4142 synchronize_sched(); 4225 synchronize_sched();
4143 4226
4144 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4227 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4145 hlist_del(&entry->hlist); 4228 hlist_del(&entry->hlist);
4146 if (ops->free) 4229 if (probe_ops->free)
4147 ops->free(ops, tr, entry->ip, NULL); 4230 probe_ops->free(probe_ops, tr, entry->ip, NULL);
4148 kfree(entry); 4231 kfree(entry);
4149 } 4232 }
4150 mutex_unlock(&ftrace_lock); 4233 mutex_unlock(&ftrace_lock);
4151 4234
4152 out_unlock: 4235 out_unlock:
4153 mutex_unlock(&ops->ops.func_hash->regex_lock); 4236 mutex_unlock(&probe->ops.func_hash->regex_lock);
4154 free_ftrace_hash(hash); 4237 free_ftrace_hash(hash);
4238
4239 release_probe(probe);
4240
4241 return ret;
4242
4243 err_unlock_ftrace:
4244 mutex_unlock(&ftrace_lock);
4155 return ret; 4245 return ret;
4156} 4246}
4157 4247
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 368310e78d45..e61610e5e6e3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6832,7 +6832,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
6832 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 6832 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6833 6833
6834 if (glob[0] == '!') 6834 if (glob[0] == '!')
6835 return unregister_ftrace_function_probe_func(glob+1, ops); 6835 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
6836 6836
6837 if (!param) 6837 if (!param)
6838 goto out_reg; 6838 goto out_reg;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 390761804886..e978ecd257b8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -939,8 +939,6 @@ static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) {
939#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) 939#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
940 940
941struct ftrace_probe_ops { 941struct ftrace_probe_ops {
942 struct ftrace_ops ops;
943 struct list_head list;
944 void (*func)(unsigned long ip, 942 void (*func)(unsigned long ip,
945 unsigned long parent_ip, 943 unsigned long parent_ip,
946 struct trace_array *tr, 944 struct trace_array *tr,
@@ -976,7 +974,8 @@ extern int
976register_ftrace_function_probe(char *glob, struct trace_array *tr, 974register_ftrace_function_probe(char *glob, struct trace_array *tr,
977 struct ftrace_probe_ops *ops, void *data); 975 struct ftrace_probe_ops *ops, void *data);
978extern int 976extern int
979unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); 977unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
978 struct ftrace_probe_ops *ops);
980 979
981int register_ftrace_command(struct ftrace_func_command *cmd); 980int register_ftrace_command(struct ftrace_func_command *cmd);
982int unregister_ftrace_command(struct ftrace_func_command *cmd); 981int unregister_ftrace_command(struct ftrace_func_command *cmd);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 713bec614312..48c7f70cbac7 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2653,7 +2653,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
2653 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; 2653 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2654 2654
2655 if (glob[0] == '!') { 2655 if (glob[0] == '!') {
2656 ret = unregister_ftrace_function_probe_func(glob+1, ops); 2656 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
2657 goto out; 2657 goto out;
2658 } 2658 }
2659 2659
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 797f087183c5..b95f56ba9744 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -597,7 +597,7 @@ ftrace_trace_probe_callback(struct trace_array *tr,
597 return -EINVAL; 597 return -EINVAL;
598 598
599 if (glob[0] == '!') 599 if (glob[0] == '!')
600 return unregister_ftrace_function_probe_func(glob+1, ops); 600 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
601 601
602 if (!param) 602 if (!param)
603 goto out_reg; 603 goto out_reg;