Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--   kernel/trace/ftrace.c   137
 1 file changed, 122 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 683d559a0eef..0fa92f677c92 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,6 +62,8 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+	int __percpu *disabled;
+
+	disabled = alloc_percpu(int);
+	if (!disabled)
+		return -ENOMEM;
+
+	ops->disabled = disabled;
+	control_ops_disable_all(ops);
+	return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void update_global_ops(void)
 {
 	ftrace_func_t func;
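The per-cpu ->disabled counter allocated above is consumed through inline helpers that the same commit adds to include/linux/ftrace.h, which is outside this file-limited view. A sketch of their likely shape (reconstructed, not quoted from this diff; the caller is expected to have preemption disabled):

static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	/* counter reaching zero enables the ops on this cpu */
	(*this_cpu_ptr(ops->disabled))--;
}

static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	/* nonzero means the ops is currently off on this cpu */
	return *this_cpu_ptr(ops->disabled);
}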
@@ -219,7 +249,8 @@ static void update_ftrace_function(void)
 #else
 	__ftrace_trace_function = func;
 #endif
-	ftrace_trace_function = ftrace_test_stop_func;
+	ftrace_trace_function =
+		(func == ftrace_stub) ? func : ftrace_test_stop_func;
 #endif
 }
 
@@ -259,6 +290,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+				struct ftrace_ops *main_ops,
+				struct ftrace_ops *ops)
+{
+	int first = *list == &ftrace_list_end;
+	add_ftrace_ops(list, ops);
+	if (first)
+		add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+				  struct ftrace_ops *main_ops,
+				  struct ftrace_ops *ops)
+{
+	int ret = remove_ftrace_ops(list, ops);
+	if (!ret && *list == &ftrace_list_end)
+		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+	return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ftrace_disabled)
@@ -270,15 +321,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	/* We don't support both control and global flags set. */
+	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+		return -EINVAL;
+
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		int first = ftrace_global_list == &ftrace_list_end;
-		add_ftrace_ops(&ftrace_global_list, ops);
+		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
 		ops->flags |= FTRACE_OPS_FL_ENABLED;
-		if (first)
-			add_ftrace_ops(&ftrace_ops_list, &global_ops);
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		if (control_ops_alloc(ops))
+			return -ENOMEM;
+		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
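For orientation, a minimal control-ops client might look like the sketch below; my_func and my_ops are illustrative names, not part of this patch. Setting FTRACE_OPS_FL_CONTROL steers registration through the new branch above, which allocates the per-cpu ->disabled state, initially disabled on every cpu:

static void my_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs only on cpus where the ops has been locally enabled */
}

static struct ftrace_ops my_ops = {
	.func	= my_func,
	.flags	= FTRACE_OPS_FL_CONTROL,
};

	/* in init code: */
	ret = register_ftrace_function(&my_ops);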
@@ -302,11 +358,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -EINVAL;
 
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_ops(&ftrace_global_list, ops);
-		if (!ret && ftrace_global_list == &ftrace_list_end)
-			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+		ret = remove_ftrace_list_ops(&ftrace_global_list,
+					     &global_ops, ops);
 		if (!ret)
 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+		ret = remove_ftrace_list_ops(&ftrace_control_list,
+					     &control_ops, ops);
+		if (!ret) {
+			/*
+			 * The ftrace_ops is now removed from the list,
+			 * so there'll be no new users. We must ensure
+			 * all current users are done before we free
+			 * the control data.
+			 */
+			synchronize_sched();
+			control_ops_free(ops);
+		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
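The ordering this relies on is worth spelling out; the comment-style summary below restates what the patch itself establishes, with the reader side appearing later in this diff:

/*
 * Why synchronize_sched() is enough: the reader side
 * (ftrace_ops_control_func, added further down) walks
 * ftrace_control_list entirely under preempt_disable_notrace().
 * synchronize_sched() waits for every such preempt-disabled
 * section to complete, after which no cpu can still be touching
 * ops->disabled, so control_ops_free() cannot free it from under
 * a running callback.
 */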
@@ -1119,6 +1187,12 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
+void ftrace_free_filter(struct ftrace_ops *ops)
+{
+	free_ftrace_hash(ops->filter_hash);
+	free_ftrace_hash(ops->notrace_hash);
+}
+
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {
 	struct ftrace_hash *hash;
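The new ftrace_free_filter() gives dynamically allocated ops a way to release their filter hashes once tracing is stopped. A hypothetical teardown sequence, reusing the illustrative my_ops from the earlier sketch:

	unregister_ftrace_function(&my_ops);	/* stop tracing first */
	ftrace_free_filter(&my_ops);		/* then drop filter/notrace hashes */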
@@ -1129,7 +1203,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 		return NULL;
 
 	size = 1 << size_bits;
-	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
 
 	if (!hash->buckets) {
 		kfree(hash);
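The kzalloc-to-kcalloc switch is a hardening cleanup: kcalloc(n, size, flags) returns NULL if n * size would overflow, whereas the open-coded multiplication passed to kzalloc would silently wrap; the buckets array is zeroed either way.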
@@ -3146,8 +3220,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_lock(&ftrace_regex_lock);
 	if (reset)
 		ftrace_filter_reset(hash);
-	if (buf)
-		ftrace_match_records(hash, buf, len);
+	if (buf && !ftrace_match_records(hash, buf, len)) {
+		ret = -EINVAL;
+		goto out_regex_unlock;
+	}
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3233,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_unlock(&ftrace_lock);
 
+ out_regex_unlock:
 	mutex_unlock(&ftrace_regex_lock);
 
 	free_ftrace_hash(hash);
@@ -3173,10 +3250,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
  */
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
-		       int len, int reset)
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+		      int len, int reset)
 {
-	ftrace_set_regex(ops, buf, len, reset, 1);
+	return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
 
@@ -3191,10 +3268,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
  * for tracing.
  */
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
-			int len, int reset)
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+		       int len, int reset)
 {
-	ftrace_set_regex(ops, buf, len, reset, 0);
+	return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
 /**
@@ -3871,6 +3948,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+		return;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	trace_recursion_set(TRACE_CONTROL_BIT);
+	op = rcu_dereference_raw(ftrace_control_list);
+	while (op != &ftrace_list_end) {
+		if (!ftrace_function_local_disabled(op) &&
+		    ftrace_ops_test(op, ip))
+			op->func(ip, parent_ip);
+
+		op = rcu_dereference_raw(op->next);
+	};
+	trace_recursion_clear(TRACE_CONTROL_BIT);
+	preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+	.func = ftrace_ops_control_func,
+};
+
+static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
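Putting the pieces together: ftrace_ops_control_func() above skips any ops whose per-cpu counter is nonzero, so a control-ops user toggles tracing on the local cpu with the helpers sketched earlier, under disabled preemption. An illustrative toggle, again using the hypothetical my_ops:

	preempt_disable();
	ftrace_function_local_enable(&my_ops);	/* my_func may now fire on this cpu */
	/* ... traced region ... */
	ftrace_function_local_disable(&my_ops);	/* quiesce this cpu again */
	preempt_enable();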