author	Steven Rostedt <srostedt@redhat.com>	2011-05-05 21:14:55 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2011-05-18 15:29:51 -0400
commit	cdbe61bfe70440939e457fb4a8d0995eaaed17de (patch)
tree	6e82066db25ab6fa42455a42bb77783dac5260b8 /kernel/trace
parent	b848914ce39589d89ee0078a6d1ef452b464729e (diff)
ftrace: Allow dynamically allocated function tracers
Now that functions may be selected individually, it only makes sense
to allow dynamically allocated trace structures to do the tracing.
This will allow perf to allocate an ftrace_ops structure at runtime
and use it to pick and choose which functions that structure will
trace.
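
For illustration only (enabled by, but not part of, this patch; the
names and boilerplate here are hypothetical), such a user would
kmalloc the ops and register it:

	#include <linux/ftrace.h>
	#include <linux/slab.h>

	/* callback run for every function this ops traces */
	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* ip is the address of the traced function */
	}

	static struct ftrace_ops *dyn_ops;

	static int dyn_ops_start(void)
	{
		/*
		 * kmalloc'd memory lies outside _sdata.._edata, so
		 * __register_ftrace_function() will flag this ops
		 * FTRACE_OPS_FL_DYNAMIC automatically.
		 */
		dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
		if (!dyn_ops)
			return -ENOMEM;
		dyn_ops->func = my_trace_func;
		return register_ftrace_function(dyn_ops);
	}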
Note, a dynamically allocated ftrace_ops will always be called
indirectly instead of being called directly from the mcount in
entry.S. This is because there is no safe way to prevent mcount from
being preempted before calling the function, short of modifying every
arch's entry.S to do so (not likely). Thus, a dynamically allocated
ftrace_ops is now called by ftrace_ops_list_func(), which loops
through the registered ops, just as it already does when more than
one ops is registered at a time. The loop is protected by
preempt_disable, which lets unregistering use synchronize_sched() to
wait out all in-flight callers before the ops can be freed.
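
That pairing is what makes teardown of such an ops safe; a
hypothetical counterpart to the sketch above:

	static void dyn_ops_stop(void)
	{
		/*
		 * For a dynamic ops, unregistering internally does a
		 * synchronize_sched(), waiting out every CPU that may
		 * still be inside ftrace_ops_list_func().
		 */
		unregister_ftrace_function(dyn_ops);
		kfree(dyn_ops);		/* safe: no callers remain */
	}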
To determine whether an ftrace_ops structure is dynamically
allocated, a new utility function, core_kernel_data(), was added to
kernel/extable.c; it returns 1 if the address lies between _sdata and
_edata, i.e. within the kernel's static data section.
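
The extable.c side is outside the diffstat below (which is limited to
kernel/trace); from the description above, the helper amounts to a
bounds check against the static data section, roughly:

	/* kernel/extable.c (companion change, sketched, not shown in this diff) */
	int core_kernel_data(unsigned long addr)
	{
		if (addr >= (unsigned long)_sdata &&
		    addr < (unsigned long)_edata)
			return 1;
		return 0;
	}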
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
 kernel/trace/ftrace.c | 37 ++++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c7e1df39b57..5b3ee04e39d9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -189,8 +189,14 @@ static void update_ftrace_function(void)
 
 	update_global_ops();
 
+	/*
+	 * If we are at the end of the list and this ops is
+	 * not dynamic, then have the mcount trampoline call
+	 * the function directly
+	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
-	    ftrace_ops_list->next == &ftrace_list_end)
+	    (ftrace_ops_list->next == &ftrace_list_end &&
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
 		func = ftrace_ops_list->func;
 	else
 		func = ftrace_ops_list_func;
@@ -250,6 +256,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	if (!core_kernel_data((unsigned long)ops))
+		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
+
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 		int first = ftrace_global_list == &ftrace_list_end;
 		add_ftrace_ops(&ftrace_global_list, ops);
@@ -293,6 +302,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+		synchronize_sched();
+
 	return 0;
 }
 
@@ -1225,6 +1241,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
  * the filter_hash does not exist or is empty,
  * AND
  * the ip is not in the ops->notrace_hash.
+ *
+ * This needs to be called with preemption disabled as
+ * the hashes are freed with call_rcu_sched().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
@@ -1233,9 +1252,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	/* The hashes are freed with call_rcu_sched() */
-	preempt_disable_notrace();
-
 	filter_hash = rcu_dereference_raw(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
 
@@ -1246,7 +1262,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 		ret = 1;
 	else
 		ret = 0;
-	preempt_enable_notrace();
 
 	return ret;
 }
@@ -3425,14 +3440,20 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	/* see comment above ftrace_global_list_func */
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+	struct ftrace_ops *op;
 
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next);
 	};
+	preempt_enable_notrace();
 }
 
 static void clear_ftrace_swapper(void)
@@ -3743,6 +3764,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	mutex_unlock(&ftrace_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_ftrace_function);
 
 /**
  * unregister_ftrace_function - unregister a function for profiling.
@@ -3762,6 +3784,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(unregister_ftrace_function);
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,