author		Steven Rostedt <srostedt@redhat.com>	2011-05-05 21:14:55 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2011-05-18 15:29:51 -0400
commit		cdbe61bfe70440939e457fb4a8d0995eaaed17de (patch)
tree		6e82066db25ab6fa42455a42bb77783dac5260b8
parent		b848914ce39589d89ee0078a6d1ef452b464729e (diff)
ftrace: Allow dynamically allocated function tracers

Now that functions may be selected individually, it only makes sense that we should allow dynamically allocated trace structures to be traced. This will allow perf to allocate an ftrace_ops structure at runtime and use it to pick and choose which functions that structure will trace.

Note, a dynamically allocated ftrace_ops will always be called indirectly instead of being called directly from the mcount in entry.S. This is because there is no safe way to prevent mcount from being preempted before calling the function, unless we modify every entry.S to do so (not likely). Thus, dynamically allocated ftrace_ops will now be called by ftrace_ops_list_func(), which loops through all of the registered ops when more than one op is registered at a time. This loop is protected by preempt_disable.

To determine whether an ftrace_ops structure is dynamically allocated or not, a new utility function, core_kernel_data(), was added to kernel/extable.c; it returns 1 if the address lies between _sdata and _edata.

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
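As a rough illustration of what this enables, here is a minimal sketch of a caller allocating and registering its own ftrace_ops at runtime. The callback and helper names are hypothetical (not part of this patch); the two-argument callback signature matches the ftrace_func_t in use at the time of this change.

	#include <linux/ftrace.h>
	#include <linux/slab.h>

	/* Hypothetical callback, invoked at every traced function entry. */
	static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
	{
	}

	static struct ftrace_ops *my_ops;

	static int my_start_tracer(void)
	{
		/*
		 * The ops lives on the heap, outside _sdata.._edata, so
		 * __register_ftrace_function() marks it FTRACE_OPS_FL_DYNAMIC
		 * and it is only ever called via ftrace_ops_list_func().
		 */
		my_ops = kzalloc(sizeof(*my_ops), GFP_KERNEL);
		if (!my_ops)
			return -ENOMEM;

		my_ops->func = my_trace_callback;
		return register_ftrace_function(my_ops);
	}

	static void my_stop_tracer(void)
	{
		/*
		 * The synchronize_sched() in __unregister_ftrace_function()
		 * ensures no CPU is still running my_trace_callback(), so
		 * freeing the ops afterwards is safe.
		 */
		unregister_ftrace_function(my_ops);
		kfree(my_ops);
	}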
-rw-r--r--	include/linux/ftrace.h	 1
-rw-r--r--	include/linux/kernel.h	 1
-rw-r--r--	kernel/extable.c	 8
-rw-r--r--	kernel/trace/ftrace.c	37
4 files changed, 40 insertions(+), 7 deletions(-)
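For context on the core_kernel_data() helper added in the kernel/extable.c hunk below: statically defined, initialized objects are placed in the kernel image's .data section, between _sdata and _edata, while kmalloc()'d memory is not, which is what lets __register_ftrace_function() tell static and dynamic ftrace_ops apart. A small sketch, with made-up names, of how the helper behaves:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	/* Initialized static data lands in .data, between _sdata and _edata. */
	static int in_core_image = 1;

	static void show_core_kernel_data(void)
	{
		int *on_heap = kmalloc(sizeof(*on_heap), GFP_KERNEL);

		/* Address inside the core kernel image: returns 1. */
		pr_info("static: %d\n", core_kernel_data((unsigned long)&in_core_image));

		/* kmalloc()'d address: returns 0, so such an ftrace_ops is marked FL_DYNAMIC. */
		if (on_heap) {
			pr_info("heap:   %d\n", core_kernel_data((unsigned long)on_heap));
			kfree(on_heap);
		}
	}
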
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4609c0ece79a..caba694a62b6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -34,6 +34,7 @@ struct ftrace_hash;
 enum {
 	FTRACE_OPS_FL_ENABLED		= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL		= 1 << 1,
+	FTRACE_OPS_FL_DYNAMIC		= 1 << 2,
 };
 
 struct ftrace_ops {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 00cec4dc0ae2..f37ba716ef8b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints);
 extern unsigned long long memparse(const char *ptr, char **retptr);
 
 extern int core_kernel_text(unsigned long addr);
+extern int core_kernel_data(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
 extern int func_ptr_is_kernel_text(void *ptr);
diff --git a/kernel/extable.c b/kernel/extable.c
index 7f8f263f8524..c2d625fcda77 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
+int core_kernel_data(unsigned long addr)
+{
+	if (addr >= (unsigned long)_sdata &&
+	    addr < (unsigned long)_edata)
+		return 1;
+	return 0;
+}
+
 int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c7e1df39b57..5b3ee04e39d9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -189,8 +189,14 @@ static void update_ftrace_function(void)
 
 	update_global_ops();
 
+	/*
+	 * If we are at the end of the list and this ops is
+	 * not dynamic, then have the mcount trampoline call
+	 * the function directly
+	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
-	    ftrace_ops_list->next == &ftrace_list_end)
+	    (ftrace_ops_list->next == &ftrace_list_end &&
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
 		func = ftrace_ops_list->func;
 	else
 		func = ftrace_ops_list_func;
@@ -250,6 +256,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	if (!core_kernel_data((unsigned long)ops))
+		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
+
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 		int first = ftrace_global_list == &ftrace_list_end;
 		add_ftrace_ops(&ftrace_global_list, ops);
@@ -293,6 +302,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+		synchronize_sched();
+
 	return 0;
 }
 
@@ -1225,6 +1241,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
  * the filter_hash does not exist or is empty,
  *  AND
  * the ip is not in the ops->notrace_hash.
+ *
+ * This needs to be called with preemption disabled as
+ * the hashes are freed with call_rcu_sched().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
@@ -1233,9 +1252,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	/* The hashes are freed with call_rcu_sched() */
-	preempt_disable_notrace();
-
 	filter_hash = rcu_dereference_raw(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
 
@@ -1246,7 +1262,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 		ret = 1;
 	else
 		ret = 0;
-	preempt_enable_notrace();
 
 	return ret;
 }
@@ -3425,14 +3440,20 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	/* see comment above ftrace_global_list_func */
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+	struct ftrace_ops *op;
 
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next);
 	};
+	preempt_enable_notrace();
 }
 
 static void clear_ftrace_swapper(void)
@@ -3743,6 +3764,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	mutex_unlock(&ftrace_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_ftrace_function);
 
 /**
  * unregister_ftrace_function - unregister a function for profiling.
@@ -3762,6 +3784,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(unregister_ftrace_function);
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,