Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 143
1 file changed, 120 insertions, 23 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c508ff33c62..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 			 * so there'll be no new users. We must ensure
 			 * all current users are done before we free
 			 * the control data.
+			 * Note synchronize_sched() is not enough, as we
+			 * use preempt_disable() to do RCU, but the function
+			 * tracer can be called where RCU is not active
+			 * (before user_exit()).
 			 */
-			synchronize_sched();
+			schedule_on_each_cpu(ftrace_sync);
 			control_ops_free(ops);
 		}
 	} else
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		synchronize_sched();
+		schedule_on_each_cpu(ftrace_sync);
+
 
 	return 0;
 }
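The hunks above replace synchronize_sched() with schedule_on_each_cpu(ftrace_sync) at both points where an ops may be freed. As the new comments note, the function tracer can fire where RCU is not watching (idle, or on the way in from userspace before user_exit()), so an RCU-sched grace period alone cannot guarantee all callers are done. A minimal sketch of the idiom, assuming only the standard workqueue API (my_sync_stub and my_hard_sync are hypothetical names, not part of the patch):

#include <linux/workqueue.h>

/* Intentionally empty: the work item completing is the barrier. */
static void my_sync_stub(struct work_struct *work)
{
}

static void my_hard_sync(void)
{
        /*
         * schedule_on_each_cpu() queues the stub on every online CPU
         * and waits for each to run it, so every CPU is forced through
         * the scheduler -- including CPUs that were idle or executing
         * in userspace, which synchronize_sched() need not wait for.
         */
        schedule_on_each_cpu(my_sync_stub);
}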
@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v)
 	if (rec->counter <= 1)
 		stddev = 0;
 	else {
-		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Apply Welford's method:
+		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+		 */
+		stddev = rec->counter * rec->time_squared -
+			 rec->time * rec->time;
+
 		/*
 		 * Divide only 1000 for ns^2 -> us^2 conversion.
 		 * trace_print_graph_duration will divide 1000 again.
 		 */
-		do_div(stddev, (rec->counter - 1) * 1000);
+		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 	}
 
 	trace_seq_init(&s);
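The old expression computed rec->time_squared - rec->counter * avg * avg with an already-truncated integer avg, then divided by (n - 1) * 1000. The fix works entirely in the two running sums the profiler keeps (rec->time = sum of x, rec->time_squared = sum of x^2) and evaluates s^2 = (n * sum(x^2) - (sum x)^2) / (n * (n - 1)); the comment credits Welford's method, though strictly this is the one-pass sum-of-squares expansion of the sample variance. A small userspace mirror of the arithmetic (hypothetical example, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Stand-ins for rec->counter, rec->time, rec->time_squared. */
        uint64_t x[] = { 100, 200, 300 };       /* durations, in "ns" */
        uint64_t n = 0, sum = 0, sum_sq = 0;

        for (unsigned i = 0; i < sizeof(x) / sizeof(x[0]); i++) {
                n++;                            /* rec->counter      */
                sum += x[i];                    /* rec->time         */
                sum_sq += x[i] * x[i];          /* rec->time_squared */
        }

        /* s^2 = (n * sum_sq - sum^2) / (n * (n - 1)) */
        uint64_t s2 = (n * sum_sq - sum * sum) / (n * (n - 1));

        printf("s^2 = %llu\n", (unsigned long long)s2); /* prints 10000 */
        return 0;
}

For {100, 200, 300} the mean is 200 and the sample variance is (3 * 140000 - 600^2) / (3 * 2) = 10000; the kernel code folds the extra factor of 1000 into the same do_div() for the ns^2 -> us^2 conversion.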
@@ -1416,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	struct ftrace_hash *filter_hash;
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/*
+	 * There's a small race when adding ops that the ftrace handler
+	 * that wants regs, may be called without them. We can not
+	 * allow that handler to be called if regs is NULL.
+	 */
+	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+		return 0;
+#endif
+
 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
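Passing regs through lets ftrace_ops_test() veto an ops registered with FTRACE_OPS_FL_SAVE_REGS whenever, per the new comment, it would be invoked through a path that did not save registers. A hypothetical callback (not from the patch) shows what the check protects:

/*
 * A SAVE_REGS handler is entitled to dereference regs, so invoking
 * it with regs == NULL during the small race window while the ops
 * is being added would oops. my_regs_handler is an invented example.
 */
static void my_regs_handler(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        pr_info("traced %ps, pc=0x%lx\n",
                (void *)ip, instruction_pointer(regs));
}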
@@ -2134,12 +2169,57 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
+
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
+
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+			cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
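ops_references_rec() is a chain of four short-circuit tests, and referenced_filters() simply counts how many registered ops pass it for a given record. A toy userspace model of the same decision order (struct toy_ops and its boolean fields are invented stand-ins for the real hash lookups):

#include <stdbool.h>
#include <stdio.h>

struct toy_ops {
        bool enabled;           /* FTRACE_OPS_FL_ENABLED           */
        bool filter_empty;      /* ftrace_hash_empty(filter_hash)  */
        bool notrace_empty;     /* ftrace_hash_empty(notrace_hash) */
        bool ip_in_filter;      /* ftrace_lookup_ip(filter_hash)   */
        bool ip_in_notrace;     /* ftrace_lookup_ip(notrace_hash)  */
};

static bool toy_references_rec(const struct toy_ops *ops)
{
        if (!ops->enabled)
                return false;   /* disabled ops never count           */
        if (ops->filter_empty && ops->notrace_empty)
                return false;   /* traces everything: counted via ref */
        if (!ops->filter_empty && !ops->ip_in_filter)
                return false;   /* has a filter the ip misses         */
        if (ops->ip_in_notrace)
                return false;   /* notrace always excludes            */
        return true;
}

int main(void)
{
        /* Enabled, filter matches the ip, nothing in notrace: counts. */
        struct toy_ops ops = {
                .enabled = true, .ip_in_filter = true, .notrace_empty = true,
        };
        printf("references rec: %d\n", toy_references_rec(&ops));
        return 0;
}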
@@ -2148,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
 	unsigned long ref = 0;
+	bool test = false;
 	int i;
 
 	/*
@@ -2161,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
 
 		for (ops = ftrace_ops_list;
 		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
 		}
 	}
 
@@ -2173,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
 			/* If something went wrong, bail without enabling anything */
 			if (unlikely(ftrace_disabled))
 				return -1;
 
 			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
 			/*
 			 * Do the initial record conversion from mcount jump
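A worked example of the new accounting (numbers invented for illustration): with two enabled ops, one tracing all module functions and one filtering on a single function in the loaded module, the loop above yields ref = 1 and test = true. The record for the filtered function then gets flags = cnt = 1 + referenced_filters(p) = 2, and every other new record gets flags = 1. Under the old blanket p->flags = ref, the filtering ops was never counted at all, so a function it named in a not-yet-loaded module stayed untraced after the module arrived.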
@@ -2198,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
 			 * conversion puts the module to the correct state, thus
 			 * passing the ftrace_make_call check.
 			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
 				int failed = __ftrace_replace_code(p, 1);
 				if (failed)
 					ftrace_bug(failed, p->ip);
@@ -3349,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 	return add_hash_entry(hash, ip);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 		unsigned long ip, int remove, int reset, int enable)
@@ -3391,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-	    && ftrace_enabled)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (!ret)
+		ftrace_ops_update_code(ops);
 
 	mutex_unlock(&ftrace_lock);
 
@@ -3512,8 +3605,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
 static int __init set_ftrace_notrace(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
@@ -3521,6 +3618,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
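Both __setup() hooks now also set ftrace_filter_param, so the function selftest (per the new comment) can tell that the user pinned a filter on the boot command line and skip checks that a pre-set filter would skew. For illustration, hypothetical command-line fragments the two hooks parse (the function patterns are examples only):

	ftrace_filter=kfree,kmem_cache_*
	ftrace_notrace=*_spin_lock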
@@ -3615,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
-		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-		    && ftrace_enabled)
-			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+		if (!ret)
+			ftrace_ops_update_code(iter->ops);
 
 		mutex_unlock(&ftrace_lock);
 	}
@@ -4188,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	return 1;
 }
@@ -4211,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
 		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip))
+		    ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4244,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip))
+		if (ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();