path: root/kernel/trace/ftrace.c
author    Thomas Gleixner <tglx@linutronix.de>  2013-07-12 06:34:42 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2013-07-12 06:34:42 -0400
commit  f2006e27396f55276f24434f56e208d86e7f9908 (patch)
tree    71896db916d33888b4286f80117d3cac0da40e6d /kernel/trace/ftrace.c
parent  e399eb56a6110e13f97e644658648602e2b08de7 (diff)
parent  9903883f1dd6e86f286b7bfa6e4b423f98c1cd9e (diff)
Merge branch 'linus' into timers/urgent
Get upstream changes so we can apply fixes against them

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  38
1 file changed, 34 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c508ff33c62..67708f46baae 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 			 * so there'll be no new users. We must ensure
 			 * all current users are done before we free
 			 * the control data.
+			 * Note synchronize_sched() is not enough, as we
+			 * use preempt_disable() to do RCU, but the function
+			 * tracer can be called where RCU is not active
+			 * (before user_exit()).
 			 */
-			synchronize_sched();
+			schedule_on_each_cpu(ftrace_sync);
 			control_ops_free(ops);
 		}
 	} else
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		synchronize_sched();
+		schedule_on_each_cpu(ftrace_sync);
+
 
 	return 0;
 }
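
Note on the synchronization change above: schedule_on_each_cpu() queues a work item on every online CPU and waits for all of them to complete, which forces each CPU through the scheduler in process context. That also covers CPUs sitting in idle or running in userspace (before user_exit()), where the preempt-disable based read side that plain synchronize_sched() waits for is not active. A minimal sketch of the same pattern, assuming only <linux/workqueue.h>; the helper name heavy_synchronize() is invented here for illustration and is not part of the patch:

#include <linux/workqueue.h>

/* A deliberately empty work callback: merely running it on a CPU is the point. */
static void heavy_sync_stub(struct work_struct *work)
{
}

/*
 * Illustrative helper (not in the patch): flush an empty work item on every
 * online CPU.  Once schedule_on_each_cpu() returns, every CPU has scheduled
 * at least once, so callers that were in a preempt-disabled section (or in
 * idle/userspace) when we started have finished.  Returns 0 or -errno.
 */
static int heavy_synchronize(void)
{
	return schedule_on_each_cpu(heavy_sync_stub);
}
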
@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v)
 	if (rec->counter <= 1)
 		stddev = 0;
 	else {
-		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Apply Welford's method:
+		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+		 */
+		stddev = rec->counter * rec->time_squared -
+			 rec->time * rec->time;
+
 		/*
 		 * Divide only 1000 for ns^2 -> us^2 conversion.
 		 * trace_print_graph_duration will divide 1000 again.
 		 */
-		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 	}
 
 	trace_seq_init(&s);
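
Worked example of the corrected variance formula above, s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)): the profiler only keeps a count, a running sum and a running sum of squares (rec->counter, rec->time, rec->time_squared), the numerator is built first, and the single integer division happens last, as in the do_div() call. A small stand-alone userspace check under those assumptions (the sample values are made up, and the extra *1000 ns^2 -> us^2 scaling from the kernel code is omitted):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up samples standing in for per-call times. */
	const uint64_t x[] = { 2, 4, 4, 4, 5, 5, 7, 9 };
	const uint64_t n = sizeof(x) / sizeof(x[0]);
	uint64_t sum = 0, sum_sq = 0;

	for (uint64_t i = 0; i < n; i++) {
		sum += x[i];		/* plays the role of rec->time         */
		sum_sq += x[i] * x[i];	/* plays the role of rec->time_squared */
	}

	/* Numerator first, one truncating division at the end, like do_div(). */
	uint64_t num = n * sum_sq - sum * sum;	/* 8*232 - 40*40 = 256      */
	uint64_t var = num / (n * (n - 1));	/* 256 / 56 = 4 (truncated) */

	printf("sum=%llu sum_sq=%llu variance=%llu\n",
	       (unsigned long long)sum,
	       (unsigned long long)sum_sq,
	       (unsigned long long)var);
	return 0;
}

The exact sample variance for these values is 256/56, about 4.57, so the truncated result of 4 is what an integer do_div() of the same numerator would give.
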
@@ -3512,8 +3537,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
 static int __init set_ftrace_notrace(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
@@ -3521,6 +3550,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
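
The ftrace_filter_param flag added in the last two hunks only records that ftrace_filter= or ftrace_notrace= was given on the kernel command line (for example, booting with ftrace_filter=kmalloc* runs set_ftrace_filter() and sets the flag); per its comment, the startup selftest can then skip itself when a boot-time filter is in effect. A hedged sketch of how such a consumer could look, assuming <linux/init.h>, <linux/printk.h> and <linux/types.h>; the function name boot_filter_selftest() is invented for illustration and is not part of the patch:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/types.h>

extern bool ftrace_filter_param;

static int __init boot_filter_selftest(void)
{
	if (ftrace_filter_param) {
		pr_info("ftrace: filter set on boot command line, skipping selftest\n");
		return 0;
	}
	/* ... exercise the function tracer against a known function here ... */
	return 0;
}
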