path: root/kernel/trace/ftrace.c
author    Steven Rostedt <srostedt@redhat.com>    2009-06-01 21:51:28 -0400
committer Steven Rostedt <rostedt@goodmis.org>   2009-06-01 23:26:23 -0400
commit    0f6ce3de4ef6ff940308087c49760d068851c1a7 (patch)
tree      26a732ee4dd5b0f5612f0385893ec483c6ba4ab3 /kernel/trace/ftrace.c
parent    112f38a7e36e9d688b389507136bf3af3e6d159b (diff)
ftrace: do not profile functions when disabled
A race was found: if the function profiler is enabled and disabled repeatedly, the system can panic. The cause is that a profiled function may be preempted just before it disables interrupts. If the profiler is disabled and then re-enabled while that function is preempted, the function can resume and access the hash while it is still being initialized.

This patch adds a check in the irq-disabled section: if the profiler is not enabled, the function simply exits. When profiling is disabled, the profile_enabled variable is cleared before the function profiler is unregistered. The unregistering calls stop_machine, which also acts as a synchronize_sched.

[ Impact: fix panic in enabling/disabling function profiler ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
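The ordering that makes this safe can be sketched outside the kernel. The following is a minimal user-space analogue, not the kernel code itself: profile_enabled, profile_hash, profile_call and profile_disable are illustrative names, and a plain mutex stands in for both the irq-disabled section and the stop_machine/synchronize_sched barrier. The fast path touches the hash only while the flag is set; teardown clears the flag first, then waits out any in-flight user before freeing the hash.

/*
 * Hypothetical user-space analogue of the fix: a fast path that touches
 * a shared hash only while an "enabled" flag is set, and a teardown path
 * that clears the flag before freeing the hash.  The real kernel code
 * relies on stop_machine() in unregister_ftrace_profiler() acting as a
 * synchronization point; here a simple mutex stands in for that barrier.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int profile_enabled;        /* stands in for ftrace_profile_enabled */
static int *profile_hash;                 /* stands in for stat->hash             */
static pthread_mutex_t teardown_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: bail out unless both the hash and the enabled flag are set. */
static void profile_call(unsigned long ip)
{
	pthread_mutex_lock(&teardown_lock);   /* the irq-off section in the kernel */
	if (!profile_hash || !atomic_load(&profile_enabled))
		goto out;
	profile_hash[ip % 16]++;              /* record a hit */
out:
	pthread_mutex_unlock(&teardown_lock);
}

/* Teardown: clear the flag first, then wait out users, then free the hash. */
static void profile_disable(void)
{
	atomic_store(&profile_enabled, 0);
	pthread_mutex_lock(&teardown_lock);   /* "synchronize" with the fast path */
	free(profile_hash);
	profile_hash = NULL;
	pthread_mutex_unlock(&teardown_lock);
}

int main(void)
{
	profile_hash = calloc(16, sizeof(*profile_hash));
	atomic_store(&profile_enabled, 1);
	profile_call(42);
	profile_disable();
	profile_call(42);                     /* safely ignored: flag is clear */
	printf("done\n");
	return 0;
}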
Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2074e5b7766b..d6973dfadb36 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -599,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_save(flags);
 
 	stat = &__get_cpu_var(ftrace_profile_stats);
-	if (!stat->hash)
+	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
 	rec = ftrace_find_profiled_func(stat, ip);
@@ -630,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 
 	local_irq_save(flags);
 	stat = &__get_cpu_var(ftrace_profile_stats);
-	if (!stat->hash)
+	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
 	calltime = trace->rettime - trace->calltime;
@@ -724,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 		ftrace_profile_enabled = 1;
 	} else {
 		ftrace_profile_enabled = 0;
+		/*
+		 * unregister_ftrace_profiler calls stop_machine
+		 * so this acts like an synchronize_sched.
+		 */
 		unregister_ftrace_profiler();
 	}
 }