author		Steven Rostedt <srostedt@redhat.com>	2010-06-02 21:52:29 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-06-03 19:09:41 -0400
commit		d1f74e20b5b064a130cd0743a256c2d3cfe84010 (patch)
tree		b93ab590c24bb08f6db0dfc2a3ba60fa43af7f92 /kernel/sched.c
parent		9dda696f0de87a2e5cfabb147e28c76b7d3c6846 (diff)
tracing/sched: Make preempt_schedule() notrace
The function tracer code uses ftrace_preempt_disable() to disable preemption instead of the normal preempt_disable(), but ftrace_preempt_disable() has a slight race condition that may cause it to lose a preemption check.

ftrace_preempt_disable() was introduced to keep the function tracer from recursing on itself: the tracer disables preemption, and re-enabling it would call the function tracer again, causing infinite recursion.

The recursion was assumed to happen only when the tracer was called from within schedule() itself, but this is incorrect. It is actually caused by preempt_schedule(), which is called from preempt_enable(): calling preempt_enable() while NEED_RESCHED is set invokes preempt_schedule(), which in turn calls the function tracer again.

Making preempt_schedule() and add_preempt_count() notrace prevents the infinite recursion, because once add_preempt_count() has raised the preempt count, the preempt_enable() inside the function tracer no longer calls preempt_schedule(). sub_preempt_count() is also made notrace just to keep things symmetric.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
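As an illustration of the recursion described above, here is a minimal sketch only, not the actual ftrace code; the callback name and body are hypothetical:

	/* Hypothetical tracer callback -- the real ftrace callbacks differ. */
	static void example_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		preempt_disable();	/* bumps the count via add_preempt_count() */

		/* ... record the traced call ... */

		preempt_enable();	/* if NEED_RESCHED is set, this calls
					 * preempt_schedule(); when preempt_schedule()
					 * is itself traced, it re-enters this
					 * callback, recursing without bound */
	}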
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 15b93f617fd7..cd6787e57174 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3730,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
 
@@ -3742,9 +3742,9 @@ asmlinkage void __sched preempt_schedule(void)
 		return;
 
 	do {
-		add_preempt_count(PREEMPT_ACTIVE);
+		add_preempt_count_notrace(PREEMPT_ACTIVE);
 		schedule();
-		sub_preempt_count(PREEMPT_ACTIVE);
+		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
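For context (not part of this patch): as far as I recall, notrace is defined in the ftrace headers as a wrapper around gcc's no_instrument_function attribute, roughly:

	#define notrace __attribute__((no_instrument_function))

which keeps the compiler from emitting the mcount call that the function tracer hooks, so annotated functions are never traced.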