author		Peter Zijlstra <peterz@infradead.org>	2013-11-20 06:22:37 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-13 11:38:55 -0500
commit		8cb75e0c4ec9786b81439761eac1d18d4a931af3 (patch)
tree		9d13e6c3580a36cd76d1b3a96827795949519409 /kernel/cpu
parent		c9c8986847d2f4fc474c10ee08afa57e7474096d (diff)
sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
With various drivers wanting to inject idle time; we get people calling idle routines outside of the idle loop proper. Therefore we need to be extra careful about not missing TIF_NEED_RESCHED -> PREEMPT_NEED_RESCHED propagations.

While looking at this, I also realized there's a small window in the existing idle loop where we can miss TIF_NEED_RESCHED; when it hits right after the tif_need_resched() test at the end of the loop but right before the need_resched() test at the start of the loop.

So move preempt_fold_need_resched() out of the loop where we're guaranteed to have TIF_NEED_RESCHED set.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-x9jgh45oeayzajz2mjt0y7d6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
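To make the window concrete, here is a minimal, self-contained userspace sketch (not the kernel source; the atomic flag, the stub helpers and the two loop shapes are simplified stand-ins for the code touched in the diff below). It only illustrates where the fold sits relative to the need_resched() check, not the real IPI or polling machinery:

/*
 * Minimal standalone sketch of the race the patch closes.  This is NOT
 * the kernel source: the atomic flag and the helpers below are
 * simplified stand-ins for TIF_NEED_RESCHED, need_resched(),
 * tif_need_resched() and preempt_set_need_resched().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tif_need_resched_flag;	/* stand-in for TIF_NEED_RESCHED */
static bool preempt_need_resched_folded;	/* stand-in for PREEMPT_NEED_RESCHED */

static bool tif_need_resched(void) { return atomic_load(&tif_need_resched_flag); }
static bool need_resched(void)     { return atomic_load(&tif_need_resched_flag); }
static void preempt_set_need_resched(void) { preempt_need_resched_folded = true; }

/*
 * Old shape: fold inside the loop body.  If the flag gets set after the
 * tif_need_resched() test below but before the need_resched() check at
 * the top of the next iteration, we leave the loop without folding.
 */
static void idle_loop_old(void)
{
	while (!need_resched()) {
		/* ... arch idle ... */
		if (tif_need_resched())
			preempt_set_need_resched();
		/* <-- window: a flag set here is seen by need_resched(),
		 *     but the fold above has already been skipped */
	}
}

/*
 * New shape (what the patch does): fall out of the loop first, then
 * fold unconditionally -- need_resched() returning true guarantees the
 * flag is set at this point.
 */
static void idle_loop_new(void)
{
	while (!need_resched()) {
		/* ... arch idle ... */
	}
	preempt_set_need_resched();
}

int main(void)
{
	/* Model a wakeup landing just before the top-of-loop check. */
	atomic_store(&tif_need_resched_flag, true);

	idle_loop_old();
	printf("old shape folded: %d\n", preempt_need_resched_folded);	/* prints 0 */

	preempt_need_resched_folded = false;
	idle_loop_new();
	printf("new shape folded: %d\n", preempt_need_resched_folded);	/* prints 1 */
	return 0;
}

With the flag already set when the loop condition is evaluated, the old shape exits without ever folding, while the new shape folds unconditionally on the way out; that guarantee is exactly what the comment added in the diff below relies on.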
Diffstat (limited to 'kernel/cpu')
-rw-r--r--	kernel/cpu/idle.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 988573a9a387..277f494c2a9a 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -105,14 +105,17 @@ static void cpu_idle_loop(void)
 			__current_set_polling();
 		}
 		arch_cpu_idle_exit();
-		/*
-		 * We need to test and propagate the TIF_NEED_RESCHED
-		 * bit here because we might not have send the
-		 * reschedule IPI to idle tasks.
-		 */
-		if (tif_need_resched())
-			set_preempt_need_resched();
 	}
+
+	/*
+	 * Since we fell out of the loop above, we know
+	 * TIF_NEED_RESCHED must be set, propagate it into
+	 * PREEMPT_NEED_RESCHED.
+	 *
+	 * This is required because for polling idle loops we will
+	 * not have had an IPI to fold the state for us.
+	 */
+	preempt_set_need_resched();
 	tick_nohz_idle_exit();
 	schedule_preempt_disabled();
 }