author    Andy Lutomirski <luto@amacapital.net>    2014-06-04 13:31:16 -0400
committer Ingo Molnar <mingo@kernel.org>           2014-06-05 06:09:51 -0400
commit    82c65d60d64401aedc1006d6572469bbfdf148de (patch)
tree      ffa4aa0ece7bcff7bf5426b882c90e80172ff15b /kernel
parent    dfc68f29ae67f2a6e799b44e6a4eb3417dffbfcd (diff)
sched/idle: Clear polling before descheduling the idle thread
Currently, the only real guarantee provided by the polling bit is that, if
you hold rq->lock and the polling bit is set, then you can set need_resched
to force a reschedule.

The only reason the lock is needed is that the idle thread might not be
running at all when setting its need_resched bit, and rq->lock keeps it
pinned.

This is easy to fix: just clear the polling bit before scheduling. Now the
idle thread's polling bit is only ever set when rq->curr == rq->idle.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: nicolas.pitre@linaro.org
Cc: daniel.lezcano@linaro.org
Cc: umgwanakikbuti@gmail.com
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/b2059fcb4c613d520cb503b6fad6e47033c7c203.1401902905.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
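To make the guarantee concrete, here is a minimal sketch (not part of the
patch) of how a waker holding rq->lock can exploit the invariant described
above. tsk_is_polling() and set_tsk_need_resched() are existing scheduler
helpers; the function wake_polling_idle_cpu() is a hypothetical name used
only for illustration:

static bool wake_polling_idle_cpu(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);

	/*
	 * The invariant established by this patch: the idle task's
	 * polling bit is only ever set while rq->curr == rq->idle.
	 */
	if (rq->curr != rq->idle || !tsk_is_polling(rq->idle))
		return false;		/* caller must send a reschedule IPI */

	set_tsk_need_resched(rq->idle);	/* idle loop's need_resched() check fires */
	return true;			/* no IPI needed */
}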
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/idle.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 25b9423abce9..fe4b24bf33ca 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -67,6 +67,10 @@ void __weak arch_cpu_idle(void)
  * cpuidle_idle_call - the main idle function
  *
  * NOTE: no locks or semaphores should be used here
+ *
+ * On archs that support TIF_POLLING_NRFLAG, is called with polling
+ * set, and it returns with polling set.  If it ever stops polling, it
+ * must clear the polling bit.
  */
 static void cpuidle_idle_call(void)
 {
@@ -175,10 +179,22 @@ exit_idle:
 
 /*
  * Generic idle loop implementation
+ *
+ * Called with polling cleared.
  */
 static void cpu_idle_loop(void)
 {
 	while (1) {
+		/*
+		 * If the arch has a polling bit, we maintain an invariant:
+		 *
+		 * Our polling bit is clear if we're not scheduled (i.e. if
+		 * rq->curr != rq->idle).  This means that, if rq->idle has
+		 * the polling bit set, then setting need_resched is
+		 * guaranteed to cause the cpu to reschedule.
+		 */
+
+		__current_set_polling();
 		tick_nohz_idle_enter();
 
 		while (!need_resched()) {
@@ -218,6 +234,15 @@ static void cpu_idle_loop(void)
 		 */
 		preempt_set_need_resched();
 		tick_nohz_idle_exit();
+		__current_clr_polling();
+
+		/*
+		 * We promise to reschedule if need_resched is set while
+		 * polling is set.  That means that clearing polling
+		 * needs to be visible before rescheduling.
+		 */
+		smp_mb__after_atomic();
+
 		schedule_preempt_disabled();
 	}
 }
@@ -239,7 +264,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 	 */
 	boot_init_stack_canary();
 #endif
-	__current_set_polling();
 	arch_cpu_idle_prepare();
 	cpu_idle_loop();
 }
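The smp_mb__after_atomic() added above pairs with a full barrier on the
waker side: the waker must set need_resched before it reads the polling bit,
and the idle side must make its cleared polling bit visible before it
deschedules. As a rough illustration only, the protocol can be modeled in
user space with C11 atomics; the names mirror the kernel helpers they stand
in for, and the explicit fences mirror the kernel barriers (seq_cst stores
would already order these):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool polling;		/* models TIF_POLLING_NRFLAG */
static atomic_bool need_resched;	/* models TIF_NEED_RESCHED */
static atomic_bool ipi_needed;		/* did the waker have to "send an IPI"? */

static void *idle_cpu(void *arg)
{
	atomic_store(&polling, true);			/* __current_set_polling() */
	while (!atomic_load(&need_resched))
		;					/* poll instead of sleeping */
	atomic_store(&polling, false);			/* __current_clr_polling() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	/* schedule_preempt_disabled() would run here */
	return NULL;
}

static void *waker_cpu(void *arg)
{
	atomic_store(&need_resched, true);		/* set_tsk_need_resched() */
	atomic_thread_fence(memory_order_seq_cst);
	/* If polling is still visible, the idle loop is guaranteed to notice. */
	if (!atomic_load(&polling))
		atomic_store(&ipi_needed, true);	/* would send a reschedule IPI */
	return NULL;
}

int main(void)
{
	pthread_t idle, waker;

	pthread_create(&idle, NULL, idle_cpu, NULL);
	pthread_create(&waker, NULL, waker_cpu, NULL);
	pthread_join(idle, NULL);
	pthread_join(waker, NULL);
	printf("reschedule IPI %s needed\n",
	       atomic_load(&ipi_needed) ? "was" : "was not");
	return 0;
}

Build with cc -pthread. Whichever outcome the race produces is safe: either
the waker still sees polling set (the idle loop is guaranteed to observe
need_resched before descheduling), or it sees polling clear and falls back
to the IPI path.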