Diffstat (limited to 'kernel/sched')
 kernel/sched/idle.c | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 25b9423abce9..fe4b24bf33ca 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -67,6 +67,10 @@ void __weak arch_cpu_idle(void)
  * cpuidle_idle_call - the main idle function
  *
  * NOTE: no locks or semaphores should be used here
+ *
+ * On archs that support TIF_POLLING_NRFLAG, this function is called
+ * with polling set, and it returns with polling set.  If it ever
+ * stops polling, it must clear the polling bit.
  */
 static void cpuidle_idle_call(void)
 {
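The contract above matters because of how remote wakeups use the polling bit: a waker that sets TIF_NEED_RESCHED on the idle task can skip the resched IPI whenever the idler is known to be watching its own flags word (e.g. in a monitor/mwait loop). As a rough illustration, here is a userspace model of that waker-side check using C11 atomics; idle_flags, F_NEED_RESCHED, F_POLLING and set_nr_and_check_polling() are made-up names standing in for the kernel's thread_info flags and bitops, not the kernel's actual API.

#include <stdatomic.h>
#include <stdbool.h>

#define F_NEED_RESCHED	(1u << 0)	/* stand-in for TIF_NEED_RESCHED */
#define F_POLLING	(1u << 1)	/* stand-in for TIF_POLLING_NRFLAG */

/* One flags word for the target CPU's idle task (thread_info::flags). */
static _Atomic unsigned int idle_flags;

/*
 * Waker side: set need_resched and report whether the idle task was
 * polling at that instant.  If it was, it will notice the new bit on
 * its own and the resched IPI can be skipped.
 */
static bool set_nr_and_check_polling(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_fetch_or_explicit(flags, F_NEED_RESCHED,
						    memory_order_seq_cst);
	return old & F_POLLING;		/* true => IPI not needed */
}

That deduction is only sound if the polling bit can never be observed set while the idle task is off the CPU, which is exactly the invariant the next hunk spells out.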
@@ -175,10 +179,22 @@ exit_idle:
 
 /*
  * Generic idle loop implementation
+ *
+ * Called with polling cleared.
  */
 static void cpu_idle_loop(void)
 {
 	while (1) {
+		/*
+		 * If the arch has a polling bit, we maintain an invariant:
+		 *
+		 * Our polling bit is clear if we're not scheduled (i.e. if
+		 * rq->curr != rq->idle).  This means that, if rq->idle has
+		 * the polling bit set, then setting need_resched is
+		 * guaranteed to cause the cpu to reschedule.
+		 */
+
+		__current_set_polling();
 		tick_nohz_idle_enter();
 
 		while (!need_resched()) {
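To keep that invariant, the patch sets the polling bit at the top of every loop iteration, while the idle task is unambiguously rq->curr. In the same toy model, the idle-side entry path might look like this (idle_poll_until_work() is an illustrative name; the busy-wait stands in for a monitor/mwait-style idle wait):

/*
 * Idle side, loop entry: the polling bit is only ever set while the
 * idle task is actually running on this CPU, which is what makes the
 * waker's "was polling => skip the IPI" deduction safe.
 */
static void idle_poll_until_work(void)
{
	atomic_fetch_or_explicit(&idle_flags, F_POLLING,
				 memory_order_relaxed);
	while (!(atomic_load_explicit(&idle_flags, memory_order_relaxed)
		 & F_NEED_RESCHED))
		;	/* stand-in for a monitor/mwait-style idle wait */
}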
@@ -218,6 +234,15 @@ static void cpu_idle_loop(void)
 		 */
 		preempt_set_need_resched();
 		tick_nohz_idle_exit();
+		__current_clr_polling();
+
+		/*
+		 * We promise to reschedule if need_resched is set while
+		 * polling is set.  That means that clearing polling
+		 * needs to be visible before rescheduling.
+		 */
+		smp_mb__after_atomic();
+
 		schedule_preempt_disabled();
 	}
 }
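The barrier is the load-bearing part of this hunk. The kernel's clear of the polling bit is an unordered bitop, so without smp_mb__after_atomic() a waker could set need_resched, still observe a stale polling bit after this CPU has already descheduled into another task, skip the IPI, and leave the resched request sitting unnoticed on the idle task's flags. In the toy model, the exit path looks like this, with an explicit fence playing that role (the relaxed RMW mirrors the kernel's unordered clear_bit()):

/*
 * Idle side, loop exit: the clear of the polling bit must be globally
 * visible before we deschedule, so no waker can conclude "it is
 * polling, setting need_resched is enough" once we are gone.
 */
static void idle_stop_polling_and_schedule(void)
{
	atomic_fetch_and_explicit(&idle_flags, ~F_POLLING,
				  memory_order_relaxed);
	/* Plays the role of smp_mb__after_atomic() in the hunk above. */
	atomic_thread_fence(memory_order_seq_cst);
	/* ... schedule_preempt_disabled() would run here ... */
}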
@@ -239,7 +264,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 	 */
 	boot_init_stack_canary();
 #endif
-	__current_set_polling();
 	arch_cpu_idle_prepare();
 	cpu_idle_loop();
 }
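The final hunk is the other half of the move: the one-time __current_set_polling() in cpu_startup_entry() goes away because the loop now re-establishes the bit itself on every iteration, so the invariant also holds after each return from schedule_preempt_disabled(). A minimal single-threaded smoke test tying the sketches above together (again purely illustrative):

int main(void)
{
	atomic_store(&idle_flags, 0);

	/* The idle CPU enters the loop and starts polling.  (Calling
	 * idle_poll_until_work() here would spin, so just set the bit.) */
	atomic_fetch_or_explicit(&idle_flags, F_POLLING,
				 memory_order_relaxed);

	/* A waker requests a reschedule; while polling, no IPI is needed. */
	bool was_polling = set_nr_and_check_polling(&idle_flags);

	idle_stop_polling_and_schedule();

	return was_polling ? 0 : 1;	/* expect 0: the IPI was skippable */
}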