author	Peter Zijlstra <peterz@infradead.org>	2013-11-20 06:22:37 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-13 11:38:55 -0500
commit	8cb75e0c4ec9786b81439761eac1d18d4a931af3
tree	9d13e6c3580a36cd76d1b3a96827795949519409	/include/linux/sched.h
parent	c9c8986847d2f4fc474c10ee08afa57e7474096d
sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
With various drivers wanting to inject idle time, we get people
calling idle routines outside of the idle loop proper.
Therefore we need to be extra careful about not missing
TIF_NEED_RESCHED -> PREEMPT_NEED_RESCHED propagations.
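For reference, the folding step itself is tiny; roughly (a sketch of what
preempt_fold_need_resched() amounts to, the real helper lives in
<linux/preempt.h>):

	static __always_inline void preempt_fold_need_resched(void)
	{
		/* mirror TIF_NEED_RESCHED into PREEMPT_NEED_RESCHED */
		if (tif_need_resched())
			set_preempt_need_resched();
	}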
While looking at this, I also realized there's a small window in the
existing idle loop where we can miss TIF_NEED_RESCHED: when it hits
right after the tif_need_resched() test at the end of the loop but
right before the need_resched() test at the start of the loop.
So move preempt_fold_need_resched() out of the loop, to a point where
we're guaranteed to have TIF_NEED_RESCHED set.
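For illustration, the window looks roughly like this (a simplified sketch
of the idle loop shape, not the literal kernel code):

	while (1) {
		while (!need_resched()) {	/* 2) ...so the loop exits here...        */
			/* go idle, wake up on interrupt */
			arch_cpu_idle();

			if (tif_need_resched())	/* 1) TIF_NEED_RESCHED lands right        */
				set_preempt_need_resched();	/*    after this test...          */
		}
		/*
		 * 3) ...without PREEMPT_NEED_RESCHED ever being folded.
		 * Doing the fold here (and in current_clr_polling(), below)
		 * is safe because TIF_NEED_RESCHED is guaranteed to be set.
		 */
		schedule_preempt_disabled();
	}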
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-x9jgh45oeayzajz2mjt0y7d6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a03875221663..ffccdad050b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2745,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
 }
 #endif
 
+static inline void current_clr_polling(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+	 * Once the bit is cleared, we'll get IPIs with every new
+	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+	 * fold.
+	 */
+	smp_mb(); /* paired with resched_task() */
+
+	preempt_fold_need_resched();
+}
+
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());