diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2013-11-20 06:22:37 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-01-13 11:38:55 -0500 |
| commit | 8cb75e0c4ec9786b81439761eac1d18d4a931af3 (patch) | |
| tree | 9d13e6c3580a36cd76d1b3a96827795949519409 | |
| parent | c9c8986847d2f4fc474c10ee08afa57e7474096d (diff) | |
sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
With various drivers wanting to inject idle time, we get people
calling idle routines outside of the idle loop proper.
Therefore we need to be extra careful about not missing
TIF_NEED_RESCHED -> PREEMPT_NEED_RESCHED propagations.
While looking at this, I also realized there's a small window in the
existing idle loop where we can miss TIF_NEED_RESCHED; when it hits
right after the tif_need_resched() test at the end of the loop but
right before the need_resched() test at the start of the loop.
So move preempt_fold_need_resched() out of the loop where we're
guaranteed to have TIF_NEED_RESCHED set.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-x9jgh45oeayzajz2mjt0y7d6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| -rw-r--r-- | arch/x86/include/asm/mwait.h | 2 | ||||
| -rw-r--r-- | include/linux/preempt.h | 15 | ||||
| -rw-r--r-- | include/linux/sched.h | 15 | ||||
| -rw-r--r-- | kernel/cpu/idle.c | 17 | ||||
| -rw-r--r-- | kernel/sched/core.c | 3 |
5 files changed, 42 insertions, 10 deletions
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 19b71c439256..1da25a5f96f9 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h | |||
| @@ -53,7 +53,7 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) | |||
| 53 | if (!need_resched()) | 53 | if (!need_resched()) |
| 54 | __mwait(eax, ecx); | 54 | __mwait(eax, ecx); |
| 55 | } | 55 | } |
| 56 | __current_clr_polling(); | 56 | current_clr_polling(); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | #endif /* _ASM_X86_MWAIT_H */ | 59 | #endif /* _ASM_X86_MWAIT_H */ |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index dd9ddf8af205..59749fc48328 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -134,6 +134,21 @@ do { \ | |||
| 134 | #undef preempt_check_resched | 134 | #undef preempt_check_resched |
| 135 | #endif | 135 | #endif |
| 136 | 136 | ||
| 137 | #ifdef CONFIG_PREEMPT | ||
| 138 | #define preempt_set_need_resched() \ | ||
| 139 | do { \ | ||
| 140 | set_preempt_need_resched(); \ | ||
| 141 | } while (0) | ||
| 142 | #define preempt_fold_need_resched() \ | ||
| 143 | do { \ | ||
| 144 | if (tif_need_resched()) \ | ||
| 145 | set_preempt_need_resched(); \ | ||
| 146 | } while (0) | ||
| 147 | #else | ||
| 148 | #define preempt_set_need_resched() do { } while (0) | ||
| 149 | #define preempt_fold_need_resched() do { } while (0) | ||
| 150 | #endif | ||
| 151 | |||
| 137 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 152 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 138 | 153 | ||
| 139 | struct preempt_notifier; | 154 | struct preempt_notifier; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index a03875221663..ffccdad050b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -2745,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void) | |||
| 2745 | } | 2745 | } |
| 2746 | #endif | 2746 | #endif |
| 2747 | 2747 | ||
| 2748 | static inline void current_clr_polling(void) | ||
| 2749 | { | ||
| 2750 | __current_clr_polling(); | ||
| 2751 | |||
| 2752 | /* | ||
| 2753 | * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. | ||
| 2754 | * Once the bit is cleared, we'll get IPIs with every new | ||
| 2755 | * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also | ||
| 2756 | * fold. | ||
| 2757 | */ | ||
| 2758 | smp_mb(); /* paired with resched_task() */ | ||
| 2759 | |||
| 2760 | preempt_fold_need_resched(); | ||
| 2761 | } | ||
| 2762 | |||
| 2748 | static __always_inline bool need_resched(void) | 2763 | static __always_inline bool need_resched(void) |
| 2749 | { | 2764 | { |
| 2750 | return unlikely(tif_need_resched()); | 2765 | return unlikely(tif_need_resched()); |
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c index 988573a9a387..277f494c2a9a 100644 --- a/kernel/cpu/idle.c +++ b/kernel/cpu/idle.c | |||
| @@ -105,14 +105,17 @@ static void cpu_idle_loop(void) | |||
| 105 | __current_set_polling(); | 105 | __current_set_polling(); |
| 106 | } | 106 | } |
| 107 | arch_cpu_idle_exit(); | 107 | arch_cpu_idle_exit(); |
| 108 | /* | ||
| 109 | * We need to test and propagate the TIF_NEED_RESCHED | ||
| 110 | * bit here because we might not have send the | ||
| 111 | * reschedule IPI to idle tasks. | ||
| 112 | */ | ||
| 113 | if (tif_need_resched()) | ||
| 114 | set_preempt_need_resched(); | ||
| 115 | } | 108 | } |
| 109 | |||
| 110 | /* | ||
| 111 | * Since we fell out of the loop above, we know | ||
| 112 | * TIF_NEED_RESCHED must be set, propagate it into | ||
| 113 | * PREEMPT_NEED_RESCHED. | ||
| 114 | * | ||
| 115 | * This is required because for polling idle loops we will | ||
| 116 | * not have had an IPI to fold the state for us. | ||
| 117 | */ | ||
| 118 | preempt_set_need_resched(); | ||
| 116 | tick_nohz_idle_exit(); | 119 | tick_nohz_idle_exit(); |
| 117 | schedule_preempt_disabled(); | 120 | schedule_preempt_disabled(); |
| 118 | } | 121 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 392c6f87906e..0326c06953eb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -1510,8 +1510,7 @@ void scheduler_ipi(void) | |||
| 1510 | * TIF_NEED_RESCHED remotely (for the first time) will also send | 1510 | * TIF_NEED_RESCHED remotely (for the first time) will also send |
| 1511 | * this IPI. | 1511 | * this IPI. |
| 1512 | */ | 1512 | */ |
| 1513 | if (tif_need_resched()) | 1513 | preempt_fold_need_resched(); |
| 1514 | set_preempt_need_resched(); | ||
| 1515 | 1514 | ||
| 1516 | if (llist_empty(&this_rq()->wake_list) | 1515 | if (llist_empty(&this_rq()->wake_list) |
| 1517 | && !tick_nohz_full_cpu(smp_processor_id()) | 1516 | && !tick_nohz_full_cpu(smp_processor_id()) |
