-rw-r--r--  arch/x86/include/asm/mwait.h   2
-rw-r--r--  include/linux/preempt.h       15
-rw-r--r--  include/linux/sched.h         15
-rw-r--r--  kernel/cpu/idle.c             17
-rw-r--r--  kernel/sched/core.c            3
5 files changed, 42 insertions, 10 deletions
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 19b71c439256..1da25a5f96f9 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -53,7 +53,7 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		if (!need_resched())
 			__mwait(eax, ecx);
 	}
-	__current_clr_polling();
+	current_clr_polling();
 }
 
 #endif /* _ASM_X86_MWAIT_H */
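
The one-line change above matters because MWAIT is armed on current_thread_info()->flags: a remote CPU that sets TIF_NEED_RESCHED wakes the idling CPU simply by writing that word and, seeing TIF_POLLING_NRFLAG, never sends a reschedule IPI. The old __current_clr_polling() only dropped the polling bit on exit; the new current_clr_polling() (added in the sched.h hunk below) also folds the pending flag. A minimal userspace sketch of that idea, not part of the patch, with a spin loop standing in for MONITOR/MWAIT and simplified stand-ins for the kernel helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	(1u << 0)
#define TIF_POLLING_NRFLAG	(1u << 1)

static _Atomic unsigned int ti_flags = TIF_POLLING_NRFLAG;	/* models thread_info->flags */
static bool preempt_need_resched;				/* models PREEMPT_NEED_RESCHED */

static void mwait_like_idle(void)
{
	/* "mwait": sleep until something writes the monitored word */
	while (!(atomic_load(&ti_flags) & TIF_NEED_RESCHED))
		;

	/*
	 * Exit path.  Just clearing TIF_POLLING_NRFLAG (the old
	 * __current_clr_polling()) would leave the pending TIF_NEED_RESCHED
	 * unfolded, and no IPI is coming to do it for us.
	 */
	atomic_fetch_and(&ti_flags, ~TIF_POLLING_NRFLAG);
	if (atomic_load(&ti_flags) & TIF_NEED_RESCHED)
		preempt_need_resched = true;	/* the fold current_clr_polling() now does */
}

int main(void)
{
	/* remote CPU: sets the flag, sees the polling bit, sends no IPI */
	atomic_fetch_or(&ti_flags, TIF_NEED_RESCHED);

	mwait_like_idle();
	printf("folded on idle exit: %d\n", preempt_need_resched);
	return 0;
}
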
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd9ddf8af205..59749fc48328 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -134,6 +134,21 @@ do { \
 #undef preempt_check_resched
 #endif
 
+#ifdef CONFIG_PREEMPT
+#define preempt_set_need_resched() \
+do { \
+	set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+	if (tif_need_resched()) \
+		set_preempt_need_resched(); \
+} while (0)
+#else
+#define preempt_set_need_resched() do { } while (0)
+#define preempt_fold_need_resched() do { } while (0)
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
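
These two helpers give the "fold" a single definition: copy a set TIF_NEED_RESCHED (the per-task flag a remote CPU sets) into PREEMPT_NEED_RESCHED (the per-CPU state the preempt_enable() fast path actually tests). Below is a userspace sketch, not kernel code, of why that copy matters; it assumes a plain boolean for PREEMPT_NEED_RESCHED rather than the inverted bit the x86 implementation keeps inside its per-CPU preempt count, and a simplified preempt_enable():

#include <stdbool.h>
#include <stdio.h>

static bool tif_need_resched_flag;	/* models TIF_NEED_RESCHED in thread_info */
static bool preempt_need_resched;	/* models PREEMPT_NEED_RESCHED */
static int  preempt_count;

static bool tif_need_resched(void)         { return tif_need_resched_flag; }
static void set_preempt_need_resched(void) { preempt_need_resched = true; }

/* the two helpers added above, CONFIG_PREEMPT variant */
#define preempt_set_need_resched()  do { set_preempt_need_resched(); } while (0)
#define preempt_fold_need_resched() do { if (tif_need_resched()) set_preempt_need_resched(); } while (0)

static void preempt_enable(void)
{
	/*
	 * The fast path only looks at per-CPU state; if nobody folded the
	 * TIF bit in, a pending reschedule is silently missed here.
	 */
	if (--preempt_count == 0 && preempt_need_resched)
		printf("would reschedule now\n");
}

int main(void)
{
	preempt_count = 1;
	tif_need_resched_flag = true;	/* set remotely, e.g. by resched_task() */

	preempt_fold_need_resched();	/* what scheduler_ipi()/current_clr_polling() now do */
	preempt_enable();		/* fast path notices the pending reschedule */
	return 0;
}
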
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a03875221663..ffccdad050b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2745,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
 }
 #endif
 
+static inline void current_clr_polling(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+	 * Once the bit is cleared, we'll get IPIs with every new
+	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+	 * fold.
+	 */
+	smp_mb(); /* paired with resched_task() */
+
+	preempt_fold_need_resched();
+}
+
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
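
The smp_mb() here is one half of a classic store-buffering pairing with resched_task(): the waker stores TIF_NEED_RESCHED and then tests the polling bit to decide whether an IPI is needed, while the idle side clears the polling bit and then tests TIF_NEED_RESCHED. With a full barrier between each store and the following load, at least one side must observe the other, so the reschedule is neither lost nor left unfolded. A userspace sketch of that pairing, not kernel code, with C11 fences standing in for smp_mb() and both functions reduced to their ordering-relevant steps:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static atomic_bool need_resched_flag;		/* TIF_NEED_RESCHED */
static atomic_bool polling_flag = true;		/* TIF_POLLING_NRFLAG: idle task is polling */
static atomic_bool sent_ipi;
static atomic_bool folded;

static void *remote_resched_side(void *arg)	/* models resched_task() */
{
	(void)arg;
	atomic_store(&need_resched_flag, true);		/* store TIF_NEED_RESCHED      */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb()                    */
	if (!atomic_load(&polling_flag))		/* then test the polling bit   */
		atomic_store(&sent_ipi, true);		/* not polling: send the IPI   */
	return NULL;
}

static void *idle_exit_side(void *arg)		/* models current_clr_polling() */
{
	(void)arg;
	atomic_store(&polling_flag, false);		/* clear the polling bit       */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb()                    */
	if (atomic_load(&need_resched_flag))		/* then test TIF_NEED_RESCHED  */
		atomic_store(&folded, true);		/* preempt_fold_need_resched() */
	return NULL;
}

int main(void)
{
	pthread_t waker, idler;

	pthread_create(&waker, NULL, remote_resched_side, NULL);
	pthread_create(&idler, NULL, idle_exit_side, NULL);
	pthread_join(waker, NULL);
	pthread_join(idler, NULL);

	/* whatever the interleaving, one side must have seen the other */
	assert(atomic_load(&sent_ipi) || atomic_load(&folded));
	return 0;
}
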
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 988573a9a387..277f494c2a9a 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -105,14 +105,17 @@ static void cpu_idle_loop(void)
 				__current_set_polling();
 			}
 			arch_cpu_idle_exit();
-			/*
-			 * We need to test and propagate the TIF_NEED_RESCHED
-			 * bit here because we might not have send the
-			 * reschedule IPI to idle tasks.
-			 */
-			if (tif_need_resched())
-				set_preempt_need_resched();
 		}
+
+		/*
+		 * Since we fell out of the loop above, we know
+		 * TIF_NEED_RESCHED must be set, propagate it into
+		 * PREEMPT_NEED_RESCHED.
+		 *
+		 * This is required because for polling idle loops we will
+		 * not have had an IPI to fold the state for us.
+		 */
+		preempt_set_need_resched();
 		tick_nohz_idle_exit();
 		schedule_preempt_disabled();
 	}
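
This hunk moves the fold out of the inner loop: instead of testing TIF_NEED_RESCHED on every wakeup, the idle loop folds exactly once after falling out of while (!need_resched()), which also covers polling idle paths that never received an IPI. A condensed, compilable skeleton of that structure follows; the helpers are stubs, not the kernel implementations, and only the placement of preempt_set_need_resched() relative to the inner loop is the point:

#include <stdbool.h>

/* stand-ins for the kernel helpers; only the loop structure matters here */
static bool need_resched(void)              { return true; }
static void tick_nohz_idle_enter(void)      { }
static void tick_nohz_idle_exit(void)       { }
static void arch_cpu_idle_or_poll(void)     { }	/* one idle pass: poll or monitor/mwait */
static void preempt_set_need_resched(void)  { }
static void schedule_preempt_disabled(void) { }

static void cpu_idle_loop_skeleton(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched())
			arch_cpu_idle_or_poll();

		/*
		 * We only leave the inner loop once TIF_NEED_RESCHED is set,
		 * so fold it exactly once here -- polling idle may never have
		 * received the IPI that would otherwise have done it.
		 */
		preempt_set_need_resched();

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

int main(void)
{
	(void)cpu_idle_loop_skeleton;	/* the real loop never returns; not invoked here */
	return 0;
}
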
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 392c6f87906e..0326c06953eb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1510,8 +1510,7 @@ void scheduler_ipi(void)
 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
 	 * this IPI.
 	 */
-	if (tif_need_resched())
-		set_preempt_need_resched();
+	preempt_fold_need_resched();
 
 	if (llist_empty(&this_rq()->wake_list)
 			&& !tick_nohz_full_cpu(smp_processor_id())