diff options
Diffstat (limited to 'kernel/cpu/idle.c')
-rw-r--r-- | kernel/cpu/idle.c | 16 |
1 files changed, 11 insertions, 5 deletions
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index e695c0a0bcb5..988573a9a387 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -44,7 +44,7 @@ static inline int cpu_idle_poll(void)
44 | rcu_idle_enter(); | 44 | rcu_idle_enter(); |
45 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | 45 | trace_cpu_idle_rcuidle(0, smp_processor_id()); |
46 | local_irq_enable(); | 46 | local_irq_enable(); |
47 | while (!need_resched()) | 47 | while (!tif_need_resched()) |
48 | cpu_relax(); | 48 | cpu_relax(); |
49 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | 49 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
50 | rcu_idle_exit(); | 50 | rcu_idle_exit(); |
@@ -92,8 +92,7 @@ static void cpu_idle_loop(void)
92 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | 92 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { |
93 | cpu_idle_poll(); | 93 | cpu_idle_poll(); |
94 | } else { | 94 | } else { |
95 | current_clr_polling(); | 95 | if (!current_clr_polling_and_test()) { |
96 | if (!need_resched()) { | ||
97 | stop_critical_timings(); | 96 | stop_critical_timings(); |
98 | rcu_idle_enter(); | 97 | rcu_idle_enter(); |
99 | arch_cpu_idle(); | 98 | arch_cpu_idle(); |
@@ -103,9 +102,16 @@ static void cpu_idle_loop(void)
103 | } else { | 102 | } else { |
104 | local_irq_enable(); | 103 | local_irq_enable(); |
105 | } | 104 | } |
106 | current_set_polling(); | 105 | __current_set_polling(); |
107 | } | 106 | } |
108 | arch_cpu_idle_exit(); | 107 | arch_cpu_idle_exit(); |
108 | /* | ||
109 | * We need to test and propagate the TIF_NEED_RESCHED | ||
110 | * bit here because we might not have send the | ||
111 | * reschedule IPI to idle tasks. | ||
112 | */ | ||
113 | if (tif_need_resched()) | ||
114 | set_preempt_need_resched(); | ||
109 | } | 115 | } |
110 | tick_nohz_idle_exit(); | 116 | tick_nohz_idle_exit(); |
111 | schedule_preempt_disabled(); | 117 | schedule_preempt_disabled(); |
@@ -129,7 +135,7 @@ void cpu_startup_entry(enum cpuhp_state state)
129 | */ | 135 | */ |
130 | boot_init_stack_canary(); | 136 | boot_init_stack_canary(); |
131 | #endif | 137 | #endif |
132 | current_set_polling(); | 138 | __current_set_polling(); |
133 | arch_cpu_idle_prepare(); | 139 | arch_cpu_idle_prepare(); |
134 | cpu_idle_loop(); | 140 | cpu_idle_loop(); |
135 | } | 141 | } |