summaryrefslogtreecommitdiffstats
path: root/kernel/sched/idle.c
diff options
context:
space:
mode:
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2018-03-15 18:07:41 -0400
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2018-04-05 13:01:29 -0400
commit: ed98c34919985a9f87c3edacb9a8d8c283c9e243 (patch)
tree: 342bb5a1961b6c594a38706f7ed6a5048ae63f29 /kernel/sched/idle.c
parent: 2aaf709a518d26563b80fd7a42379d7aa7ffed4a (diff)
sched: idle: Do not stop the tick before cpuidle_idle_call()
Make cpuidle_idle_call() decide whether or not to stop the tick. First, the cpuidle_enter_s2idle() path deals with the tick (and with the entire timekeeping for that matter) by itself and it doesn't need the tick to be stopped beforehand. Second, to address the issue with short idle duration predictions by the idle governor after the tick has been stopped, it will be necessary to change the ordering of cpuidle_select() with respect to tick_nohz_idle_stop_tick(). To prepare for that, put a tick_nohz_idle_stop_tick() call in the same branch in which cpuidle_select() is called.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Diffstat (limited to 'kernel/sched/idle.c')
-rw-r--r--  kernel/sched/idle.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 3777e83c0b5a..4f64835d38a8 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -141,13 +141,15 @@ static void cpuidle_idle_call(void)
141 } 141 }
142 142
143 /* 143 /*
144 * Tell the RCU framework we are entering an idle section, 144 * The RCU framework needs to be told that we are entering an idle
145 * so no more rcu read side critical sections and one more 145 * section, so no more rcu read side critical sections and one more
146 * step to the grace period 146 * step to the grace period
147 */ 147 */
148 rcu_idle_enter();
149 148
150 if (cpuidle_not_available(drv, dev)) { 149 if (cpuidle_not_available(drv, dev)) {
150 tick_nohz_idle_stop_tick();
151 rcu_idle_enter();
152
151 default_idle_call(); 153 default_idle_call();
152 goto exit_idle; 154 goto exit_idle;
153 } 155 }
@@ -164,16 +166,26 @@ static void cpuidle_idle_call(void)
164 166
165 if (idle_should_enter_s2idle() || dev->use_deepest_state) { 167 if (idle_should_enter_s2idle() || dev->use_deepest_state) {
166 if (idle_should_enter_s2idle()) { 168 if (idle_should_enter_s2idle()) {
169 rcu_idle_enter();
170
167 entered_state = cpuidle_enter_s2idle(drv, dev); 171 entered_state = cpuidle_enter_s2idle(drv, dev);
168 if (entered_state > 0) { 172 if (entered_state > 0) {
169 local_irq_enable(); 173 local_irq_enable();
170 goto exit_idle; 174 goto exit_idle;
171 } 175 }
176
177 rcu_idle_exit();
172 } 178 }
173 179
180 tick_nohz_idle_stop_tick();
181 rcu_idle_enter();
182
174 next_state = cpuidle_find_deepest_state(drv, dev); 183 next_state = cpuidle_find_deepest_state(drv, dev);
175 call_cpuidle(drv, dev, next_state); 184 call_cpuidle(drv, dev, next_state);
176 } else { 185 } else {
186 tick_nohz_idle_stop_tick();
187 rcu_idle_enter();
188
177 /* 189 /*
178 * Ask the cpuidle framework to choose a convenient idle state. 190 * Ask the cpuidle framework to choose a convenient idle state.
179 */ 191 */
@@ -240,7 +252,6 @@ static void do_idle(void)
240 tick_nohz_idle_restart_tick(); 252 tick_nohz_idle_restart_tick();
241 cpu_idle_poll(); 253 cpu_idle_poll();
242 } else { 254 } else {
243 tick_nohz_idle_stop_tick();
244 cpuidle_idle_call(); 255 cpuidle_idle_call();
245 } 256 }
246 arch_cpu_idle_exit(); 257 arch_cpu_idle_exit();