author    Frederic Weisbecker <fweisbec@gmail.com>    2011-07-31 18:06:10 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>    2012-06-11 14:07:17 -0400
commit    5b39939a40801f0c17e31adaf643d6e974227856
tree      ed28770c22f2d7c2c8afca3a6bd0fe48d16e06fd
parent    f5d411c91ede162240f34e05a233f2759412988e
nohz: Move ts->idle_calls incrementation into strict idle logic
Since we want to prepare the nohz API to work beyond the idle case, we need to pull the ts->idle_calls incrementation up to the callers in idle.

To do this, we split tick_nohz_stop_sched_tick() in two parts: a first one that checks whether we can really stop the tick for idle, and a second one that actually stops it. Then, from the callers in idle, we check whether we can stop the tick and only then increment idle_calls, before relaying to the nohz API, which no longer has to care about these details.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
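To make the refactoring concrete, here is a minimal user-space C sketch of the resulting check-then-act pattern. It is only an illustration under simplified assumptions: this struct tick_sched keeps just the fields needed for the accounting, and need_resched_stub(), stop_sched_tick() and idle_enter() are hypothetical stand-ins invented for the sketch; the real checks and state live in kernel/time/tick-sched.c, as shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct tick_sched; only the
 * fields needed to illustrate the idle accounting are kept. */
struct tick_sched {
        bool nohz_inactive;         /* stands in for NOHZ_MODE_INACTIVE */
        bool tick_stopped;
        unsigned long idle_calls;
        unsigned long idle_jiffies;
        unsigned long last_jiffies;
};

/* Hypothetical stub for the kernel's need_resched(). */
static bool need_resched_stub(void)
{
        return false;
}

/* First half of the split: a side-effect-free predicate that only
 * decides whether the tick may be stopped for idle. */
static bool can_stop_idle_tick(struct tick_sched *ts)
{
        if (ts->nohz_inactive)
                return false;
        if (need_resched_stub())
                return false;
        return true;
}

/* Second half: actually stop the tick; the caller has already
 * decided that stopping is allowed. */
static void stop_sched_tick(struct tick_sched *ts)
{
        ts->tick_stopped = true;
}

/* Idle caller: check first, and only then account the idle call and
 * relay to the stop logic, mirroring the new __tick_nohz_idle_enter(). */
static void idle_enter(struct tick_sched *ts)
{
        if (can_stop_idle_tick(ts)) {
                bool was_stopped = ts->tick_stopped;

                ts->idle_calls++;
                stop_sched_tick(ts);

                if (!was_stopped && ts->tick_stopped)
                        ts->idle_jiffies = ts->last_jiffies;
        }
}

int main(void)
{
        struct tick_sched ts = { .last_jiffies = 1000 };

        idle_enter(&ts);
        printf("idle_calls=%lu stopped=%d idle_jiffies=%lu\n",
               ts.idle_calls, ts.tick_stopped, ts.idle_jiffies);
        return 0;
}

Keeping the predicate free of side effects is the point of the split: the caller owns the ts->idle_calls accounting, so a future non-idle user of the stop logic will not distort the idle statistics.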
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/tick-sched.c | 86
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 73cc4901336d..430e1b6901cc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -271,47 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts,
+                                      ktime_t now, int cpu)
 {
         unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
         ktime_t last_update, expires;
         struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
         u64 time_delta;
-        int cpu;
-
-        cpu = smp_processor_id();
-        ts = &per_cpu(tick_cpu_sched, cpu);
-
-        /*
-         * If this cpu is offline and it is the one which updates
-         * jiffies, then give up the assignment and let it be taken by
-         * the cpu which runs the tick timer next. If we don't drop
-         * this here the jiffies might be stale and do_timer() never
-         * invoked.
-         */
-        if (unlikely(!cpu_online(cpu))) {
-                if (cpu == tick_do_timer_cpu)
-                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-        }
-
-        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-                return;
-
-        if (need_resched())
-                return;
 
-        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-                static int ratelimit;
-
-                if (ratelimit < 10) {
-                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                               (unsigned int) local_softirq_pending());
-                        ratelimit++;
-                }
-                return;
-        }
 
-        ts->idle_calls++;
         /* Read jiffies and the time when jiffies were updated last */
         do {
                 seq = read_seqbegin(&xtime_lock);
@@ -441,16 +409,56 @@ out:
         ts->sleep_length = ktime_sub(dev->next_event, now);
 }
 
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+        /*
+         * If this cpu is offline and it is the one which updates
+         * jiffies, then give up the assignment and let it be taken by
+         * the cpu which runs the tick timer next. If we don't drop
+         * this here the jiffies might be stale and do_timer() never
+         * invoked.
+         */
+        if (unlikely(!cpu_online(cpu))) {
+                if (cpu == tick_do_timer_cpu)
+                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+        }
+
+        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+                return false;
+
+        if (need_resched())
+                return false;
+
+        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+                static int ratelimit;
+
+                if (ratelimit < 10) {
+                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                               (unsigned int) local_softirq_pending());
+                        ratelimit++;
+                }
+                return false;
+        }
+
+        return true;
+}
+
 static void __tick_nohz_idle_enter(struct tick_sched *ts)
 {
         ktime_t now;
-        int was_stopped = ts->tick_stopped;
+        int cpu = smp_processor_id();
 
-        now = tick_nohz_start_idle(smp_processor_id(), ts);
-        tick_nohz_stop_sched_tick(ts, now);
+        now = tick_nohz_start_idle(cpu, ts);
 
-        if (!was_stopped && ts->tick_stopped)
-                ts->idle_jiffies = ts->last_jiffies;
+        if (can_stop_idle_tick(cpu, ts)) {
+                int was_stopped = ts->tick_stopped;
+
+                ts->idle_calls++;
+                tick_nohz_stop_sched_tick(ts, now, cpu);
+
+                if (!was_stopped && ts->tick_stopped)
+                        ts->idle_jiffies = ts->last_jiffies;
+        }
 }
 
 /**