author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-22 14:35:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-22 14:35:46 -0400
commit     3992c0321258bdff3666cbaf5225f538ad61a548 (patch)
tree       42c98bcf601237b07ceac34b5bdb0b37558280dc /kernel/time/tick-sched.c
parent     55acdddbac1725b80df0c41970505e8a41c84956 (diff)
parent     eec19d1a0d04c80e66eef634f7b8f460f2ca5643 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core changes from Ingo Molnar:
 "Continued cleanups of the core time and NTP code, plus more nohz work
  preparing for tick-less userspace execution."

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Rework timekeeping functions to take timekeeper ptr as argument
  time: Move xtime_nsec adjustment underflow handling timekeeping_adjust
  time: Move arch_gettimeoffset() usage into timekeeping_get_ns()
  time: Refactor accumulation of nsecs to secs
  time: Condense timekeeper.xtime into xtime_sec
  time: Explicitly use u32 instead of int for shift values
  time: Whitespace cleanups per Ingo's requests
  nohz: Move next idle expiry time record into idle logic area
  nohz: Move ts->idle_calls incrementation into strict idle logic
  nohz: Rename ts->idle_tick to ts->last_tick
  nohz: Make nohz API agnostic against idle ticks cputime accounting
  nohz: Separate idle sleeping time accounting from nohz logic
  timers: Improve get_next_timer_interrupt()
  timers: Add accounting of non deferrable timers
  timers: Consolidate base->next_timer update
  timers: Create detach_if_pending() and use it
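The nohz half of this series is easiest to read as a policy/mechanism split: tick_nohz_stop_sched_tick() keeps only the "compute and program the next event" mechanism and now returns the expiry, can_stop_idle_tick() holds the bail-out checks, and a new __tick_nohz_idle_enter() wrapper does the idle_calls/idle_sleeps bookkeeping and is shared by tick_nohz_idle_enter() and tick_nohz_irq_exit(). Below is a minimal userspace sketch of that call structure only; the names mirror the diff that follows, but the bodies are placeholders, not kernel code:

/*
 * Toy userspace model of the reworked nohz idle-entry flow (NOT kernel
 * code): names mirror kernel/time/tick-sched.c after this merge, bodies
 * are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

struct tick_sched {
	bool tick_stopped;
	unsigned long idle_calls;
	unsigned long idle_sleeps;
	long idle_expires;		/* stand-in for ktime_t */
};

/* Policy: split out so both entry points share one "may we stop?" check. */
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	(void)cpu;
	return !ts->tick_stopped;	/* placeholder condition */
}

/* Mechanism: program the next event; returns the expiry, 0 if not stopped. */
static long tick_nohz_stop_sched_tick(struct tick_sched *ts, long now, int cpu)
{
	(void)cpu;
	ts->tick_stopped = true;
	return now + 100;		/* pretend the next timer is 100 ticks out */
}

/* Orchestration: statistics live here, outside the stop mechanism. */
static void __tick_nohz_idle_enter(struct tick_sched *ts, long now, int cpu)
{
	if (can_stop_idle_tick(cpu, ts)) {
		long expires;

		ts->idle_calls++;
		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires > 0) {
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}
	}
}

int main(void)
{
	struct tick_sched ts = { 0 };

	__tick_nohz_idle_enter(&ts, 1000, 0);
	printf("stopped=%d calls=%lu sleeps=%lu expires=%ld\n",
	       ts.tick_stopped, ts.idle_calls, ts.idle_sleeps, ts.idle_expires);
	return 0;
}

The payoff, visible in the diff below, is that the idle-enter and irq-exit paths go through the same wrapper and can no longer diverge in their accounting.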
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c  192
1 file changed, 108 insertions(+), 84 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 45b17aea79ef..024540f97f74 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -271,50 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
+static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+					 ktime_t now, int cpu)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+	ktime_t last_update, expires, ret = { .tv64 = 0 };
 	unsigned long rcu_delta_jiffies;
-	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
-	int cpu;
-
-	cpu = smp_processor_id();
-	ts = &per_cpu(tick_cpu_sched, cpu);
-
-	now = tick_nohz_start_idle(cpu, ts);
-
-	/*
-	 * If this cpu is offline and it is the one which updates
-	 * jiffies, then give up the assignment and let it be taken by
-	 * the cpu which runs the tick timer next. If we don't drop
-	 * this here the jiffies might be stale and do_timer() never
-	 * invoked.
-	 */
-	if (unlikely(!cpu_online(cpu))) {
-		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-	}
-
-	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-		return;
 
-	if (need_resched())
-		return;
-
-	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-		static int ratelimit;
-
-		if (ratelimit < 10) {
-			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-			       (unsigned int) local_softirq_pending());
-			ratelimit++;
-		}
-		return;
-	}
-
-	ts->idle_calls++;
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&xtime_lock);
@@ -397,6 +362,8 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 	if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
 		goto out;
 
+	ret = expires;
+
 	/*
 	 * nohz_stop_sched_tick can be called several times before
 	 * the nohz_restart_sched_tick is called. This happens when
@@ -408,16 +375,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 		select_nohz_load_balancer(1);
 		calc_load_enter_idle();
 
-		ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
-		ts->idle_jiffies = last_jiffies;
 	}
 
-	ts->idle_sleeps++;
-
-	/* Mark expires */
-	ts->idle_expires = expires;
-
 	/*
 	 * If the expiration time == KTIME_MAX, then
 	 * in this case we simply stop the tick timer.
@@ -448,6 +409,65 @@ out:
 	ts->next_jiffies = next_jiffies;
 	ts->last_jiffies = last_jiffies;
 	ts->sleep_length = ktime_sub(dev->next_event, now);
+
+	return ret;
+}
+
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+	/*
+	 * If this cpu is offline and it is the one which updates
+	 * jiffies, then give up the assignment and let it be taken by
+	 * the cpu which runs the tick timer next. If we don't drop
+	 * this here the jiffies might be stale and do_timer() never
+	 * invoked.
+	 */
+	if (unlikely(!cpu_online(cpu))) {
+		if (cpu == tick_do_timer_cpu)
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+	}
+
+	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+		return false;
+
+	if (need_resched())
+		return false;
+
+	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+		static int ratelimit;
+
+		if (ratelimit < 10) {
+			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+			       (unsigned int) local_softirq_pending());
+			ratelimit++;
+		}
+		return false;
+	}
+
+	return true;
+}
+
+static void __tick_nohz_idle_enter(struct tick_sched *ts)
+{
+	ktime_t now, expires;
+	int cpu = smp_processor_id();
+
+	now = tick_nohz_start_idle(cpu, ts);
+
+	if (can_stop_idle_tick(cpu, ts)) {
+		int was_stopped = ts->tick_stopped;
+
+		ts->idle_calls++;
+
+		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
+		if (expires.tv64 > 0LL) {
+			ts->idle_sleeps++;
+			ts->idle_expires = expires;
+		}
+
+		if (!was_stopped && ts->tick_stopped)
+			ts->idle_jiffies = ts->last_jiffies;
+	}
 }
 
 /**
@@ -485,7 +505,7 @@ void tick_nohz_idle_enter(void)
 	 * update of the idle time accounting in tick_nohz_start_idle().
 	 */
 	ts->inidle = 1;
-	tick_nohz_stop_sched_tick(ts);
+	__tick_nohz_idle_enter(ts);
 
 	local_irq_enable();
 }
@@ -505,7 +525,7 @@ void tick_nohz_irq_exit(void)
 	if (!ts->inidle)
 		return;
 
-	tick_nohz_stop_sched_tick(ts);
+	__tick_nohz_idle_enter(ts);
 }
 
 /**
@@ -523,7 +543,7 @@ ktime_t tick_nohz_get_sleep_length(void)
 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 {
 	hrtimer_cancel(&ts->sched_timer);
-	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
 	while (1) {
 		/* Forward the time to expire in the future */
@@ -546,6 +566,41 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	}
 }
 
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+	/* Update jiffies first */
+	select_nohz_load_balancer(0);
+	tick_do_update_jiffies64(now);
+	update_cpu_load_nohz();
+
+	touch_softlockup_watchdog();
+	/*
+	 * Cancel the scheduled timer and restore the tick
+	 */
+	ts->tick_stopped = 0;
+	ts->idle_exittime = now;
+
+	tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+	unsigned long ticks;
+	/*
+	 * We stopped the tick in idle. Update process times would miss the
+	 * time we slept as update_process_times does only a 1 tick
+	 * accounting. Enforce that this is accounted to idle !
+	 */
+	ticks = jiffies - ts->idle_jiffies;
+	/*
+	 * We might be one off. Do not randomly account a huge number of ticks!
+	 */
+	if (ticks && ticks < LONG_MAX)
+		account_idle_ticks(ticks);
+#endif
+}
+
 /**
  * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
@@ -557,9 +612,6 @@ void tick_nohz_idle_exit(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	unsigned long ticks;
-#endif
 	ktime_t now;
 
 	local_irq_disable();
@@ -574,40 +626,11 @@ void tick_nohz_idle_exit(void)
 	if (ts->idle_active)
 		tick_nohz_stop_idle(cpu, now);
 
-	if (!ts->tick_stopped) {
-		local_irq_enable();
-		return;
+	if (ts->tick_stopped) {
+		tick_nohz_restart_sched_tick(ts, now);
+		tick_nohz_account_idle_ticks(ts);
 	}
 
-	/* Update jiffies first */
-	select_nohz_load_balancer(0);
-	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz();
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	/*
-	 * We stopped the tick in idle. Update process times would miss the
-	 * time we slept as update_process_times does only a 1 tick
-	 * accounting. Enforce that this is accounted to idle !
-	 */
-	ticks = jiffies - ts->idle_jiffies;
-	/*
-	 * We might be one off. Do not randomly account a huge number of ticks!
-	 */
-	if (ticks && ticks < LONG_MAX)
-		account_idle_ticks(ticks);
-#endif
-
-	calc_load_exit_idle();
-	touch_softlockup_watchdog();
-	/*
-	 * Cancel the scheduled timer and restore the tick
-	 */
-	ts->tick_stopped = 0;
-	ts->idle_exittime = now;
-
-	tick_nohz_restart(ts, now);
-
 	local_irq_enable();
 }
 
@@ -811,7 +834,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		 */
 		if (ts->tick_stopped) {
 			touch_softlockup_watchdog();
-			ts->idle_jiffies++;
+			if (idle_cpu(cpu))
+				ts->idle_jiffies++;
 		}
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);