Diffstat (limited to 'kernel/time/tick-sched.c')

 -rw-r--r--  kernel/time/tick-sched.c | 106
 1 file changed, 73 insertions(+), 33 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb88..0581c11fe6c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -154,7 +155,7 @@ void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	if (!tick_nohz_enabled)
+		return -1;
+
+	if (ts->idle_active)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(ktime_get());
+
 	return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
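With get_cpu_idle_time_us() exported above (which is what the new <linux/module.h> include and the EXPORT_SYMBOL_GPL are for), modular code such as a cpufreq governor can sample per-CPU idle time directly. A minimal caller sketch follows; the names sample_idle_delta() and prev_idle_us are hypothetical, and only get_cpu_idle_time_us() itself comes from this patch:

#include <linux/percpu.h>
#include <linux/tick.h>

/* Hypothetical per-CPU cache of the last sampled idle total, in microseconds. */
static DEFINE_PER_CPU(u64, prev_idle_us);

/* Idle time accumulated on @cpu since the previous call; 0 if NOHZ is off. */
static u64 sample_idle_delta(int cpu)
{
	u64 wall_us, idle_us, delta;

	idle_us = get_cpu_idle_time_us(cpu, &wall_us);
	if (idle_us == -1ULL)	/* tick_nohz_enabled == 0: no idle accounting */
		return 0;

	delta = idle_us - per_cpu(prev_idle_us, cpu);
	per_cpu(prev_idle_us, cpu) = idle_us;
	return delta;
}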
@@ -261,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
-	if (rcu_needs_cpu(cpu))
+	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
 	 * Do not stop the tick, if we are only one off
@@ -368,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	ts->sched_timer.expires = ts->idle_tick;
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start(&ts->sched_timer,
+				      ts->sched_timer.expires,
+				      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(ts->sched_timer.expires, 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -421,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
 	 */
 	ts->tick_stopped = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
-
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
+	tick_nohz_restart(ts, now);
 
 	local_irq_enable();
 }
 
@@ -494,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -543,6 +553,27 @@ static void tick_nohz_switch_to_nohz(void)
 	       smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different from tick_nohz_restart_sched_tick(). We
+ * just kick the timer and do not touch the other magic bits which
+ * need to be done when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (!ts->tick_stopped)
+		return;
+
+	tick_nohz_restart(ts, ktime_get());
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
@@ -550,6 +581,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 #endif /* NO_HZ */
 
 /*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
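tick_check_idle() is meant to be called from the generic hard-irq entry path, as the comment above says. A sketch of the expected call site, assuming irq_enter() in kernel/softirq.c is adjusted by a companion change; the exact body of irq_enter() is not part of this diff:

/* kernel/softirq.c -- sketch of the companion call site, not this patch */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we interrupt the idle task, let the tick code close the
	 * idle-time accounting window, update jiffies and, when the
	 * tick was stopped, kick the tick timer alive again.
	 */
	if (idle_cpu(cpu) && !in_interrupt())
		tick_check_idle(cpu);

	__irq_enter();
}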
@@ -602,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		profile_tick(CPU_PROFILING);
 	}
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
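With the two early returns removed, both the low-resolution handler and the highres tick_sched_timer() rearm themselves unconditionally once the tick has been kicked; the tick only goes quiet again when the idle loop's next call to tick_nohz_stop_sched_tick() cancels the timer. The highres side relies on the standard hrtimer callback contract, shown here as a generic sketch with made-up names; nothing below is specific to this file:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/*
 * Generic idiom: push the expiry past "now" and return HRTIMER_RESTART
 * so the hrtimer core re-arms the timer itself -- the same pattern
 * tick_sched_timer() now follows on every expiry.
 */
static enum hrtimer_restart periodic_cb(struct hrtimer *timer)
{
	hrtimer_forward_now(timer, ns_to_ktime(NSEC_PER_SEC));
	return HRTIMER_RESTART;
}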