Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c | 128
1 file changed, 89 insertions(+), 39 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f5da526424a9..0581c11fe6c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -75,6 +76,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 							   incr * ticks);
 		}
 		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&xtime_lock);
 }
@@ -151,7 +155,7 @@ void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -162,6 +166,8 @@ void tick_nohz_stop_idle(int cpu)
 		ts->idle_lastupdate = now;
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 		ts->idle_active = 0;
+
+		sched_clock_idle_wakeup_event(0);
 	}
 }
 
@@ -177,6 +183,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 	}
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
+	sched_clock_idle_sleep_event();
 	return now;
 }
 
@@ -184,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	if (!tick_nohz_enabled)
+		return -1;
+
+	if (ts->idle_active)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(ktime_get());
+
 	return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
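With the EXPORT_SYMBOL_GPL above, get_cpu_idle_time_us() becomes callable from modules, and the new tick_nohz_enabled check makes it return -1 when NOHZ idle accounting is not active. A minimal sketch of how a GPL module might consume the exported function (module name, demo logic and output are illustrative assumptions, not part of this change):

/* idle_us_demo.c -- illustrative only; not part of this patch. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/tick.h>

static int __init idle_us_demo_init(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u64 wall;	/* time of last update, in microseconds */
		u64 idle = get_cpu_idle_time_us(cpu, &wall);

		/* -1 means NOHZ is not enabled, see the check above */
		if (idle == (u64)-1)
			continue;

		printk(KERN_INFO "cpu%d: %llu us idle (as of %llu us)\n",
		       cpu, (unsigned long long)idle,
		       (unsigned long long)wall);
	}
	return 0;
}

static void __exit idle_us_demo_exit(void)
{
}

module_init(idle_us_demo_init);
module_exit(idle_us_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbol is exported GPL-only */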
@@ -218,7 +233,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -255,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
-	if (rcu_needs_cpu(cpu))
+	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
 	 * Do not stop the tick, if we are only one off
@@ -300,7 +315,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			 * invoked.
 			 */
 			if (cpu == tick_do_timer_cpu)
-				tick_do_timer_cpu = -1;
+				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
 			ts->idle_sleeps++;
 
@@ -362,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	ts->sched_timer.expires = ts->idle_tick;
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start(&ts->sched_timer,
+				      ts->sched_timer.expires,
+				      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(ts->sched_timer.expires, 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -415,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
 	 */
 	ts->tick_stopped = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
-
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
+	tick_nohz_restart(ts, now);
 	local_irq_enable();
 }
 
@@ -465,7 +485,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 
 	/* Check, if the jiffies need an update */
@@ -488,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -537,6 +553,27 @@ static void tick_nohz_switch_to_nohz(void)
 	       smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (!ts->tick_stopped)
+		return;
+
+	tick_nohz_restart(ts, ktime_get());
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
@@ -544,6 +581,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 #endif /* NO_HZ */
 
 /*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
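The new tick_check_idle() bundles the work that must happen when an interrupt hits an idle CPU: it catches up oneshot broadcast state, closes the idle-time accounting window, brings jiffies up to date and, via tick_nohz_kick_tick(), restarts a stopped tick so that long-running softirqs keep the jiffies update alive. The intended caller is irq_enter(); a simplified sketch of that call site follows (the exact guard conditions live in kernel/softirq.c and are an assumption here, not part of this diff):

/* Simplified sketch of the irq_enter() side; not part of this patch. */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	/* Only an interrupted idle CPU needs the catch-up work. */
	if (idle_cpu(cpu) && !in_interrupt())
		tick_check_idle(cpu);

	__irq_enter();
}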
@@ -567,7 +617,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 #endif
 
@@ -596,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		profile_tick(CPU_PROFILING);
 	}
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
@@ -619,7 +665,7 @@ void tick_setup_sched_timer(void)
 	 */
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
 	ts->sched_timer.expires = tick_init_jiffy_update();
@@ -643,17 +689,21 @@ void tick_setup_sched_timer(void)
 	ts->nohz_mode = NOHZ_MODE_HIGHRES;
 #endif
 }
+#endif /* HIGH_RES_TIMERS */
 
+#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
 void tick_cancel_sched_timer(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
+# ifdef CONFIG_HIGH_RES_TIMERS
 	if (ts->sched_timer.base)
 		hrtimer_cancel(&ts->sched_timer);
+# endif
 
 	ts->nohz_mode = NOHZ_MODE_INACTIVE;
 }
-#endif /* HIGH_RES_TIMERS */
+#endif
 
 /**
  * Async notification about clocksource changes