diff options
Diffstat (limited to 'kernel/time/tick-sched.c')
| -rw-r--r-- | kernel/time/tick-sched.c | 32 |
1 file changed, 30 insertions, 2 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 99578f06c8d4..f7cc7abfcf25 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) | |||
| 650 | ts->next_tick = 0; | 650 | ts->next_tick = 0; |
| 651 | } | 651 | } |
| 652 | 652 | ||
| 653 | static inline bool local_timer_softirq_pending(void) | ||
| 654 | { | ||
| 655 | return local_softirq_pending() & BIT(TIMER_SOFTIRQ); | ||
| 656 | } | ||
| 657 | |||
| 653 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | 658 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, |
| 654 | ktime_t now, int cpu) | 659 | ktime_t now, int cpu) |
| 655 | { | 660 | { |
| @@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
| 666 | } while (read_seqretry(&jiffies_lock, seq)); | 671 | } while (read_seqretry(&jiffies_lock, seq)); |
| 667 | ts->last_jiffies = basejiff; | 672 | ts->last_jiffies = basejiff; |
| 668 | 673 | ||
| 669 | if (rcu_needs_cpu(basemono, &next_rcu) || | 674 | /* |
| 670 | arch_needs_cpu() || irq_work_needs_cpu()) { | 675 | * Keep the periodic tick, when RCU, architecture or irq_work |
| 676 | * requests it. | ||
| 677 | * Aside of that check whether the local timer softirq is | ||
| 678 | * pending. If so it's a bad idea to call get_next_timer_interrupt() | ||
| 679 | * because there is an already expired timer, so it will request | ||
| 680 | * immediate expiry, which rearms the hardware timer with a | ||
| 681 | * minimal delta which brings us back to this place | ||
| 682 | * immediately. Lather, rinse and repeat... | ||
| 683 | */ | ||
| 684 | if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | ||
| 685 | irq_work_needs_cpu() || local_timer_softirq_pending()) { | ||
| 671 | next_tick = basemono + TICK_NSEC; | 686 | next_tick = basemono + TICK_NSEC; |
| 672 | } else { | 687 | } else { |
| 673 | /* | 688 | /* |
| @@ -986,6 +1001,19 @@ ktime_t tick_nohz_get_sleep_length(void) | |||
| 986 | } | 1001 | } |
| 987 | 1002 | ||
| 988 | /** | 1003 | /** |
| 1004 | * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value | ||
| 1005 | * for a particular CPU. | ||
| 1006 | * | ||
| 1007 | * Called from the schedutil frequency scaling governor in scheduler context. | ||
| 1008 | */ | ||
| 1009 | unsigned long tick_nohz_get_idle_calls_cpu(int cpu) | ||
| 1010 | { | ||
| 1011 | struct tick_sched *ts = tick_get_tick_sched(cpu); | ||
| 1012 | |||
| 1013 | return ts->idle_calls; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | /** | ||
| 989 | * tick_nohz_get_idle_calls - return the current idle calls counter value | 1017 | * tick_nohz_get_idle_calls - return the current idle calls counter value |
| 990 | * | 1018 | * |
| 991 | * Called from the schedutil frequency scaling governor in scheduler context. | 1019 | * Called from the schedutil frequency scaling governor in scheduler context. |
