 kernel/sched.c | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7fe334ead4f9..d8456a9ac9af 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,6 +397,7 @@ struct rq {
         unsigned long cpu_load[CPU_LOAD_IDX_MAX];
         unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
+        unsigned long last_tick_seen;
         unsigned char in_nohz_recently;
 #endif
         /* capture load from *all* tasks on this cpu: */
@@ -500,6 +501,32 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+        return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+        return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+        rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+        return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
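The helpers above are the core of the change. On a CONFIG_NO_HZ kernel an idle CPU stops its periodic tick, so rq->clock may legitimately jump many ticks between updates; max_skipped_ticks() sizes the allowed jump from the jiffies elapsed since the last observed tick (the +2 presumably leaves margin for a tick still in flight, though the patch itself does not say), and falls back to the old one-tick bound when nohz is inactive. A minimal userspace sketch of that selection logic, with nohz_on() reduced to a plain flag since tick_get_tick_sched() lives in the tick internals (the toy_ names are hypothetical stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Toy model: the real code reads the global jiffies counter and
 * rq->last_tick_seen; toy_rq and toy_nohz_on are stand-ins. */
struct toy_rq {
        unsigned long last_tick_seen;
        int toy_nohz_on;        /* models the NOHZ_MODE_INACTIVE check */
};

static uint64_t toy_max_skipped_ticks(const struct toy_rq *rq,
                                      unsigned long jiffies)
{
        /* nohz active: every jiffy skipped since the last seen tick,
         * plus a two-tick margin; otherwise the old one-tick bound. */
        return rq->toy_nohz_on ? jiffies - rq->last_tick_seen + 2 : 1;
}

int main(void)
{
        struct toy_rq rq = { .last_tick_seen = 100, .toy_nohz_on = 1 };

        printf("%llu\n", (unsigned long long)toy_max_skipped_ticks(&rq, 105)); /* 7 */
        rq.toy_nohz_on = 0;
        printf("%llu\n", (unsigned long long)toy_max_skipped_ticks(&rq, 105)); /* 1 */
        return 0;
}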
@@ -524,9 +551,12 @@ static void __update_rq_clock(struct rq *rq)
                 /*
                  * Catch too large forward jumps too:
                  */
-                if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
-                        if (clock < rq->tick_timestamp + TICK_NSEC)
-                                clock = rq->tick_timestamp + TICK_NSEC;
+                u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+                u64 max_time = rq->tick_timestamp + max_jump;
+
+                if (unlikely(clock + delta > max_time)) {
+                        if (clock < max_time)
+                                clock = max_time;
                 else
                         clock++;
                 rq->clock_overflows++;
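With that allowance in hand, the clamp itself is unchanged in shape: a forward jump beyond tick_timestamp + max_skipped_ticks(rq) * TICK_NSEC is still treated as an overflow and capped; only the ceiling now scales with the skipped ticks. A standalone sketch of the arithmetic (the TOY_TICK_NSEC value is illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define TOY_TICK_NSEC 1000000ULL        /* illustrative 1 ms tick */

/* Toy model of the patched overflow clamp in __update_rq_clock(). */
static uint64_t toy_clamp(uint64_t clock, uint64_t delta,
                          uint64_t tick_timestamp, uint64_t max_skipped_ticks)
{
        uint64_t max_jump = max_skipped_ticks * TOY_TICK_NSEC;
        uint64_t max_time = tick_timestamp + max_jump;

        if (clock + delta > max_time)
                return clock < max_time ? max_time : clock + 1;
        return clock + delta;
}

int main(void)
{
        /* A nohz CPU that skipped 4 ticks (allowance 4 + 2 = 6):
         * the 4-tick jump now passes through unclamped. */
        printf("%llu\n", (unsigned long long)toy_clamp(0, 4 * TOY_TICK_NSEC, 0, 6));
        /* Under the old fixed one-tick bound the same jump is capped. */
        printf("%llu\n", (unsigned long long)toy_clamp(0, 4 * TOY_TICK_NSEC, 0, 1));
        return 0;
}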
@@ -3812,6 +3842,7 @@ void scheduler_tick(void)
                 rq->clock_underflows++;
         }
         rq->tick_timestamp = rq->clock;
+        update_last_tick_seen(rq);
         update_cpu_load(rq);
         curr->sched_class->task_tick(rq, curr, 0);
         update_sched_rt_period(rq);
@@ -7261,6 +7292,7 @@ void __init sched_init(void)
                 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                 rq->nr_running = 0;
                 rq->clock = 1;
+                update_last_tick_seen(rq);
                 init_cfs_rq(&rq->cfs, rq);
                 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED