author	Peter Zijlstra <peterz@infradead.org>	2017-05-12 08:18:10 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-09-29 13:35:17 -0400
commit	9a2dd585b2c431ec1e5d46a9d9568291c7a534cc (patch)
tree	987633c0ac6fa9ce7c9f278157d9621f6f1c0fbb /kernel/sched/fair.c
parent	f207934fb79d1af1de1a62b09d56a3a1914172c4 (diff)
sched/fair: Implement more accurate async detach
The problem with the overestimate is that it will subtract too big a
value from the load_sum, thereby pushing it down further than it ought
to go. Since runnable_load_avg is not subject to a similar 'force',
this results in the occasional 'runnable_load > load' situation.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
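For illustration only (not part of the patch): a minimal user-space sketch of why LOAD_AVG_MAX over-estimates the PELT divider when the current accounting period is only partially elapsed. LOAD_AVG_MAX = 47742 and the 1024us period are the usual PELT constants of this era; period_contrib and the removed load_avg value below are made-up example numbers.

/*
 * Illustrative sketch only -- not kernel code.  Compares the exact
 * divider introduced by this patch with the LOAD_AVG_MAX over-estimate
 * it replaces.
 */
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* load_sum when a full period has elapsed */

int main(void)
{
	uint32_t period_contrib = 512;	/* assumed: half-way through the current 1024us period */
	unsigned long removed = 100;	/* hypothetical detached load_avg */

	/* Exact divider: the running period has only contributed period_contrib so far. */
	uint32_t divider = LOAD_AVG_MAX - 1024 + period_contrib;

	unsigned long exact = removed * divider;	/* what the patch subtracts from load_sum */
	unsigned long over  = removed * LOAD_AVG_MAX;	/* the old over-estimate */

	printf("exact=%lu over=%lu excess=%lu\n", exact, over, over - exact);
	return 0;
}

With these example values the old code would strip an extra removed * (1024 - period_contrib) from load_sum, which is the "too big a value" the commit message refers to.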
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 954b332cd899..67c39642a512 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3574,6 +3574,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 	if (cfs_rq->removed.nr) {
 		unsigned long r;
+		u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
 
 		raw_spin_lock(&cfs_rq->removed.lock);
 		swap(cfs_rq->removed.util_avg, removed_util);
@@ -3582,17 +3583,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		cfs_rq->removed.nr = 0;
 		raw_spin_unlock(&cfs_rq->removed.lock);
 
-		/*
-		 * The LOAD_AVG_MAX for _sum is a slight over-estimate,
-		 * which is safe due to sub_positive() clipping at 0.
-		 */
 		r = removed_load;
 		sub_positive(&sa->load_avg, r);
-		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+		sub_positive(&sa->load_sum, r * divider);
 
 		r = removed_util;
 		sub_positive(&sa->util_avg, r);
-		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+		sub_positive(&sa->util_sum, r * divider);
 
 		add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
 
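For context on the comment removed above: sub_positive() subtracts a value from a PELT field but never lets it wrap below zero, which is why the old over-estimate was "safe" even though it was inaccurate. The snippet below is a simplified stand-alone approximation of that clipping semantic, not the kernel's actual macro (which also uses typeof and READ_ONCE/WRITE_ONCE), shown only to make the remark concrete.

/*
 * Simplified approximation of sub_positive()'s behaviour:
 * subtract, but clamp the result at zero instead of underflowing.
 * NOT the kernel's exact definition.
 */
static inline void sub_positive_approx(unsigned long *ptr, unsigned long val)
{
	unsigned long old = *ptr;

	/* If val exceeds the current value, clip the result to 0. */
	*ptr = (val > old) ? 0 : old - val;
}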