author     Peter Zijlstra <peterz@infradead.org>    2017-05-06 09:59:54 -0400
committer  Ingo Molnar <mingo@kernel.org>           2017-09-29 13:35:15 -0400
commit     1ea6c46a23f1213d1972bfae220db5c165e27bba
tree       434a578058f52ae1f26b22b7f61183d0ba6131b2 /kernel/sched/debug.c
parent     0e2d2aaaae52c247c047d14999b93486bdbd3431
sched/fair: Propagate an effective runnable_load_avg
The load balancer uses runnable_load_avg as the load indicator. For
!cgroup this is:

  runnable_load_avg = \Sum se->avg.load_avg ; where se->on_rq
That is, a direct sum of all runnable tasks on that runqueue. As
opposed to load_avg, which is a sum of all tasks on the runqueue,
which includes a blocked component.
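In rough C terms the distinction can be sketched with a self-contained
model (an illustration only, not the kernel's PELT code; the struct and
the two helpers below are hypothetical names used for this sketch):

#include <stddef.h>

/* Simplified stand-in for the per-entity PELT state described above. */
struct sched_entity_model {
	unsigned long load_avg;	/* decayed load of this entity, kept while blocked */
	int on_rq;		/* non-zero while the entity is queued */
};

/* load_avg of the runqueue: every attached entity, blocked or not. */
static unsigned long rq_load_avg(const struct sched_entity_model *se, size_t n)
{
	unsigned long sum = 0;

	for (size_t i = 0; i < n; i++)
		sum += se[i].load_avg;
	return sum;
}

/* runnable_load_avg: only entities with se->on_rq, as in the sum above. */
static unsigned long rq_runnable_load_avg(const struct sched_entity_model *se, size_t n)
{
	unsigned long sum = 0;

	for (size_t i = 0; i < n; i++)
		if (se[i].on_rq)
			sum += se[i].load_avg;
	return sum;
}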
However, in the cgroup case, this comes apart since the group entities
are always runnable, even if most of their constituent entities are
blocked.
Therefore introduce a runnable_weight which for task entities is the
same as the regular weight, but for group entities is a fraction of
the entity weight and represents the runnable part of the group
runqueue.
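A minimal sketch of that idea, assuming the group runqueue's runnable
fraction can be approximated by runnable_load_avg / load_avg (the real
calculation in kernel/sched/fair.c is scaled and clamped differently;
group_runnable_weight below is a hypothetical helper):

/*
 * Scale a group entity's weight by the runnable fraction of its group
 * runqueue; a task entity would simply keep its full weight.
 */
static unsigned long group_runnable_weight(unsigned long se_weight,
					   unsigned long grq_runnable_load_avg,
					   unsigned long grq_load_avg)
{
	if (!grq_load_avg)
		return 0;	/* nothing attached, nothing runnable */

	return se_weight * grq_runnable_load_avg / grq_load_avg;
}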
Then propagate this load through the PELT hierarchy to arrive at an
effective runnable load average -- which we should not confuse with
the canonical runnable load average.
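Directionally, the propagation amounts to folding a child's runnable
delta into every ancestor runqueue; the type and helper below are
hypothetical stand-ins for the actual PELT update path:

struct grp_model {
	struct grp_model *parent;	/* NULL at the root cfs_rq */
	long runnable_load_avg;
};

/* Fold a runnable-load change into the group and all of its ancestors. */
static void propagate_runnable_delta(struct grp_model *g, long delta)
{
	for (; g; g = g->parent)
		g->runnable_load_avg += delta;
}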
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--   kernel/sched/debug.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2e039a81864c..1ca0130ed4f9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -441,9 +441,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		P_SCHEDSTAT(se->statistics.wait_count);
 	}
 	P(se->load.weight);
+	P(se->runnable_weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
+	P(se->avg.runnable_load_avg);
 #endif
 
 #undef PN_SCHEDSTAT
@@ -558,10 +560,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
+	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
-			cfs_rq->runnable_load_avg);
+			cfs_rq->avg.runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
 			cfs_rq->avg.util_avg);
 	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
@@ -1006,10 +1009,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		"nr_involuntary_switches", (long long)p->nivcsw);
 
 	P(se.load.weight);
+	P(se.runnable_weight);
 #ifdef CONFIG_SMP
 	P(se.avg.load_sum);
+	P(se.avg.runnable_load_sum);
 	P(se.avg.util_sum);
 	P(se.avg.load_avg);
+	P(se.avg.runnable_load_avg);
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 #endif