author     Peter Zijlstra <peterz@infradead.org>   2017-05-06 09:59:54 -0400
committer  Ingo Molnar <mingo@kernel.org>          2017-09-29 13:35:15 -0400
commit     1ea6c46a23f1213d1972bfae220db5c165e27bba (patch)
tree       434a578058f52ae1f26b22b7f61183d0ba6131b2 /include/linux/sched.h
parent     0e2d2aaaae52c247c047d14999b93486bdbd3431 (diff)
sched/fair: Propagate an effective runnable_load_avg
The load balancer uses runnable_load_avg as load indicator. For
!cgroup this is:
runnable_load_avg = \Sum se->avg.load_avg ; where se->on_rq
That is, a direct sum over all runnable tasks on that runqueue, as
opposed to load_avg, which sums over all tasks on the runqueue and
therefore also includes a blocked component.
However, in the cgroup case, this comes apart since the group entities
are always runnable, even if most of their constituent entities are
blocked.
Therefore introduce a runnable_weight which for task entities is the
same as the regular weight, but for group entities is a fraction of
the entity weight and represents the runnable part of the group
runqueue.
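(As a hedged sketch of what that fraction could look like -- the exact
expression and guards live in kernel/sched/fair.c and may differ -- the
group entity's weight is scaled by the runnable share of its group
runqueue:)

/* Assumed form only: scale the group se's weight by grq's runnable share. */
static void update_runnable_weight(struct sched_entity *se, struct cfs_rq *grq)
{
	unsigned long load = grq->avg.load_avg;

	if (!load)
		load = 1;	/* avoid dividing by zero */

	se->runnable_weight = se->load.weight * grq->avg.runnable_load_avg / load;
}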
Then propagate this load through the PELT hierarchy to arrive at an
effective runnable load average -- which we should not confuse with
the canonical runnable load average.
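(One way to picture the propagated, "effective" contribution -- again an
assumed sketch rather than the actual PELT code, which works on the
*_sum terms -- is that the group entity's load_avg is scaled down by its
runnable_weight / weight ratio, and that scaled value is what the parent
runqueue sums.)

/* Hedged illustration of a group entity's effective runnable contribution. */
static unsigned long group_runnable_contrib(struct sched_entity *ge)
{
	if (!ge->load.weight)
		return 0;

	return ge->avg.load_avg * ge->runnable_weight / ge->load.weight;
}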
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a7df4e558c..bdd6ad6fcce1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -331,9 +331,11 @@ struct load_weight {
 struct sched_avg {
 	u64				last_update_time;
 	u64				load_sum;
+	u64				runnable_load_sum;
 	u32				util_sum;
 	u32				period_contrib;
 	unsigned long			load_avg;
+	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 };
 
@@ -376,6 +378,7 @@ struct sched_statistics {
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight		load;
+	unsigned long			runnable_weight;
 	struct rb_node			run_node;
 	struct list_head		group_node;
 	unsigned int			on_rq;