author		Paul Turner <pjt@google.com>		2012-10-04 07:18:31 -0400
committer	Ingo Molnar <mingo@kernel.org>		2012-10-24 04:27:25 -0400
commit		8165e145ceb62fc338e099c9b12b3239c83d2f8e (patch)
tree		be494c8d97adec0615f776515875e4961a40d920 /kernel/sched
parent		c566e8e9e44b72b53091da20e2dedefc730f2ee2 (diff)
sched: Compute load contribution by a group entity
Unlike task entities, which have a fixed weight, group entities own a
fraction of their parenting task_group's shares as their contributed weight.
Compute this fraction so that we can correctly account for hierarchies and
shared entity nodes.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.855074415@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
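As a rough illustration of the fraction described in the changelog, the
following standalone sketch mirrors the arithmetic with plain uint64_t
instead of the kernel's atomic64_t and div64_u64(); the variable names and
numbers below are made up for the example and are not taken from the kernel.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: the group-entity fraction from this patch,
 * computed with plain integer math. All values are made-up examples.
 */
int main(void)
{
	uint64_t tg_shares      = 1024;	/* task_group's configured shares */
	uint64_t tg_load_avg    = 3072;	/* sum of all cfs_rq contributions */
	uint64_t cfs_rq_contrib = 1536;	/* this cfs_rq's tg_load_contrib */

	/*
	 * The group entity owns cfs_rq_contrib / tg_load_avg of the group's
	 * shares; the "+ 1" avoids a division by zero, as in the patch.
	 */
	uint64_t load_avg_contrib =
		(cfs_rq_contrib * tg_shares) / (tg_load_avg + 1);

	printf("group entity load_avg_contrib: %llu\n",
	       (unsigned long long)load_avg_contrib);
	return 0;
}

With these example numbers the cfs_rq owns half of the group's tracked load,
so the group entity contributes roughly half of the 1024 shares (511 after
the "+ 1" rounding).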
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db788222f198..e20cb2693ef7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1117,22 +1117,43 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 		cfs_rq->tg_load_contrib += tg_contrib;
 	}
 }
+
+static inline void __update_group_entity_contrib(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = group_cfs_rq(se);
+	struct task_group *tg = cfs_rq->tg;
+	u64 contrib;
+
+	contrib = cfs_rq->tg_load_contrib * tg->shares;
+	se->avg.load_avg_contrib = div64_u64(contrib,
+				     atomic64_read(&tg->load_avg) + 1);
+}
 #else
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
+static inline void __update_group_entity_contrib(struct sched_entity *se) {}
 #endif
 
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+	u32 contrib;
+
+	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
+	contrib /= (se->avg.runnable_avg_period + 1);
+	se->avg.load_avg_contrib = scale_load(contrib);
+}
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
 	long old_contrib = se->avg.load_avg_contrib;
 
-	if (!entity_is_task(se))
-		return 0;
-
-	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
-					     se->load.weight,
-					     se->avg.runnable_avg_period + 1);
+	if (entity_is_task(se)) {
+		__update_task_entity_contrib(se);
+	} else {
+		__update_group_entity_contrib(se);
+	}
 
 	return se->avg.load_avg_contrib - old_contrib;
 }
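For comparison with the group-entity path, here is a standalone sketch of the
task-entity arithmetic added in __update_task_entity_contrib() above; the
values are made up, and the kernel's scale_load()/scale_load_down() wrappers
are simplified to a bare weight of 1024 (assumed here to be the nice-0 weight
after scaling down).

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: the task-entity contribution from the hunk
 * above, with made-up example values. The product is kept in a u32; the
 * original code's comment notes this is to avoid overflowing a 32-bit
 * type once SCHED_LOAD_SCALE scaling is applied.
 */
int main(void)
{
	uint32_t runnable_avg_sum    = 23871;	/* example: runnable about half the time */
	uint32_t runnable_avg_period = 47742;	/* example period, same units as the sum */
	uint32_t weight              = 1024;	/* assumed nice-0 weight, already scaled down */

	uint32_t contrib = runnable_avg_sum * weight;
	contrib /= (runnable_avg_period + 1);

	printf("task entity load_avg_contrib: %u\n", contrib);
	return 0;
}

A task runnable for about half of its averaging period thus contributes about
half of its weight (511 here), analogous to the half-share result in the
group-entity sketch earlier on this page.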