author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-09-23 09:33:42 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-09-23 10:23:15 -0400
commit     940959e93949e839c14f8ddc3b9b0e34a2ab6e29 (patch)
tree       3566ce5a8bb8db206193377bca37f5877b70adf6 /kernel/sched_fair.c
parent     63e5c39859a41591662466028c4d1281c033c05a (diff)
sched: fixlet for group load balance
We should not only correct the increment for the initial group, but should
be consistent and do so for all the groups we encounter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
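To illustrate the reasoning, here is a minimal, self-contained sketch in plain C of what the patched effective_load() loop does; struct level, effective_load_sketch() and all the numbers below are hypothetical stand-ins, not the scheduler's real data structures. The point of the fixlet shows up in the loop body: the stale-shares correction (load.weight - rq_weight) is applied at every group level of the walk rather than only once before it, and the old D() macro becomes an explicit divide-by-zero guard.

#include <stdio.h>

/*
 * Hypothetical stand-in for one task_group level that effective_load()
 * walks via for_each_sched_entity(); not the real scheduler structures.
 */
struct level {
	long shares;      /* S: total shares of the group */
	long cpu_shares;  /* s: shares currently held by this CPU */
	long rq_weight;   /* rw: runqueue weight when shares were last updated */
	long load_weight; /* current runqueue weight */
};

static long effective_load_sketch(const struct level *lv, int nr, long wl, long wg)
{
	int i;

	for (i = 0; i < nr; i++) {
		/*
		 * After the fixlet: add the "shares have gone stale"
		 * difference at every level, not just once up front.
		 */
		long more_w = lv[i].load_weight - lv[i].rq_weight;
		long S, rw, s, a, b;

		wl += more_w;
		wg += more_w;

		S  = lv[i].shares;
		s  = lv[i].cpu_shares;
		rw = lv[i].rq_weight;

		a = S * (rw + wl);
		b = S * rw + s * wg;

		/* Open-coded guard replacing the old D(b) macro. */
		wl = s * (a - b);
		if (b)
			wl /= b;

		/* The group itself is already accounted for in the weight. */
		wg = 0;
	}
	return wl;
}

int main(void)
{
	struct level groups[2] = {
		{ .shares = 1024, .cpu_shares = 512,  .rq_weight = 2048, .load_weight = 2176 },
		{ .shares = 2048, .cpu_shares = 1024, .rq_weight = 4096, .load_weight = 4096 },
	};

	/* Estimate the load delta of adding a weight-1024 task. */
	printf("effective load delta: %ld\n",
	       effective_load_sketch(groups, 2, 1024, 1024));
	return 0;
}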
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c20899763457..0c59da7e3120 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1027,7 +1027,6 @@ static long effective_load(struct task_group *tg, int cpu,
 		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
-	long more_w;
 
 	if (!tg->parent)
 		return wl;
@@ -1039,18 +1038,17 @@ static long effective_load(struct task_group *tg, int cpu,
 	if (!wl && sched_feat(ASYM_EFF_LOAD))
 		return wl;
 
-	/*
-	 * Instead of using this increment, also add the difference
-	 * between when the shares were last updated and now.
-	 */
-	more_w = se->my_q->load.weight - se->my_q->rq_weight;
-	wl += more_w;
-	wg += more_w;
-
 	for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
-
 		long S, rw, s, a, b;
+		long more_w;
+
+		/*
+		 * Instead of using this increment, also add the difference
+		 * between when the shares were last updated and now.
+		 */
+		more_w = se->my_q->load.weight - se->my_q->rq_weight;
+		wl += more_w;
+		wg += more_w;
 
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
@@ -1059,7 +1057,11 @@ static long effective_load(struct task_group *tg, int cpu,
 		a = S*(rw + wl);
 		b = S*rw + s*wg;
 
-		wl = s*(a-b)/D(b);
+		wl = s*(a-b);
+
+		if (likely(b))
+			wl /= b;
+
 		/*
 		 * Assume the group is already running and will
 		 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@ static long effective_load(struct task_group *tg, int cpu,
 		 * alter the group weight.
 		 */
 		wg = 0;
-#undef D
 	}
 
 	return wl;
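A note on the third hunk: the removed D(b) macro divided by 1 whenever b was zero, which is a no-op, so the open-coded likely(b) guard yields the same wl. A tiny standalone check of that equivalence, with hypothetical values and likely() dropped since it only hints the branch predictor:

#include <assert.h>
#include <stdio.h>

/* The macro removed by this patch (minus the likely() annotation). */
#define D(n) ((n) ? (n) : 1)

/* Old form: unconditional division by D(b). */
static long old_wl(long s, long a, long b)
{
	return s * (a - b) / D(b);
}

/* New form: divide only when b is non-zero. */
static long new_wl(long s, long a, long b)
{
	long wl = s * (a - b);

	if (b)
		wl /= b;
	return wl;
}

int main(void)
{
	long b;

	/* Hypothetical sample values, including the b == 0 corner case. */
	for (b = 0; b <= 4096; b += 1024)
		assert(old_wl(512, 3200 * 1024, b) == new_wl(512, 3200 * 1024, b));

	puts("old D(b) form and new likely(b) guard agree");
	return 0;
}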