author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:37 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:46 -0400
commit		83378269a5fad98f562ebc0f09c349575e6cbfe1 (patch)
tree		516128bff6cfe915f93b983e3fe96733707ce09c
parent		243e0e7b7d3b54749ece2e879ecd7e2a11874443 (diff)
sched: correct wakeup weight calculations
rw_i = {2, 4, 1, 0}
s_i = {2/7, 4/7, 1/7, 0}
wakeup on cpu0, weight=1
rw'_i = {3, 4, 1, 0}
s'_i = {3/8, 4/8, 1/8, 0}
s_0 = S * rw_0 / \Sum rw_j ->
\Sum rw_j = S*rw_0/s_0 = 1*2*7/2 = 7 (correct)
s'_0 = S * (rw_0 + 1) / (\Sum rw_j + 1) =
1 * (2+1) / (7+1) = 3/8 (correct)
so we find that adding 1 to cpu0 gains 5/56 in weight
if, say, the other cpu were cpu1, we'd also have to calculate its 4/56 loss
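
For reference, the arithmetic above as a tiny standalone program (a user-space sketch in plain C with the example values hard-coded; not kernel code):

#include <stdio.h>

/*
 * The example above, hard-coded: a group with shares S = 1 spread over
 * four CPUs with runqueue weights rw_i; its share on CPU i is
 * s_i = S * rw_i / \Sum rw_j.  User-space sketch only, not kernel code.
 */
int main(void)
{
        double S = 1.0;
        double rw[4] = { 2, 4, 1, 0 };
        double sum = rw[0] + rw[1] + rw[2] + rw[3];     /* \Sum rw_j = 7 */

        double s0_old = S * rw[0] / sum;                /* 2/7 */
        double s1_old = S * rw[1] / sum;                /* 4/7 */

        /* wake a weight-1 task on cpu0: rw_0 and the sum both grow by 1 */
        double s0_new = S * (rw[0] + 1) / (sum + 1);    /* 3/8 */
        double s1_new = S * rw[1] / (sum + 1);          /* 4/8 */

        printf("cpu0 gain: %f (5/56 = %f)\n", s0_new - s0_old, 5.0/56);
        printf("cpu1 loss: %f (4/56 = %f)\n", s1_old - s1_new, 4.0/56);
        return 0;
}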
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched.c	4
-rw-r--r--	kernel/sched_fair.c	48
2 files changed, 30 insertions, 22 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 62db0891025a..01d3e51b7116 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -365,6 +365,10 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #else
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+        return NULL;
+}
 
 #endif /* CONFIG_GROUP_SCHED */
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 26ebe180cdea..bed2f71e63d9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,10 +1074,10 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
+static unsigned long effective_load(struct task_group *tg, int cpu,
+                unsigned long wl, unsigned long wg)
 {
         struct sched_entity *se = tg->se[cpu];
-        long wg = wl;
 
         for_each_sched_entity(se) {
 #define D(n) (likely(n) ? (n) : 1)
@@ -1092,6 +1092,13 @@ static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
                 b = S*rw + s*wg;
 
                 wl = s*(a-b)/D(b);
+                /*
+                 * Assume the group is already running and will
+                 * thus already be accounted for in the weight.
+                 *
+                 * That is, moving shares between CPUs, does not
+                 * alter the group weight.
+                 */
                 wg = 0;
 #undef D
         }
@@ -1099,26 +1106,12 @@ static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
         return wl;
 }
 
-static unsigned long task_load_sub(struct task_struct *p)
-{
-        return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-        return effective_load(task_group(p), p->se.load.weight, cpu);
-}
-
 #else
 
-static unsigned long task_load_sub(struct task_struct *p)
+static inline unsigned long effective_load(struct task_group *tg, int cpu,
+                unsigned long wl, unsigned long wg)
 {
-        return -p->se.load.weight;
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-        return p->se.load.weight;
+        return wl;
 }
 
 #endif
@@ -1130,8 +1123,10 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
                   unsigned int imbalance)
 {
         struct task_struct *curr = this_rq->curr;
+        struct task_group *tg;
         unsigned long tl = this_load;
         unsigned long tl_per_task;
+        unsigned long weight;
         int balanced;
 
         if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
@@ -1142,10 +1137,19 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
          * effect of the currently running task from the load
          * of the current CPU:
          */
-        if (sync)
-                tl += task_load_sub(current);
+        if (sync) {
+                tg = task_group(current);
+                weight = current->se.load.weight;
+
+                tl += effective_load(tg, this_cpu, -weight, -weight);
+                load += effective_load(tg, prev_cpu, 0, -weight);
+        }
+
+        tg = task_group(p);
+        weight = p->se.load.weight;
 
-        balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
+        balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
         /*
          * If the currently running task will sleep within
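
Below, for illustration, is a single-level user-space model of the reworked effective_load() and the new wake_affine() balance check. It reuses the S = 1 example from the changelog; the flat (non-hierarchical) helper and the load values in main() are made up for the sketch and are not the kernel implementation:

#include <stdio.h>

/* Example group from the changelog: shares S = 1, per-CPU rq weights rw_i. */
static const double S = 1.0;
static const double rw[4] = { 2, 4, 1, 0 };

/*
 * One-level model of effective_load(tg, cpu, wl, wg): the change in the
 * group's share on @cpu when @wl weight is added to its runqueue there
 * while the group's total weight grows by @wg.
 */
static double effective_load(int cpu, double wl, double wg)
{
        double sum = rw[0] + rw[1] + rw[2] + rw[3];

        return S * (rw[cpu] + wl) / (sum + wg) - S * rw[cpu] / sum;
}

int main(void)
{
        int this_cpu = 0, prev_cpu = 1;
        unsigned int imbalance = 110;           /* made-up imbalance_pct */
        double weight = 1.0;                    /* p->se.load.weight     */
        double tl = 2.0, load = 4.0;            /* made-up CPU loads     */
        int balanced;

        /*
         * Mirror of the new check: account for both the gain the task
         * causes on this_cpu and the loss it causes on prev_cpu.
         */
        balanced = 100 * (tl + effective_load(this_cpu, weight, weight)) <=
                imbalance * (load + effective_load(prev_cpu, 0, weight));

        printf("delta on this_cpu: %+f\n", effective_load(this_cpu, weight, weight));
        printf("delta on prev_cpu: %+f\n", effective_load(prev_cpu, 0, weight));
        printf("balanced = %d\n", balanced);
        return 0;
}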