diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-06-27 07:41:38 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 08:31:47 -0400 |
commit | f1d239f73200a5803a89e5929fb3abc1596b7589 (patch) | |
tree | 33c59b6f2621284af91825ea7fbab718ffe65ade /kernel/sched_fair.c | |
parent | 83378269a5fad98f562ebc0f09c349575e6cbfe1 (diff) |
sched: incremental effective_load()
Increase the accuracy of the effective_load values.
Consider not only the current increment (as per the attempted wakeup), but
also the delta between when the shares were last adjusted and the
current situation.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 18 |
1 files changed, 15 insertions, 3 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index bed2f71e63d9..e87f1a52f625 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1074,10 +1074,22 @@ static inline int wake_idle(int cpu, struct task_struct *p) | |||
1074 | static const struct sched_class fair_sched_class; | 1074 | static const struct sched_class fair_sched_class; |
1075 | 1075 | ||
1076 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1076 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1077 | static unsigned long effective_load(struct task_group *tg, int cpu, | 1077 | static long effective_load(struct task_group *tg, int cpu, |
1078 | unsigned long wl, unsigned long wg) | 1078 | long wl, long wg) |
1079 | { | 1079 | { |
1080 | struct sched_entity *se = tg->se[cpu]; | 1080 | struct sched_entity *se = tg->se[cpu]; |
1081 | long more_w; | ||
1082 | |||
1083 | if (!tg->parent) | ||
1084 | return wl; | ||
1085 | |||
1086 | /* | ||
1087 | * Instead of using this increment, also add the difference | ||
1088 | * between when the shares were last updated and now. | ||
1089 | */ | ||
1090 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | ||
1091 | wl += more_w; | ||
1092 | wg += more_w; | ||
1081 | 1093 | ||
1082 | for_each_sched_entity(se) { | 1094 | for_each_sched_entity(se) { |
1083 | #define D(n) (likely(n) ? (n) : 1) | 1095 | #define D(n) (likely(n) ? (n) : 1) |
@@ -1086,7 +1098,7 @@ static unsigned long effective_load(struct task_group *tg, int cpu, | |||
1086 | 1098 | ||
1087 | S = se->my_q->tg->shares; | 1099 | S = se->my_q->tg->shares; |
1088 | s = se->my_q->shares; | 1100 | s = se->my_q->shares; |
1089 | rw = se->my_q->load.weight; | 1101 | rw = se->my_q->rq_weight; |
1090 | 1102 | ||
1091 | a = S*(rw + wl); | 1103 | a = S*(rw + wl); |
1092 | b = S*rw + s*wg; | 1104 | b = S*rw + s*wg; |