diff options
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-06-27 07:41:38 -0400
committer: Ingo Molnar <mingo@elte.hu>              2008-06-27 08:31:47 -0400
commit:    f1d239f73200a5803a89e5929fb3abc1596b7589 (patch)
tree:      33c59b6f2621284af91825ea7fbab718ffe65ade /kernel
parent:    83378269a5fad98f562ebc0f09c349575e6cbfe1 (diff)
sched: incremental effective_load()
Increase the accuracy of the effective_load values.
Not only consider the current increment (as per the attempted wakeup), but
also consider the delta between when we last adjusted the shares and the
current situation.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel'):
 kernel/sched.c      |  6
 kernel/sched_fair.c | 18
 2 files changed, 21 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 01d3e51b7116..7613f69f0978 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -427,6 +427,11 @@ struct cfs_rq {
 	 * this cpu's part of tg->shares
 	 */
 	unsigned long shares;
+
+	/*
+	 * load.weight at the time we set shares
+	 */
+	unsigned long rq_weight;
 #endif
 #endif
 };
@@ -1527,6 +1532,7 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
 	 * record the actual number of shares, not the boosted amount.
 	 */
 	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+	tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bed2f71e63d9..e87f1a52f625 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,10 +1074,22 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, int cpu,
-		unsigned long wl, unsigned long wg)
+static long effective_load(struct task_group *tg, int cpu,
+		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
+	long more_w;
+
+	if (!tg->parent)
+		return wl;
+
+	/*
+	 * Instead of using this increment, also add the difference
+	 * between when the shares were last updated and now.
+	 */
+	more_w = se->my_q->load.weight - se->my_q->rq_weight;
+	wl += more_w;
+	wg += more_w;
 
 	for_each_sched_entity(se) {
 #define D(n) (likely(n) ? (n) : 1)
@@ -1086,7 +1098,7 @@ static unsigned long effective_load(struct task_group *tg, int cpu,
 
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
-		rw = se->my_q->load.weight;
+		rw = se->my_q->rq_weight;
 
 		a = S*(rw + wl);
 		b = S*rw + s*wg;