author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:39 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:47 -0400
commit		f5bfb7d9ff73d72ee4f2f4830a6f0c9088d00f92
tree		402e8caaef4d3f0c26a52b171e04dbb67ea08cfa /kernel/sched_fair.c
parent		f1d239f73200a5803a89e5929fb3abc1596b7589
sched: bias effective_load() error towards failing wake_affine().
Measurement shows that the difference between the cgroup:/ and cgroup:/foo
wake_affine() results is that the latter succeeds significantly more often.
Therefore, bias the calculation towards failing the test.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+), 0 deletions(-)
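The hunks below guard the new early return with sched_feat(ASYM_EFF_LOAD). The feature bit itself is not defined in kernel/sched_fair.c, and the diffstat above is limited to that file, so the hunk introducing it is not shown here. In kernels of this vintage a scheduler feature is normally declared with the SCHED_FEAT() macro and can be toggled at runtime through debugfs; the line below is only a sketch of what that companion change presumably looks like (its placement and default value are assumptions, not taken from the diff shown):

	/* assumed companion hunk: declare the feature bit, enabled by default */
	SCHED_FEAT(ASYM_EFF_LOAD, 1)

With such a bit in place, the bias can typically be switched off for comparison runs with something like `echo NO_ASYM_EFF_LOAD > /sys/kernel/debug/sched_features` (again assuming the standard sched_features debugfs interface is available in the build).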
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e87f1a52f625..9bcc0030a58b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,6 +1074,27 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * effective_load() calculates the load change as seen from the root_task_group
+ *
+ * Adding load to a group doesn't make a group heavier, but can cause movement
+ * of group shares between cpus. Assuming the shares were perfectly aligned one
+ * can calculate the shift in shares.
+ *
+ * The problem is that perfectly aligning the shares is rather expensive, hence
+ * we try to avoid doing that too often - see update_shares(), which ratelimits
+ * this change.
+ *
+ * We compensate this by not only taking the current delta into account, but
+ * also considering the delta between when the shares were last adjusted and
+ * now.
+ *
+ * We still saw a performance dip, some tracing learned us that between
+ * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
+ * significantly. Therefore try to bias the error in direction of failing
+ * the affine wakeup.
+ *
+ */
 static long effective_load(struct task_group *tg, int cpu,
 		long wl, long wg)
 {
@@ -1084,6 +1105,13 @@ static long effective_load(struct task_group *tg, int cpu,
 	return wl;
 
 	/*
+	 * By not taking the decrease of shares on the other cpu into
+	 * account our error leans towards reducing the affine wakeups.
+	 */
+	if (!wl && sched_feat(ASYM_EFF_LOAD))
+		return wl;
+
+	/*
 	 * Instead of using this increment, also add the difference
 	 * between when the shares were last updated and now.
 	 */
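The comment block added in the first hunk states that adding load to a group does not make the group heavier but shifts its shares between CPUs, and that the shift can be computed if the shares are assumed perfectly aligned. A minimal userspace C sketch of that arithmetic follows; the proportional distribution s_i = S * rw_i / sum(rw), the variable names and the numbers are illustrative assumptions, not kernel code:

#include <stdio.h>

/*
 * Illustrative only: distribute a group's shares S across CPUs in
 * proportion to the group's per-cpu runqueue weight rw[i], following
 * the "perfectly aligned shares" assumption in the comment above.
 */
static void distribute(long S, const long rw[], long s[], int ncpus)
{
	long total = 0;
	int i;

	for (i = 0; i < ncpus; i++)
		total += rw[i];
	for (i = 0; i < ncpus; i++)
		s[i] = total ? S * rw[i] / total : 0;
}

int main(void)
{
	long S = 1024;			/* group shares (tg->shares)        */
	long rw[2] = { 2048, 1024 };	/* group rq weight on cpu0 and cpu1 */
	long wl = 1024;			/* weight of the woken task         */
	long before[2], after[2];
	int i;

	distribute(S, rw, before, 2);

	rw[0] += wl;			/* wake the task onto cpu0 only     */
	distribute(S, rw, after, 2);

	for (i = 0; i < 2; i++)
		printf("cpu%d: share %ld -> %ld (shift %+ld)\n",
		       i, before[i], after[i], after[i] - before[i]);

	/* cpu0 gains roughly what cpu1 loses; the group total stays ~S. */
	return 0;
}

In this toy run cpu0's share grows and cpu1's share shrinks by about the same amount while the group total stays at S. The decrease on the remote CPU is exactly what the new wl == 0 branch declines to credit: per its comment, ignoring that decrease makes the error lean towards reducing, i.e. failing, the affine wakeup.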