author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-06-27 07:41:30 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-06-27 08:31:42 -0400
commit     4be9daaa1b33701f011f4117f22dc1e45a3e6e34 (patch)
tree       8c823cf397055c4919cc5f2a6275f125b0580a6b /kernel/sched_fair.c
parent     42a3ac7d5cee89849448b41b86faeb86f98e92f6 (diff)
sched: fix task_h_load()
Currently task_h_load() computes the load of a task and uses that to either
subtract it from, or add it to, the total load.
However, removing or adding a task need not have any effect on the total load
at all. Imagine adding a task to a group that is local to one cpu - in that
case the total load of that cpu is unaffected.
So properly compute addition/removal:
  s_i  = S * rw_i / \Sum_j rw_j
  s'_i = S * (rw_i + wl) / (\Sum_j rw_j + wg)

then s'_i - s_i gives the change in load.
Where s_i is the shares for cpu i, S the group weight, rw_i the runqueue weight
for that cpu, wl the weight we add (or subtract), and wg the weight contribution
to the runqueue.
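
As an illustration of the formula (not part of the commit itself), here is a
minimal userspace sketch that evaluates s'_i - s_i for made-up example weights;
share_delta() and all of its inputs are hypothetical names, not kernel symbols:

/*
 * Minimal sketch of the share-delta formula above, for illustration only.
 * All weights are made-up example values; nothing here is kernel code.
 */
#include <stdio.h>

static long share_delta(long S, long rw_i, long sum_rw, long wl, long wg)
{
        long s  = S * rw_i / sum_rw;               /* s_i  = S * rw_i / \Sum_j rw_j           */
        long sn = S * (rw_i + wl) / (sum_rw + wg); /* s'_i = S * (rw_i + wl) / (\Sum rw + wg) */

        return sn - s;                             /* change in effective load on cpu i       */
}

int main(void)
{
        /* Group spread over two cpus: adding weight 1024 on cpu i shifts shares towards it. */
        printf("spread group: %ld\n", share_delta(2048, 1024, 2048, 1024, 1024));

        /* Group local to cpu i: rw_i == \Sum_j rw_j, so s'_i == s_i and the delta is 0. */
        printf("local group:  %ld\n", share_delta(2048, 2048, 2048, 1024, 1024));

        return 0;
}

The second call models the case described above: when the group is local to one
cpu, its shares there are already the full group weight, so adding a task there
changes nothing - exactly the case task_h_load() failed to account for.
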
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  49
1 file changed, 40 insertions, 9 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 734e4c556fcb..a1694441f8b7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,22 +1074,53 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long task_h_load(struct task_struct *p)
+static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
 {
-        unsigned long h_load = p->se.load.weight;
-        struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+        struct sched_entity *se = tg->se[cpu];
+        long wg = wl;
 
-        update_h_load(task_cpu(p));
+        for_each_sched_entity(se) {
+#define D(n) (likely(n) ? (n) : 1)
+
+                long S, Srw, rw, s, sn;
+
+                S = se->my_q->tg->shares;
+                s = se->my_q->shares;
+                rw = se->my_q->load.weight;
 
-        h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+                Srw = S * rw / D(s);
+                sn = S * (rw + wl) / D(Srw + wg);
+
+                wl = sn - s;
+                wg = 0;
+#undef D
+        }
 
-        return h_load;
+        return wl;
 }
+
+static unsigned long task_load_sub(struct task_struct *p)
+{
+        return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
+}
+
+static unsigned long task_load_add(struct task_struct *p, int cpu)
+{
+        return effective_load(task_group(p), p->se.load.weight, cpu);
+}
+
 #else
-static unsigned long task_h_load(struct task_struct *p)
+
+static unsigned long task_load_sub(struct task_struct *p)
+{
+        return -p->se.load.weight;
+}
+
+static unsigned long task_load_add(struct task_struct *p, int cpu)
 {
         return p->se.load.weight;
 }
+
 #endif
 
 static int
@@ -1112,9 +1143,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
          * of the current CPU:
          */
         if (sync)
-                tl -= task_h_load(current);
+                tl += task_load_sub(current);
 
-        balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
+        balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
 
         /*
          * If the currently running task will sleep within