author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:40 -0400
commit	bb3469ac9b50f14ad6eba129ca0ad4fd033097a0
tree	e5c6c6563778593f53fca642675b4c3ba5603ed2	/kernel/sched_fair.c
parent	a8a51d5e59561aa5b4d66e19eca819b537783e8f
sched: hierarchical load vs affine wakeups
With hierarchical grouping we can't just compare task weight to rq weight -
we need to scale the weight appropriately.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
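For intuition, a minimal user-space sketch of the scaling this patch introduces, assuming calc_delta_mine() effectively computes weight * cfs_rq->h_load / cfs_rq->load.weight (the kernel's fixed-point inverse-weight arithmetic is omitted); scale_h_load() and its parameters are hypothetical stand-ins, not kernel API:

/*
 * Sketch only: task_h_load() scales a task's raw weight by the
 * fraction of the hierarchy's load that its cfs_rq carries,
 * roughly h_load = weight * cfs_rq->h_load / cfs_rq->load.weight.
 */
#include <stdio.h>

static unsigned long scale_h_load(unsigned long task_weight,
				  unsigned long rq_h_load,
				  unsigned long rq_weight)
{
	if (!rq_weight)		/* empty rq: nothing to scale against */
		return task_weight;
	return task_weight * rq_h_load / rq_weight;
}

int main(void)
{
	/*
	 * A nice-0 task (weight 1024) queued on a group rq of total
	 * weight 4096, where the group contributes only 512 to the
	 * top-level load: it counts as 1024 * 512 / 4096 = 128, not
	 * as its raw 1024.
	 */
	printf("h_load = %lu\n", scale_h_load(1024, 512, 4096));
	return 0;
}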
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7b8d664d6f22..865cb53a7ccf 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1073,6 +1073,25 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 static const struct sched_class fair_sched_class;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long task_h_load(struct task_struct *p)
+{
+	unsigned long h_load = p->se.load.weight;
+	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+
+	update_h_load(task_cpu(p));
+
+	h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+
+	return h_load;
+}
+#else
+static unsigned long task_h_load(struct task_struct *p)
+{
+	return p->se.load.weight;
+}
+#endif
+
 static int
 wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
@@ -1093,9 +1112,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * of the current CPU:
 	 */
 	if (sync)
-		tl -= current->se.load.weight;
+		tl -= task_h_load(current);
 
-	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+	balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
 
 	/*
 	 * If the currently running task will sleep within
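To see why the scaling matters in the balanced test above, a small sketch with hypothetical numbers: with imbalance = 125 (a 25% allowance) and loads of 1024 on both CPUs, the affine wakeup fails the test at the task's raw weight of 1024 but passes once that weight is scaled down to a hierarchical share of 128:

#include <stdio.h>

/* the wake_affine() balance predicate, extracted for illustration */
static int balanced(unsigned long tl, unsigned long task_load,
		    unsigned long imbalance, unsigned long load)
{
	return 100 * (tl + task_load) <= imbalance * load;
}

int main(void)
{
	unsigned long tl = 1024, load = 1024, imbalance = 125;

	/* raw weight: 100 * 2048 = 204800 >  125 * 1024 = 128000 -> 0 */
	printf("raw weight 1024: %d\n", balanced(tl, 1024, imbalance, load));
	/* scaled h_load: 100 * 1152 = 115200 <= 128000           -> 1 */
	printf("h_load 128:      %d\n", balanced(tl, 128, imbalance, load));
	return 0;
}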