 kernel/sched_fair.c | 49 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 734e4c556fcb..a1694441f8b7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,22 +1074,53 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long task_h_load(struct task_struct *p)
+static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
 {
-	unsigned long h_load = p->se.load.weight;
-	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+	struct sched_entity *se = tg->se[cpu];
+	long wg = wl;
 
-	update_h_load(task_cpu(p));
+	for_each_sched_entity(se) {
+#define D(n) (likely(n) ? (n) : 1)
+
+		long S, Srw, rw, s, sn;
+
+		S = se->my_q->tg->shares;
+		s = se->my_q->shares;
+		rw = se->my_q->load.weight;
 
-	h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+		Srw = S * rw / D(s);
+		sn = S * (rw + wl) / D(Srw + wg);
+
+		wl = sn - s;
+		wg = 0;
+#undef D
+	}
 
-	return h_load;
+	return wl;
 }
+
+static unsigned long task_load_sub(struct task_struct *p)
+{
+	return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
+}
+
+static unsigned long task_load_add(struct task_struct *p, int cpu)
+{
+	return effective_load(task_group(p), p->se.load.weight, cpu);
+}
+
 #else
-static unsigned long task_h_load(struct task_struct *p)
+
+static unsigned long task_load_sub(struct task_struct *p)
+{
+	return -p->se.load.weight;
+}
+
+static unsigned long task_load_add(struct task_struct *p, int cpu)
 {
 	return p->se.load.weight;
 }
+
 #endif
 
 static int
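
The heart of the change is effective_load(): instead of scaling a task's weight by the precomputed hierarchical load (task_h_load()), it walks the task's group hierarchy upward with for_each_sched_entity() and, at every level, recomputes the shares the group would receive once the weight change wl is applied. Srw estimates the total runqueue weight backing the group's current shares s, sn is the share count after the change, and their difference becomes the wl propagated to the parent. wg, the change to the group runqueue's own weight, only applies at the lowest level and is zeroed afterwards; the D() macro merely keeps the divisions safe when a share count is zero. A minimal userspace sketch of one loop iteration, assuming invented share and weight values; the kernel-only likely() annotation is dropped and the helper name level_delta() is hypothetical:

#include <stdio.h>

/* Keep the divisions safe when a value is zero, as the patch's D() does. */
#define D(n) ((n) ? (n) : 1)

/*
 * One iteration of the for_each_sched_entity() loop: S is the group's
 * total shares, s its current shares on this CPU, rw its runqueue
 * weight; wl is the weight being added, wg the change to the runqueue
 * weight itself (nonzero only on the first level).
 */
static long level_delta(long S, long s, long rw, long wl, long wg)
{
	long Srw = S * rw / D(s);		/* total weight backing the s shares */
	long sn = S * (rw + wl) / D(Srw + wg);	/* shares after the weight change */

	return sn - s;				/* delta handed to the parent level */
}

int main(void)
{
	/* Hypothetical group: S = 1024, s = 512, rw = 2048; a task of
	 * weight 1024 is enqueued, so wl == wg at the lowest level. */
	printf("delta = %ld\n", level_delta(1024, 512, 2048, 1024, 1024));
	return 0;
}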
@@ -1112,9 +1143,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * of the current CPU:
 	 */
 	if (sync)
-		tl -= task_h_load(current);
+		tl += task_load_sub(current);
 
-	balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
+	balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
 
 	/*
 	 * If the currently running task will sleep within
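
The second hunk feeds these deltas into wake_affine(): on a synchronous wakeup the waker's effective load is first taken off the target CPU (tl += task_load_sub(current)), and the wakeup is treated as affine only if adding the wakee's effective load still keeps the target within the domain's allowed imbalance of its previous CPU. A minimal sketch of that test; every number below is a hypothetical stand-in for values the kernel reads from the runqueues and the sched_domain:

#include <stdio.h>

int main(void)
{
	long tl = 2048;		/* stand-in load on the waking (target) CPU */
	long load = 3072;	/* stand-in load on the task's previous CPU */
	long imbalance = 112;	/* allowed imbalance, in percent */
	int sync = 1;		/* synchronous wakeup: the waker is about to sleep */

	long curr_delta = -900;	/* stand-in for task_load_sub(current) */
	long p_delta = 950;	/* stand-in for task_load_add(p, this_cpu) */

	if (sync)
		tl += curr_delta;

	/* Affine the wakeup iff the target stays within the allowed
	 * imbalance of the previous CPU's load. */
	printf("balanced = %d\n", 100 * (tl + p_delta) <= imbalance * load);
	return 0;
}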
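
Note that both helpers declare an unsigned long return type even though the delta can be negative (the !CONFIG_FAIR_GROUP_SCHED task_load_sub() literally returns -p->se.load.weight); the switch from tl -= task_h_load(current) to tl += task_load_sub(current) matches that convention and relies on unsigned wrap-around to carry out the subtraction.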