about · summary · refs · log · tree · commit · diff · stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-09-03 07:16:51 -0400
committerIngo Molnar <mingo@elte.hu>2009-09-15 10:01:07 -0400
commit78e7ed53c9f42f04f9401ada6f7047db60781676 (patch)
tree24f45333ce4479b27c96b425c7d09c080a26609f /kernel
parentd7c33c4930f569caf6b2ece597432853c4151a45 (diff)
sched: Tweak wake_idx
When merging select_task_rq_fair() and sched_balance_self() we lost the use of wake_idx; restore that, and set the wake_idx values to 0 to make wake balancing more aggressive.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched_fair.c | 21 ++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8b3eddbcf9a4..19593568031a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1232,12 +1232,27 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
  * domain.
  */
 static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+		  int this_cpu, int flag)
 {
 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
-	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
+	int load_idx = 0;
+
+	switch (flag) {
+	case SD_BALANCE_FORK:
+	case SD_BALANCE_EXEC:
+		load_idx = sd->forkexec_idx;
+		break;
+
+	case SD_BALANCE_WAKE:
+		load_idx = sd->wake_idx;
+		break;
+
+	default:
+		break;
+	}
 
 	do {
 		unsigned long load, avg_load;
@@ -1392,7 +1407,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu);
+		group = find_idlest_group(sd, p, cpu, flag);
 		if (!group) {
 			sd = sd->child;
 			continue;