author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-06-27 07:41:24 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-06-27 08:31:37 -0400
commit    3e5459b4bea3ca2618cc02d56d12639f2cba531d (patch)
tree      bf931bd0cb3f653a62045c99beaed837242dd773 /kernel/sched.c
parent    c8cba857b4997d5b00451d01474638f6a153f713 (diff)
sched: fix newidle smp group balancing
Re-compute the shares on newidle - so we can make a decision based on
recent data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f864b751fd19..cdd09462fc98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1579,6 +1579,13 @@ static void update_shares(struct sched_domain *sd)
 	walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
 }
 
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+	spin_unlock(&rq->lock);
+	update_shares(sd);
+	spin_lock(&rq->lock);
+}
+
 static void update_h_load(int cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
@@ -1595,6 +1602,10 @@ static inline void update_shares(struct sched_domain *sd)
 {
 }
 
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
 #endif
 
 #endif
@@ -3543,6 +3554,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
+	update_shares_locked(this_rq, sd);
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, cpus, NULL);
 	if (!group) {
@@ -3586,6 +3598,7 @@ redo:
 	} else
 		sd->nr_balance_failed = 0;
 
+	update_shares_locked(this_rq, sd);
 	return ld_moved;
 
 out_balanced:
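
For context: update_shares_locked() follows a common kernel idiom in which the caller already holds rq->lock, but the work to be done - here update_shares(), which walks the whole task-group tree - is too expensive to run under it, so the lock is dropped around the call and re-acquired before returning. Below is a minimal user-space sketch of that unlock/recompute/relock pattern; it uses pthread mutexes in place of the kernel spinlock, and every name in it is illustrative rather than taken from the patch.

/* Sketch only: pthread mutexes stand in for rq->lock, and
 * recompute_shares() stands in for walk_tg_tree(). Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static int shares = 1024;

static void recompute_shares(void)
{
	/* expensive work that must not run under the lock */
	shares += 1;
}

/* Caller must hold rq_lock, just as load_balance_newidle() holds rq->lock. */
static void shares_locked(void)
{
	pthread_mutex_unlock(&rq_lock);	/* drop the caller's lock */
	recompute_shares();		/* run the expensive part unlocked */
	pthread_mutex_lock(&rq_lock);	/* re-acquire before returning */
}

int main(void)
{
	pthread_mutex_lock(&rq_lock);
	shares_locked();
	pthread_mutex_unlock(&rq_lock);
	printf("shares = %d\n", shares);
	return 0;
}

The placement in the diff mirrors the commit message: the first call sits right after the redo: label so that find_busiest_group() decides on freshly computed shares. The price of the idiom is that anything read under rq->lock before the call may be stale once it returns, which the caller has to tolerate.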