author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-06-27 07:41:19 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-06-27 08:31:33 -0400
commit     4d8d595dfa69e1c807bf928f364668a7f30da5dc (patch)
tree       af61c1d6d53aea66fac272e7dad67ae93a832a66 /kernel
parent     b6a86c746f5b708012809958462234d19e9c8177 (diff)
sched: update aggregate when holding the RQs
It was observed that in __update_group_shares_cpu()

    rq_weight > aggregate()->rq_weight

This is caused by forks/wakeups in between the initial aggregate pass and
locking of the RQs for load balance. To avoid this situation, partially re-do
the aggregation once we have the RQs locked (which avoids new tasks from
appearing).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
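For readers skimming the change, the pattern is: compute an approximate aggregate
without the runqueue locks, then re-run only the top-down pass once the locks are
held, so newly forked or woken tasks can no longer make a per-cpu weight exceed the
cached aggregate. Below is a minimal userspace sketch of that pattern, not kernel
code: the rq_weight[] array, aggregate_pass() and balance() names are invented for
illustration, and pthread mutexes stand in for the rq spinlocks.

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t locks[NCPUS];
static unsigned long rq_weight[NCPUS];  /* may change until its lock is held */
static unsigned long aggregate_weight;  /* sum cached before the locks are taken */

/* racy snapshot, like the initial aggregate pass done without the rq locks */
static void aggregate_pass(void)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < NCPUS; i++)
		sum += rq_weight[i];
	aggregate_weight = sum;
}

static void balance(void)
{
	int i;

	for (i = 0; i < NCPUS; i++)
		pthread_mutex_lock(&locks[i]);

	/*
	 * The weights can no longer change; redo the aggregation so that
	 * rq_weight[i] <= aggregate_weight is guaranteed to hold below.
	 */
	aggregate_pass();

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d: %lu / %lu\n", i, rq_weight[i], aggregate_weight);

	for (i = 0; i < NCPUS; i++)
		pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NCPUS; i++) {
		pthread_mutex_init(&locks[i], NULL);
		rq_weight[i] = 1024;
	}

	aggregate_pass();  /* first pass, before the locks (can go stale) */
	balance();         /* second pass, redone under the locks */
	return 0;
}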
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c   20
1 file changed, 20 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 160d3c209b8f..dae20199dc9c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1721,6 +1721,11 @@ aggregate_get_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 	aggregate_group_set_shares(tg, cpu, sd);
 }
 
+static void
+aggregate_get_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
+{
+}
+
 static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
 
 static void __init init_aggregate(void)
@@ -1740,6 +1745,11 @@ static int get_aggregate(int cpu, struct sched_domain *sd)
 	return 1;
 }
 
+static void update_aggregate(int cpu, struct sched_domain *sd)
+{
+	aggregate_walk_tree(aggregate_get_down, aggregate_get_nop, cpu, sd);
+}
+
 static void put_aggregate(int cpu, struct sched_domain *sd)
 {
 	spin_unlock(&per_cpu(aggregate_lock, cpu));
@@ -1761,6 +1771,10 @@ static inline int get_aggregate(int cpu, struct sched_domain *sd)
 	return 0;
 }
 
+static inline void update_aggregate(int cpu, struct sched_domain *sd)
+{
+}
+
 static inline void put_aggregate(int cpu, struct sched_domain *sd)
 {
 }
@@ -2192,6 +2206,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+	/*
+	 * now that we have both rqs locked the rq weight won't change
+	 * anymore - so update the stats.
+	 */
+	update_aggregate(this_cpu, sd);
+
 	do {
 		unsigned long load, avg_load;
 		int local_group;