author	Tim Chen <tim.c.chen@linux.intel.com>	2014-05-20 17:39:27 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-06-05 05:52:01 -0400
commit	ed61bbc69c773465782476c7e5869fa5607fa73a (patch)
tree	71255efdc3b8d28fbbdc27a6205de9f27262e524
parent	51f2176d74ace4c3f58579a605ef5a9720befb00 (diff)
sched/balancing: Reduce the rate of needless idle load balancing
The current NO_HZ idle load balancer does load balancing for *all* idle cpus, even though the time due for load balancing on a particular idle cpu could still be a while in the future. This results in a much higher load balancing rate than necessary.

Change the behavior so that idle load balancing is done on behalf of an idle cpu only when it is due for load balancing.

On SGI's systems with over 3000 cores, the cpu responsible for idle balancing got overwhelmed with idle balancing work, which introduced a lot of OS noise to workloads. This patch fixes the issue.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: Russ Anderson <rja@sgi.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Hedi Berriche <hedi@sgi.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1400621967.2970.280.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b71d8c39f1fd..7a0c000b6005 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7193,12 +7193,17 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 		rq = cpu_rq(balance_cpu);
 
-		raw_spin_lock_irq(&rq->lock);
-		update_rq_clock(rq);
-		update_idle_cpu_load(rq);
-		raw_spin_unlock_irq(&rq->lock);
-
-		rebalance_domains(rq, CPU_IDLE);
+		/*
+		 * If time for next balance is due,
+		 * do the balance.
+		 */
+		if (time_after_eq(jiffies, rq->next_balance)) {
+			raw_spin_lock_irq(&rq->lock);
+			update_rq_clock(rq);
+			update_idle_cpu_load(rq);
+			raw_spin_unlock_irq(&rq->lock);
+			rebalance_domains(rq, CPU_IDLE);
+		}
 
 		if (time_after(this_rq->next_balance, rq->next_balance))
 			this_rq->next_balance = rq->next_balance;
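For reference, the gating check added above depends on the kernel's jiffies helpers staying correct across counter wraparound. The following is a minimal user-space sketch, not kernel code: sketch_time_after_eq() and the sample values are illustrative, but it mirrors the signed-difference idiom the kernel's time_after_eq() macro uses in include/linux/jiffies.h.

/*
 * Sketch of the wraparound-safe comparison behind time_after_eq():
 * casting the unsigned difference to a signed type keeps the check
 * correct even after the jiffies counter wraps past zero.
 */
#include <stdio.h>

static int sketch_time_after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;	/* same idiom as the kernel macro */
}

int main(void)
{
	unsigned long jiffies = 5UL;		/* counter just wrapped past 0  */
	unsigned long next_balance = -10UL;	/* was due shortly before wrap  */

	/* A naive `jiffies >= next_balance` would wrongly say "not due". */
	printf("balance due: %d\n", sketch_time_after_eq(jiffies, next_balance));
	return 0;
}

Because of this idiom, the patch's `if (time_after_eq(jiffies, rq->next_balance))` safely skips cpus whose balance interval has not yet elapsed, which is what cuts the needless balancing rate on large systems.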