author    Jason Low <jason.low2@hp.com>    2014-04-23 21:30:35 -0400
committer Ingo Molnar <mingo@kernel.org>   2014-05-07 07:33:53 -0400
commit    39a4d9ca77a31503c6317e49742341d0859d5cb2 (patch)
tree      d45517454f4823a8a0d6599837d22419076162fa /kernel
parent    fb2aa85564f4de35d25db022ab93640f8bb51821 (diff)
sched/fair: Stop searching for tasks in newidle balance if there are runnable tasks
It was found that when running some workloads (such as AIM7) on large systems with many cores, CPUs do not remain idle for long. Thus, tasks can wake/get enqueued while doing idle balancing.

In this patch, while traversing the domains in idle balance, in addition to checking for pulled_task, we add an extra check for this_rq->nr_running for determining if we should stop searching for tasks to pull. If there are runnable tasks on this rq, then we will stop traversing the domains. This reduces the chance that idle balance delays a task from running.

This patch resulted in approximately a 6% performance improvement when running a Java Server workload on an 8 socket machine.

Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: daniel.lezcano@linaro.org
Cc: alex.shi@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: efault@gmx.de
Cc: vincent.guittot@linaro.org
Cc: morten.rasmussen@arm.com
Cc: aswin@hp.com
Cc: chegu_vinod@hp.com
Link: http://lkml.kernel.org/r/1398303035-18255-4-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 051903f33eec..28ccf502c63c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6713,7 +6713,6 @@ static int idle_balance(struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
 			t0 = sched_clock_cpu(this_cpu);
 
-			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance(this_cpu, this_rq,
 						   sd, CPU_NEWLY_IDLE,
 						   &continue_balancing);
@@ -6728,7 +6727,12 @@ static int idle_balance(struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+
+		/*
+		 * Stop searching for tasks to pull if there are
+		 * now runnable tasks on this rq.
+		 */
+		if (pulled_task || this_rq->nr_running > 0)
 			break;
 	}
 	rcu_read_unlock();
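
For readers without the full kernel tree at hand, below is a minimal, standalone C sketch of the control flow the two hunks above modify. It is not kernel code: toy_domain, toy_rq, try_pull_from_domain and toy_idle_balance are hypothetical stand-ins for the sched-domain iteration and the load_balance() call; only the shape of the early-exit check matches the patch.

#include <stdio.h>

/* Hypothetical stand-ins for kernel structures; not actual kernel types. */
struct toy_domain { int has_pullable_task; };
struct toy_rq     { int nr_running; };

/* Stand-in for load_balance(): returns 1 if a task was pulled to this rq. */
static int try_pull_from_domain(struct toy_domain *sd, struct toy_rq *this_rq)
{
	if (sd->has_pullable_task) {
		this_rq->nr_running++;
		return 1;
	}
	return 0;
}

/*
 * Simplified model of the idle_balance() domain walk after the patch:
 * stop as soon as we pulled a task OR a task became runnable on this rq
 * (e.g. a wakeup enqueued something while we were balancing).
 */
static int toy_idle_balance(struct toy_domain *domains, int nr_domains,
			    struct toy_rq *this_rq)
{
	int pulled_task = 0;

	for (int i = 0; i < nr_domains; i++) {
		pulled_task = try_pull_from_domain(&domains[i], this_rq);

		/* The check added by the patch: stop if there is work to run. */
		if (pulled_task || this_rq->nr_running > 0)
			break;
	}
	return pulled_task;
}

int main(void)
{
	struct toy_domain domains[3] = { {0}, {0}, {1} };
	struct toy_rq rq = { .nr_running = 0 };

	/* Simulate a wakeup landing on this rq before the walk reaches the last domain. */
	rq.nr_running = 1;

	printf("pulled_task=%d nr_running=%d\n",
	       toy_idle_balance(domains, 3, &rq), rq.nr_running);
	return 0;
}

In this toy run the loop exits on the first domain because nr_running is already non-zero, which is exactly the case the patch targets: a newly woken task should not have to wait for the remaining domains to be scanned.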