author    Nicolas Pitre <nicolas.pitre@linaro.org>  2014-09-04 11:32:10 -0400
committer Ingo Molnar <mingo@kernel.org>            2014-09-24 08:46:59 -0400
commit    83a0a96a5f26d974580fd7251043ff70c8f1823d
tree      0e0dbe604e7e0aa6aeec4a5cccdedddc552e8c70 /kernel
parent    442bf3aaf55a91ebfec71da46a4ee10a3c905bcc
sched/fair: Leverage the idle state info when choosing the "idlest" cpu
The code in find_idlest_cpu() looks for the CPU with the smallest load.
However, if multiple CPUs are idle, the first idle CPU is selected
irrespective of the depth of its idle state. Among the idle CPUs we
should pick the one with the shallowest idle state, or the latest to
have gone idle if all idle CPUs are in the same state. The latter
applies even when cpuidle is configured out.

This patch doesn't cover the following issues:

- The idle exit latency of a CPU might be larger than the time needed
  to migrate the waking task to an already running CPU with sufficient
  capacity, and therefore performance would benefit from task packing
  in such a case (in most cases task packing is about power saving).

- Some idle states have a non-negligible and non-abortable entry latency
  which needs to run to completion before the exit latency can start.
  A concurrent patch series is making this info available to the
  cpuidle core. Once available, the entry latency with the idle
  timestamp could determine when the exit latency may be effective.

Those issues will be handled in due course. In the meantime, what is
implemented here should already improve things compared to the current
state of affairs.

Based on an initial patch from Daniel Lezcano.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-pm@vger.kernel.org
Cc: linaro-kernel@lists.linaro.org
Link: http://lkml.kernel.org/n/tip-@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
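To make the selection policy concrete before reading the diff, here is a
self-contained C sketch of the same three-way choice. It is illustrative
only: the struct cpu_info type, its fields and the pick_idlest() helper are
made up for this example; they merely stand in for idle_cpu(), idle_get_state(),
rq->idle_stamp and weighted_cpuload() as used in the patch below.

#include <stdint.h>
#include <limits.h>

/* Hypothetical per-CPU snapshot; the fields mimic idle_cpu(i),
 * idle_get_state(rq), rq->idle_stamp and weighted_cpuload(i). */
struct cpu_info {
	int		is_idle;
	int		has_idle_state;	/* 0 when cpuidle is configured out */
	unsigned int	exit_latency;
	uint64_t	idle_stamp;
	unsigned long	load;
};

/* Same policy as the patch: prefer the idle CPU in the shallowest
 * idle state; among equals (or without cpuidle info) prefer the most
 * recently idled CPU; with no idle CPU at all, fall back to the
 * least loaded one. */
static int pick_idlest(const struct cpu_info *cpus, int ncpus, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	unsigned int min_exit_latency = UINT_MAX;
	uint64_t latest_idle_timestamp = 0;
	int least_loaded_cpu = this_cpu;
	int shallowest_idle_cpu = -1;
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpus[i].is_idle) {
			if (cpus[i].has_idle_state &&
			    cpus[i].exit_latency < min_exit_latency) {
				/* A strictly shallower state wins outright. */
				min_exit_latency = cpus[i].exit_latency;
				latest_idle_timestamp = cpus[i].idle_stamp;
				shallowest_idle_cpu = i;
			} else if ((!cpus[i].has_idle_state ||
				    cpus[i].exit_latency == min_exit_latency) &&
				   cpus[i].idle_stamp > latest_idle_timestamp) {
				/* Tie, or no state info: warmest cache wins. */
				latest_idle_timestamp = cpus[i].idle_stamp;
				shallowest_idle_cpu = i;
			}
		} else {
			load = cpus[i].load;
			if (load < min_load ||
			    (load == min_load && i == this_cpu)) {
				min_load = load;
				least_loaded_cpu = i;
			}
		}
	}

	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}

Note the fallback: a busy but lightly loaded CPU is returned only when no
allowed CPU is idle at all, which is exactly what the ternary on the last
changed line of the patch encodes.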
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9ee3d4f6de47..8cb32f83c9b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,7 @@
 #include <linux/latencytop.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpuidle.h>
 #include <linux/slab.h>
 #include <linux/profile.h>
 #include <linux/interrupt.h>
@@ -4415,20 +4416,46 @@ static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
-	int idlest = -1;
+	unsigned int min_exit_latency = UINT_MAX;
+	u64 latest_idle_timestamp = 0;
+	int least_loaded_cpu = this_cpu;
+	int shallowest_idle_cpu = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-		load = weighted_cpuload(i);
-
-		if (load < min_load || (load == min_load && i == this_cpu)) {
-			min_load = load;
-			idlest = i;
+		if (idle_cpu(i)) {
+			struct rq *rq = cpu_rq(i);
+			struct cpuidle_state *idle = idle_get_state(rq);
+			if (idle && idle->exit_latency < min_exit_latency) {
+				/*
+				 * We give priority to a CPU whose idle state
+				 * has the smallest exit latency irrespective
+				 * of any idle timestamp.
+				 */
+				min_exit_latency = idle->exit_latency;
+				latest_idle_timestamp = rq->idle_stamp;
+				shallowest_idle_cpu = i;
+			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
+				   rq->idle_stamp > latest_idle_timestamp) {
+				/*
+				 * If equal or no active idle state, then
+				 * the most recently idled CPU might have
+				 * a warmer cache.
+				 */
+				latest_idle_timestamp = rq->idle_stamp;
+				shallowest_idle_cpu = i;
+			}
+		} else {
+			load = weighted_cpuload(i);
+			if (load < min_load || (load == min_load && i == this_cpu)) {
+				min_load = load;
+				least_loaded_cpu = i;
+			}
 		}
 	}
 
-	return idlest;
+	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
 /*
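For context, the idle_get_state() accessor used above comes from the parent
commit (442bf3aaf55a, "sched: Let the scheduler see CPU idle states"), which
stores the current cpuidle state pointer in the runqueue. A rough sketch of
that helper, reconstructed here as an assumption rather than quoted verbatim
from that commit:

/* kernel/sched/sched.h (approximate, per the parent commit) */
#ifdef CONFIG_CPU_IDLE
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return rq->idle_state;
}
#else
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

The !CONFIG_CPU_IDLE stub returning NULL is what makes the "!idle" leg of the
else-if above work: without cpuidle, every idle CPU falls through to the
idle-timestamp comparison, which is why the commit message notes that the
tie-break applies even when cpuidle is configured out.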