author		Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>	2008-12-18 12:56:29 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-19 03:21:52 -0500
commit		7eb52dfa70dbf5232b5b83ec4357e6bebaa8fde8 (patch)
tree		dfff4d1dcf62e992717f0934961f4557896d4945 /kernel/sched_fair.c
parent		7a09b1a27b1e5a4957e4af9951420fea02c44fba (diff)
sched: bias task wakeups to preferred semi-idle packages
Impact: tweak task wakeup to save power more aggressively

Preferred wakeup cpu (from a semi idle package) has been nominated in
find_busiest_group() in the previous patch. Use this information in
sched_mc_preferred_wakeup_cpu in function wake_idle() to bias task
wakeups if the following conditions are satisfied:

- The present cpu that is trying to wakeup the process is idle and
  waking the target process on this cpu will potentially wakeup a
  completely idle package
- The previous cpu on which the target process ran is also idle and
  hence selecting the previous cpu may wakeup a semi idle cpu package
- The task being woken up is allowed to run on the nominated cpu
  (cpu affinity and restrictions)

Basically, if both the current cpu and the previous cpu on which the
task ran are idle, select the nominated cpu from the semi idle cpu
package for running the new task that is waking up.

Cache hotness is considered, since the actual biasing happens in
wake_idle() only if the application is cache cold.

This technique will effectively move short-running bursty jobs in a
mostly idle system.

Wakeup biasing for power savings gets automatically disabled if system
utilisation increases, because the probability of finding both this_cpu
and prev_cpu idle decreases.

Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
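For illustration only, the decision rule described above can be sketched as a
small stand-alone user-space program. Everything here is a hypothetical
stand-in (struct task, cpu_is_idle(), choose_wakeup_cpu(), idle_mask are
illustrative names, not kernel APIs); the authoritative check is the
wake_idle() change in the diff below, where prev_cpu corresponds to the cpu
argument, this_cpu comes from smp_processor_id(), and the kernel-thread test
is p->mm && !(p->flags & PF_KTHREAD).

#include <stdbool.h>
#include <stdio.h>

#define POWERSAVINGS_BALANCE_WAKEUP 2   /* most aggressive sched_mc level */

/* Hypothetical stand-in for per-task state; not the kernel's task_struct. */
struct task {
	bool is_kernel_thread;          /* kernel threads are never biased  */
	unsigned long cpus_allowed;     /* affinity bitmask, bit i = CPU i  */
};

/* Hypothetical system state: bitmask of currently idle CPUs. */
static unsigned long idle_mask;

static bool cpu_is_idle(int cpu)
{
	return idle_mask & (1UL << cpu);
}

/*
 * Decision rule from the commit message: bias the wakeup to the nominated
 * CPU only when the waking CPU and the task's previous CPU are both idle,
 * the task is not a kernel thread, and its affinity allows the nominated
 * CPU.  Otherwise fall back to the previous CPU.
 */
static int choose_wakeup_cpu(int prev_cpu, int this_cpu,
			     int chosen_wakeup_cpu,
			     int power_savings_level,
			     const struct task *p)
{
	if (power_savings_level >= POWERSAVINGS_BALANCE_WAKEUP &&
	    cpu_is_idle(prev_cpu) && cpu_is_idle(this_cpu) &&
	    !p->is_kernel_thread &&
	    (p->cpus_allowed & (1UL << chosen_wakeup_cpu)))
		return chosen_wakeup_cpu;

	return prev_cpu;
}

int main(void)
{
	struct task p = { .is_kernel_thread = false, .cpus_allowed = ~0UL };

	idle_mask = (1UL << 0) | (1UL << 3);    /* CPUs 0 and 3 are idle */

	/* prev_cpu = 3, this_cpu = 0, nominated CPU = 2: biasing applies. */
	printf("wake on CPU %d\n",
	       choose_wakeup_cpu(3, 0, 2, POWERSAVINGS_BALANCE_WAKEUP, &p));
	return 0;
}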
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	18
1 file changed, 18 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 08ffffd4a410..36b5e34fa99e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1026,6 +1026,24 @@ static int wake_idle(int cpu, struct task_struct *p)
 {
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
+
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+			return chosen_wakeup_cpu;
 
 	/*
 	 * If it is idle, then it is the best cpu to run this task.