author	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
commit	b840d79631c882786925303c2b0f4fefc31845ed (patch)
tree	cda60a95d4507fe1321fc285af38982d7eb9693b	/kernel/sched_fair.c
parent	597b0d21626da4e6f09f132442caf0cc2b0eb47c (diff)
parent	c3d80000e3a812fe5a200d6bde755fbd7fa65481 (diff)
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	|   32 ++++++++++++++++++++++++--------
1 files changed, 24 insertions, 8 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..56c0efe902a7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1019,16 +1019,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
+
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+		return chosen_wakeup_cpu;
 
 	/*
 	 * If it is idle, then it is the best cpu to run this task.
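
The early return added in this hunk is guarded by a five-part condition. Restated as a standalone predicate for readability (a sketch only: wants_powersave_wakeup() is a made-up helper that does not exist in the tree, though the symbols it uses are the real ones from this kernel):

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Hypothetical restatement of the new shortcut in wake_idle(), for
 * illustration only.  All conditions must hold before the task is
 * redirected to the nominated preferred-wakeup CPU.
 */
static int wants_powersave_wakeup(struct task_struct *p, int prev_cpu,
				  int this_cpu, unsigned int chosen_cpu)
{
	return sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
	       idle_cpu(prev_cpu) &&			/* CPU the task last ran on is idle */
	       idle_cpu(this_cpu) &&			/* waking CPU is idle as well */
	       p->mm && !(p->flags & PF_KTHREAD) &&	/* user task, not a kernel thread */
	       cpu_isset(chosen_cpu, p->cpus_allowed);	/* affinity allows the move */
}

POWERSAVINGS_BALANCE_WAKEUP corresponds to sched_mc_power_savings=2, the most aggressive multi-core power-savings setting exposed under /sys/devices/system/cpu/sched_mc_power_savings.
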
@@ -1046,10 +1063,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1242,13 +1258,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
 	}
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
 		goto out;
 
 	/*
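
The last two hunks are part of the cpus4096 conversion: the old pattern of building a temporary cpumask_t with cpus_and() and walking it with for_each_cpu_mask_nr() is replaced by for_each_cpu_and() and cpumask_test_cpu(), which take struct cpumask pointers and operate on the existing masks in place. A minimal sketch of the new-style pattern, assuming a kernel of this era (pick_idle_allowed_cpu() is an invented name used only for illustration):

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Illustrative helper: walk the CPUs that are both in a scheduling
 * domain's span and in a task's affinity mask, without allocating a
 * temporary cpumask_t (which the old cpus_and() pattern required).
 */
static int pick_idle_allowed_cpu(struct sched_domain *sd, struct task_struct *p)
{
	int cpu;

	for_each_cpu_and(cpu, sched_domain_span(sd), &p->cpus_allowed) {
		if (!cpu_active(cpu))		/* not usable for migration */
			continue;
		if (idle_cpu(cpu))
			return cpu;
	}
	return -1;				/* no idle CPU in the intersection */
}

Avoiding the on-stack temporary matters because with NR_CPUS=4096 a cpumask_t is 512 bytes, which is too large to keep on the kernel stack; that is the motivation behind the struct cpumask * based helpers used throughout this merge.
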