author     Ingo Molnar <mingo@elte.hu>  2009-01-05 07:53:39 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-01-05 07:53:39 -0500
commit     5359c32eb7402124abc9964d5d53639fe0739cea (patch)
tree       d77b6967fe8420678bb9d1d936855ac0699c196a  /kernel/sched_fair.c
parent     8916edef5888c5d8fe283714416a9ca95b4c3431 (diff)
parent     fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'linus' into sched/urgent
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b808563f4f19..e0c0b4bc3f08 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1013,16 +1013,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
+
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+			return chosen_wakeup_cpu;
 
 	/*
 	 * If it is idle, then it is the best cpu to run this task.
@@ -1040,10 +1057,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1236,13 +1252,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
 	}
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
 		goto out;
 
 	/*
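
Most of these hunks convert wake_idle() and select_task_rq_fair() from the old value-based cpumask API (on-stack cpumask_t temporaries, cpus_and(), cpu_isset()) to the pointer-based helpers (for_each_cpu_and(), cpumask_test_cpu(), cpu_active()), so no full cpumask copy has to live on the kernel stack when NR_CPUS is large. Below is a minimal sketch of the new iteration idiom in isolation; the function name and the two mask parameters are illustrative stand-ins (not from this commit), only the cpumask and sched helpers are the real kernel API:

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Illustrative helper: return the first CPU that is in both masks,
 * currently active and idle, or -1 if none is found.  mask_a/mask_b
 * stand in for sched_domain_span(sd) and &p->cpus_allowed above.
 */
static int first_active_idle_cpu(const struct cpumask *mask_a,
				 const struct cpumask *mask_b)
{
	int i;

	/*
	 * Iterate the intersection of the two masks directly; unlike the
	 * pre-conversion code there is no on-stack cpumask_t and no
	 * cpus_and() copies, and activeness is checked per CPU.
	 */
	for_each_cpu_and(i, mask_a, mask_b) {
		if (cpu_active(i) && idle_cpu(i))
			return i;
	}
	return -1;
}

The same pattern is what replaces the cpus_and(tmp, ...) / for_each_cpu_mask_nr() sequence in the wake_idle() hunk above.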