Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 32
1 file changed, 24 insertions, 8 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..56c0efe902a7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1019,16 +1019,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
+
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+		return chosen_wakeup_cpu;
 
 	/*
 	 * If it is idle, then it is the best cpu to run this task.
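The first hunk adds a fast path ahead of the idle-sibling search: at sched_mc level POWERSAVINGS_BALANCE_WAKEUP, when both the waking CPU and the task's previous CPU are idle, and the task is a user task (p->mm set, PF_KTHREAD clear) whose affinity admits the root domain's preferred wakeup CPU, the wakeup is redirected there so work consolidates on one semi-idle package. A minimal userspace sketch of that decision, with the idle map, affinity bitmap, preferred CPU, and the is_kthread flag all stubbed out as stand-ins for kernel state, not kernel APIs:

```c
#include <stdbool.h>
#include <stdio.h>

#define POWERSAVINGS_BALANCE_WAKEUP 2

/* Stand-ins for kernel state (assumptions of this sketch). */
static int sched_mc_power_savings = POWERSAVINGS_BALANCE_WAKEUP;
static bool cpu_is_idle[4] = { true, true, false, false };
static unsigned long cpus_allowed = 0xfUL;	/* task affinity bitmap */
static unsigned int preferred_wakeup_cpu = 1;	/* rd->sched_mc_preferred_wakeup_cpu */

static bool idle_cpu(int cpu) { return cpu_is_idle[cpu]; }

/*
 * Return the redirected wakeup CPU, or -1 when the fast path does not
 * apply; is_kthread stands in for the p->mm / PF_KTHREAD test.
 */
static int power_savings_wake_cpu(int prev_cpu, int this_cpu, bool is_kthread)
{
	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
	    idle_cpu(prev_cpu) && idle_cpu(this_cpu) && !is_kthread &&
	    (cpus_allowed & (1UL << preferred_wakeup_cpu)))
		return preferred_wakeup_cpu;
	return -1;
}

int main(void)
{
	printf("wake on CPU %d\n", power_savings_wake_cpu(0, 1, false));
	return 0;
}
```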
@@ -1046,10 +1063,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
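The second hunk is part of the cpumask rework: the old code built a temporary cpumask_t on the stack, AND-ing sd->span with p->cpus_allowed and then with cpu_active_map, which gets expensive as NR_CPUS grows. The replacement iterates the intersection directly with for_each_cpu_and() and tests cpu_active(i) per CPU, so no temporary mask is needed. A userspace model of that pattern, with plain unsigned longs standing in for struct cpumask (the macro below is an assumption of the sketch, not the kernel's implementation):

```c
#include <stdio.h>

#define NR_CPUS 8

/*
 * Model of for_each_cpu_and(): walk the intersection of two masks
 * without materialising it in a temporary, testing each CPU in turn.
 */
#define for_each_cpu_and_model(cpu, a, b)		\
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++)	\
		if ((a) & (b) & (1UL << (cpu)))

int main(void)
{
	unsigned long domain_span  = 0x3cUL;	/* CPUs 2-5 */
	unsigned long cpus_allowed = 0x0fUL;	/* CPUs 0-3 */
	int cpu;

	for_each_cpu_and_model(cpu, domain_span, cpus_allowed)
		printf("candidate cpu %d\n", cpu);	/* prints 2 and 3 */
	return 0;
}
```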
@@ -1242,13 +1258,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
 	}
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
 		goto out;
 
 	/*
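The last hunk converts cpu_isset(), which takes a cpumask by value, to cpumask_test_cpu(), which takes a pointer; together with sched_domain_span() this lets the masks live off-stack when NR_CPUS is large. A toy model of the pointer-based test (the struct below is a stand-in for the kernel's struct cpumask, not its real layout):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct cpumask (assumption of this sketch). */
struct cpumask_model { unsigned long bits; };

/* Pointer-based membership test, mirroring cpumask_test_cpu(). */
static bool cpumask_test_cpu_model(int cpu, const struct cpumask_model *mask)
{
	return (mask->bits >> cpu) & 1UL;
}

int main(void)
{
	struct cpumask_model allowed = { .bits = 0x5UL };	/* CPUs 0 and 2 */

	printf("%d %d\n",
	       cpumask_test_cpu_model(0, &allowed),	/* 1 */
	       cpumask_test_cpu_model(1, &allowed));	/* 0 */
	return 0;
}
```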