path: root/kernel/sched_fair.c
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	25
1 files changed, 10 insertions, 15 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d2be02b8e3b..b5a357396b49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1040,7 +1040,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	new_cpu = prev_cpu;
 
 	if (prev_cpu == this_cpu)
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * 'this_sd' is the first domain that both
@@ -1054,13 +1054,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	}
 
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * Check for affine wakeup and passive balancing possibilities.
 	 */
 	if (!this_sd)
-		goto out_keep_cpu;
+		goto out;
 
 	idx = this_sd->wake_idx;
 
@@ -1069,11 +1069,11 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	new_cpu = this_cpu; /* Wake to this CPU if we can */
-
 	if (wake_affine(rq, this_sd, p, prev_cpu, this_cpu, sync, idx,
-			load, this_load, imbalance))
-		goto out_set_cpu;
+			load, this_load, imbalance)) {
+		new_cpu = this_cpu;
+		goto out;
+	}
 
 	/*
 	 * Start passive balancing when half the imbalance_pct
@@ -1083,17 +1083,12 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 		if (imbalance*this_load <= 100*load) {
 			schedstat_inc(this_sd, ttwu_move_balance);
 			schedstat_inc(p, se.nr_wakeups_passive);
-			goto out_set_cpu;
+			new_cpu = this_cpu;
+			goto out;
 		}
 	}
 
-out_keep_cpu:
-	/*
-	 * Could not wake to this_cpu.
-	 * Wake to the previous cpu instead:
-	 */
-	new_cpu = prev_cpu;
-out_set_cpu:
+out:
 	return wake_idle(new_cpu, p);
 }
 #endif /* CONFIG_SMP */
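
The shape of the change is easier to see outside the diff: instead of separate out_keep_cpu/out_set_cpu exit labels, new_cpu starts out as prev_cpu and is only overwritten at the points where waking on this_cpu wins (affine wakeup or passive balancing), so every early exit funnels through a single out: label. The stand-alone sketch below illustrates that control-flow pattern only; pick_cpu(), affine_ok and passive_ok are made-up stand-ins, not the kernel function or its helpers.

#include <stdio.h>

/*
 * Illustrative sketch of the "default result, overwrite on success,
 * one exit label" pattern this patch applies to select_task_rq_fair().
 * Not kernel code.
 */
static int pick_cpu(int prev_cpu, int this_cpu, int affine_ok, int passive_ok)
{
	int new_cpu = prev_cpu;		/* default: keep the previous CPU */

	if (prev_cpu == this_cpu)
		goto out;

	if (affine_ok) {		/* affine wakeup succeeded */
		new_cpu = this_cpu;
		goto out;
	}

	if (passive_ok) {		/* passive balancing kicked in */
		new_cpu = this_cpu;
		goto out;
	}

out:
	return new_cpu;
}

int main(void)
{
	printf("%d\n", pick_cpu(1, 0, 0, 1));	/* passive balancing -> CPU 0 */
	printf("%d\n", pick_cpu(1, 0, 0, 0));	/* no reason to move -> CPU 1 */
	return 0;
}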