Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 32 ++++++++++++++++++++------------
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 70679b266693..2d2be02b8e3b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -983,7 +983,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 static int
 wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
-            int cpu, int this_cpu, int sync, int idx,
+            int prev_cpu, int this_cpu, int sync, int idx,
             unsigned long load, unsigned long this_load,
             unsigned int imbalance)
 {
@@ -1010,7 +1010,7 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
         if (sync)
                 tl -= current->se.load.weight;
 
-        if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
+        if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
                         100*(tl + p->se.load.weight) <= imbalance*load) {
                 /*
                  * This domain has SD_WAKE_AFFINE and
@@ -1028,22 +1028,26 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
 static int select_task_rq_fair(struct task_struct *p, int sync)
 {
         struct sched_domain *sd, *this_sd = NULL;
+        int prev_cpu, this_cpu, new_cpu;
         unsigned long load, this_load;
-        int cpu, this_cpu, new_cpu;
         unsigned int imbalance;
         struct rq *rq;
         int idx;
 
-        cpu = task_cpu(p);
+        prev_cpu = task_cpu(p);
         rq = task_rq(p);
         this_cpu = smp_processor_id();
-        new_cpu = cpu;
+        new_cpu = prev_cpu;
 
-        if (cpu == this_cpu)
+        if (prev_cpu == this_cpu)
                 goto out_set_cpu;
 
+        /*
+         * 'this_sd' is the first domain that both
+         * this_cpu and prev_cpu are present in:
+         */
         for_each_domain(this_cpu, sd) {
-                if (cpu_isset(cpu, sd->span)) {
+                if (cpu_isset(prev_cpu, sd->span)) {
                         this_sd = sd;
                         break;
                 }
@@ -1062,12 +1066,12 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 
         imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
 
-        load = source_load(cpu, idx);
+        load = source_load(prev_cpu, idx);
         this_load = target_load(this_cpu, idx);
 
         new_cpu = this_cpu; /* Wake to this CPU if we can */
 
-        if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
+        if (wake_affine(rq, this_sd, p, prev_cpu, this_cpu, sync, idx,
                                 load, this_load, imbalance))
                 goto out_set_cpu;
 
@@ -1084,7 +1088,11 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         }
 
 out_keep_cpu:
-        new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
+        /*
+         * Could not wake to this_cpu.
+         * Wake to the previous cpu instead:
+         */
+        new_cpu = prev_cpu;
 out_set_cpu:
         return wake_idle(new_cpu, p);
 }
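
For context, the decision this patch renames (but does not change) can be modelled outside the kernel. The sketch below is a user-space illustration, not kernel code: affine_ok() and all sample load figures are invented for demonstration; only the two-clause comparison and the imbalance formula mirror wake_affine() and select_task_rq_fair() above.

/*
 * User-space sketch of the affine-wakeup test shown in the diff.
 * affine_ok() and the numbers in main() are illustrative assumptions;
 * only the comparison logic follows the kernel code above.
 */
#include <stdio.h>

static int affine_ok(unsigned long tl,          /* this_load, minus waker's weight if sync */
                     unsigned long load,        /* source_load(prev_cpu, idx)              */
                     unsigned long prev_target, /* target_load(prev_cpu, idx)              */
                     unsigned long tl_per_task, /* average load per task                   */
                     unsigned long task_weight, /* p->se.load.weight                       */
                     unsigned int imbalance)    /* 100 + (imbalance_pct - 100) / 2         */
{
        /*
         * Pull the task to this_cpu if either this_cpu is no busier than
         * prev_cpu and both loads together fit within one task's worth,
         * or waking here keeps this_cpu inside the domain's imbalance
         * margin relative to prev_cpu.
         */
        return (tl <= load && tl + prev_target <= tl_per_task) ||
               100 * (tl + task_weight) <= imbalance * load;
}

int main(void)
{
        unsigned int imbalance = 100 + (125 - 100) / 2; /* imbalance_pct = 125 */

        /* Nearly idle this_cpu, busy prev_cpu: affine wakeup allowed (prints 1). */
        printf("%d\n", affine_ok(0, 2048, 1024, 2048, 1024, imbalance));

        /* Busy this_cpu, idle prev_cpu: keep the task on prev_cpu (prints 0). */
        printf("%d\n", affine_ok(3072, 0, 0, 2048, 1024, imbalance));
        return 0;
}

Read this way, the first clause lets a mostly idle waking CPU pull the task, while the second tolerates up to the domain's imbalance percentage of extra load in exchange for the cache-affinity benefit of running near the waker.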