diff options
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 16 |
1 files changed, 7 insertions, 9 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 95c1295ad26d..fcbe850a5a90 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1088,7 +1088,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 #endif
 
 static int
-wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
@@ -1136,8 +1136,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-			balanced) {
+	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
+			tl_per_task)) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
@@ -1156,16 +1156,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	struct sched_domain *sd, *this_sd = NULL;
 	int prev_cpu, this_cpu, new_cpu;
 	unsigned long load, this_load;
-	struct rq *rq, *this_rq;
+	struct rq *this_rq;
 	unsigned int imbalance;
 	int idx;
 
 	prev_cpu	= task_cpu(p);
-	rq		= task_rq(p);
 	this_cpu	= smp_processor_id();
 	this_rq		= cpu_rq(this_cpu);
 	new_cpu		= prev_cpu;
 
+	if (prev_cpu == this_cpu)
+		goto out;
 	/*
 	 * 'this_sd' is the first domain that both
 	 * this_cpu and prev_cpu are present in:
@@ -1193,13 +1194,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
 			load, this_load, imbalance))
 		return this_cpu;
 
-	if (prev_cpu == this_cpu)
-		goto out;
-
 	/*
 	 * Start passive balancing when half the imbalance_pct
 	 * limit is reached.