author    Amit K. Arora <aarora@linux.vnet.ibm.com>    2008-09-30 07:45:39 -0400
committer Ingo Molnar <mingo@elte.hu>                  2008-09-30 09:25:44 -0400
commit    64b9e0294d24a4204232e13e01630b0690e48d61 (patch)
tree      a8399f65354ffe0777f78e169fd7be763148c121 /kernel/sched_fair.c
parent    b87f17242da6b2ac6db2d179b2f93fb84cff2fbe (diff)
sched: minor optimizations in wake_affine and select_task_rq_fair
This patch does the following:

 o Removes the unused variable and argument "rq".
 o Optimizes one of the "if" conditions in wake_affine(): if "balanced"
   is true, we need not evaluate the rest of the condition.
 o If this cpu is the same as the previous cpu (the one the woken-up
   task was running on when it went to sleep), there is no need to call
   wake_affine() at all.

Signed-off-by: Amit K Arora <aarora@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
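The second optimization relies on C's short-circuit evaluation of ||: the left
operand is evaluated first, and the right operand is skipped entirely when the
left one is already true. A minimal standalone sketch of the effect (not kernel
code; expensive_load_check() is a hypothetical stand-in for the
tl/target_load()/tl_per_task arithmetic in the real condition):

#include <stdio.h>

static int expensive_load_check(void)
{
	puts("expensive load check evaluated");
	return 0;
}

static int wake_condition(int balanced)
{
	/* before: (expensive_load_check() || balanced) always paid the cost */
	/* after:  (balanced || expensive_load_check()) pays it only if needed */
	return balanced || expensive_load_check();
}

int main(void)
{
	wake_condition(1);	/* prints nothing: right operand skipped */
	wake_condition(0);	/* prints the message: right operand evaluated */
	return 0;
}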
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--    kernel/sched_fair.c    16

1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 95c1295ad26d..fcbe850a5a90 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1088,7 +1088,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 #endif
 
 static int
-wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
@@ -1136,8 +1136,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-			balanced) {
+	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
+			tl_per_task)) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
@@ -1156,16 +1156,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	struct sched_domain *sd, *this_sd = NULL;
 	int prev_cpu, this_cpu, new_cpu;
 	unsigned long load, this_load;
-	struct rq *rq, *this_rq;
+	struct rq *this_rq;
 	unsigned int imbalance;
 	int idx;
 
 	prev_cpu = task_cpu(p);
-	rq = task_rq(p);
 	this_cpu = smp_processor_id();
 	this_rq = cpu_rq(this_cpu);
 	new_cpu = prev_cpu;
 
+	if (prev_cpu == this_cpu)
+		goto out;
 	/*
 	 * 'this_sd' is the first domain that both
 	 * this_cpu and prev_cpu are present in:
@@ -1193,13 +1194,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
 				load, this_load, imbalance))
 		return this_cpu;
 
-	if (prev_cpu == this_cpu)
-		goto out;
-
 	/*
 	 * Start passive balancing when half the imbalance_pct
 	 * limit is reached.
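The third optimization is the same early-exit idea applied at function scope:
the prev_cpu == this_cpu test is hoisted above the sched-domain lookup and
load sampling, so that common case pays none of that cost. A minimal
standalone sketch (not kernel code; expensive_domain_scan() is a hypothetical
stand-in for the real domain search):

#include <stdio.h>

static int expensive_domain_scan(int this_cpu)
{
	puts("walking sched domains...");	/* stands in for the costly search */
	return this_cpu;
}

static int select_cpu(int prev_cpu, int this_cpu)
{
	/* hoisted check: the common case returns before any expensive work */
	if (prev_cpu == this_cpu)
		return prev_cpu;

	return expensive_domain_scan(this_cpu);
}

int main(void)
{
	printf("%d\n", select_cpu(2, 2));	/* no scan message printed */
	printf("%d\n", select_cpu(1, 2));	/* scans, then returns 2 */
	return 0;
}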