author     Rusty Russell <rusty@rustcorp.com.au>    2009-11-02 23:23:15 -0500
committer  Ingo Molnar <mingo@elte.hu>              2009-11-04 07:16:38 -0500
commit     e2c880630438f80b474378d5487b511b07665051 (patch)
tree       0f35b58cd657a3b50e03a93a12367ab82bd569ef /kernel/sched_rt.c
parent     45a5c8bad827ebb9c9798becc15bce2e804d49e0 (diff)
cpumask: Simplify sched_rt.c
find_lowest_rq() wants to call pick_optimal_cpu() on the
intersection of sched_domain_span(sd) and lowest_mask. Rather
than doing a cpus_and into a temporary, we can open-code it.
This actually makes the code slightly clearer, IMHO.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Gregory Haskins <ghaskins@novell.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <200911031453.15350.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
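To make the transformation concrete, here is a minimal, runnable userspace sketch of the idea. It is an illustration only: a plain uint64_t stands in for struct cpumask, and mask_first(), mask_first_and(), NR_CPUS and the mask values below are invented stand-ins for cpumask_first()/cpumask_first_and(), not the kernel API itself.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 64

/* Stand-in for cpumask_first(): lowest set bit, or NR_CPUS if empty. */
static int mask_first(uint64_t mask)
{
	return mask ? __builtin_ctzll(mask) : NR_CPUS;
}

/* Stand-in for cpumask_first_and(): first bit set in both masks,
 * computed without materialising the intersection anywhere. */
static int mask_first_and(uint64_t a, uint64_t b)
{
	return mask_first(a & b);
}

int main(void)
{
	uint64_t lowest_mask = 0x00F0;	/* CPUs 4-7 hold the lowest prio */
	uint64_t domain_span = 0xFF30;	/* domain spans CPUs 4, 5 and 8-15 */

	/* Old shape: cpus_and() into a temporary, then scan it. */
	uint64_t tmp = domain_span & lowest_mask;
	printf("via temporary: %d\n", mask_first(tmp));		/* prints 4 */

	/* New shape: open-coded intersection, no temporary. */
	printf("open-coded:    %d\n", mask_first_and(lowest_mask, domain_span));
	return 0;
}

In the kernel the open-coded form matters beyond tidiness: the temporary was a cpumask_var_t that needed a GFP_ATOMIC allocation in the wakeup path, complete with a failure branch, and both disappear with cpumask_first_and().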
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  61
1 file changed, 24 insertions(+), 37 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790cddb19..5c5fef378415 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1153,29 +1153,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu,
-				   const struct cpumask *mask)
-{
-	int first;
-
-	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
-		return this_cpu;
-
-	first = cpumask_first(mask);
-	if (first < nr_cpu_ids)
-		return first;
-
-	return -1;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
-	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1198,28 +1181,26 @@ static int find_lowest_rq(struct task_struct *task)
 	 * Otherwise, we consult the sched_domains span maps to figure
 	 * out which cpu is logically closest to our hot cache data.
 	 */
-	if (this_cpu == cpu)
-		this_cpu = -1; /* Skip this_cpu opt if the same */
-
-	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
-		for_each_domain(cpu, sd) {
-			if (sd->flags & SD_WAKE_AFFINE) {
-				int best_cpu;
+	if (!cpumask_test_cpu(this_cpu, lowest_mask))
+		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
-				cpumask_and(domain_mask,
-					    sched_domain_span(sd),
-					    lowest_mask);
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
-				best_cpu = pick_optimal_cpu(this_cpu,
-							    domain_mask);
+			/*
+			 * "this_cpu" is cheaper to preempt than a
+			 * remote processor.
+			 */
+			if (this_cpu != -1 &&
+			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+				return this_cpu;
 
-				if (best_cpu != -1) {
-					free_cpumask_var(domain_mask);
-					return best_cpu;
-				}
-			}
+			best_cpu = cpumask_first_and(lowest_mask,
+						     sched_domain_span(sd));
+			if (best_cpu < nr_cpu_ids)
+				return best_cpu;
 		}
-		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1227,7 +1208,13 @@ static int find_lowest_rq(struct task_struct *task)
 	 * just give the caller *something* to work with from the compatible
 	 * locations.
 	 */
-	return pick_optimal_cpu(this_cpu, lowest_mask);
+	if (this_cpu != -1)
+		return this_cpu;
+
+	cpu = cpumask_any(lowest_mask);
+	if (cpu < nr_cpu_ids)
+		return cpu;
+	return -1;
 }
 
 /* Will lock the rq it finds */
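Read as a whole, the patched find_lowest_rq() encodes a three-step selection order. The sketch below condenses it, reusing the uint64_t masks and the mask_first()/mask_first_and() helpers from the earlier example; find_lowest_sketch() and its parameters are invented for illustration, and the SD_WAKE_AFFINE filtering and per-CPU setup are deliberately elided.

/* Selection order after the patch:
 * 1. drop this_cpu as a candidate unless it is in lowest_mask;
 * 2. per wake-affine domain: prefer this_cpu if the domain spans it,
 *    else take the first CPU in (lowest_mask & domain span);
 * 3. otherwise fall back to any CPU in lowest_mask, or -1.
 * this_cpu must be a valid CPU id (0..NR_CPUS-1) on entry.
 */
static int find_lowest_sketch(int this_cpu, uint64_t lowest_mask,
			      const uint64_t *domain_span, int ndomains)
{
	int i, best;

	if (!(lowest_mask & (1ULL << this_cpu)))
		this_cpu = -1;		/* not a useful target */

	for (i = 0; i < ndomains; i++) {
		/* this_cpu is cheaper to preempt than a remote CPU */
		if (this_cpu != -1 && (domain_span[i] & (1ULL << this_cpu)))
			return this_cpu;

		best = mask_first_and(lowest_mask, domain_span[i]);
		if (best < NR_CPUS)
			return best;
	}

	if (this_cpu != -1)
		return this_cpu;

	best = mask_first(lowest_mask);	/* cpumask_any() stand-in */
	return best < NR_CPUS ? best : -1;
}

Note why the in-loop this_cpu test needs no lowest_mask check: step 1 already cleared this_cpu to -1 unless it was among the lowest-priority candidates.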