author     Gregory Haskins <ghaskins@novell.com>   2008-01-25 15:08:10 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-25 15:08:10 -0500
commit     07b4032c9e505e2a1fbe7703aff64a153c3249be (patch)
tree       8b797b9cf80bc4f683a75557ff85ff385544cd74
parent     e7693a362ec84bb5b6fd441d8a8b4b9d568a7a0c (diff)
sched: break out search for RT tasks
Isolate the search logic into its own function so that it can be used
later in places other than find_lock_lowest_rq().
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched_rt.c | 66 +++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 39 insertions(+), 27 deletions(-)
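The change follows a common pattern: split a lock-free candidate search out
of the function that locks and revalidates the result, so the search can be
reused on its own. Below is a minimal userspace sketch of the same shape,
using pthread mutexes in place of runqueue locks and double_lock_balance();
the names, fields, and toy priority scheme are hypothetical illustrations,
not the kernel code.

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS		4
#define MAX_TRIES	3

/* Toy stand-in for a per-CPU runqueue; fields are hypothetical. */
struct rq {
	pthread_mutex_t lock;
	int highest_prio;	/* larger value = less urgent work queued */
};

static struct rq runqueues[NR_CPUS];

/*
 * Search step, split out as in the patch: scan every rq without
 * taking any locks and return the best candidate CPU, or -1.
 * Because it takes no locks, other callers can reuse it freely.
 */
static int find_lowest_rq(int task_prio)
{
	int cpu, best_cpu = -1, best_prio = INT_MIN;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int prio = runqueues[cpu].highest_prio;

		/* only rqs strictly less urgent than the task qualify */
		if (prio > task_prio && prio > best_prio) {
			best_prio = prio;
			best_cpu = cpu;
		}
	}
	return best_cpu;
}

/*
 * Lock step: take the candidate's lock, then recheck that the
 * unlocked scan result still holds; retry a bounded number of
 * times, mirroring the RT_MAX_TRIES loop in find_lock_lowest_rq().
 */
static struct rq *find_lock_lowest_rq(int task_prio)
{
	int tries;

	for (tries = 0; tries < MAX_TRIES; tries++) {
		int cpu = find_lowest_rq(task_prio);

		if (cpu == -1)
			break;

		pthread_mutex_lock(&runqueues[cpu].lock);

		/* the rq may have changed while we ran unlocked */
		if (runqueues[cpu].highest_prio > task_prio)
			return &runqueues[cpu];	/* still valid: return it locked */

		pthread_mutex_unlock(&runqueues[cpu].lock);	/* raced: retry */
	}
	return NULL;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&runqueues[cpu].lock, NULL);
		runqueues[cpu].highest_prio = 10 * cpu;	/* fake priorities */
	}

	struct rq *rq = find_lock_lowest_rq(5);
	if (rq) {
		printf("locked rq with highest_prio=%d\n", rq->highest_prio);
		pthread_mutex_unlock(&rq->lock);
	}
	return 0;
}

Because the scan runs unlocked, its answer is only a hint: the caller must
revalidate under the lock and loop, which is why the bounded retry stays in
the locking function while the search itself becomes reusable.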
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5de1aebdbd1b..ffd02720b58f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -263,54 +263,66 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-	struct rq *lowest_rq = NULL;
 	int cpu;
-	int tries;
 	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+	struct rq *lowest_rq = NULL;
 
 	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-		/*
-		 * Scan each rq for the lowest prio.
-		 */
-		for_each_cpu_mask(cpu, *cpu_mask) {
-			struct rq *rq = &per_cpu(runqueues, cpu);
+	/*
+	 * Scan each rq for the lowest prio.
+	 */
+	for_each_cpu_mask(cpu, *cpu_mask) {
+		struct rq *rq = cpu_rq(cpu);
 
-			if (cpu == this_rq->cpu)
-				continue;
+		if (cpu == rq->cpu)
+			continue;
 
-			/* We look for lowest RT prio or non-rt CPU */
-			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-				lowest_rq = rq;
-				break;
-			}
+		/* We look for lowest RT prio or non-rt CPU */
+		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+			lowest_rq = rq;
+			break;
+		}
 
-			/* no locking for now */
-			if (rq->rt.highest_prio > task->prio &&
-			    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-				lowest_rq = rq;
-			}
+		/* no locking for now */
+		if (rq->rt.highest_prio > task->prio &&
+		    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+			lowest_rq = rq;
 		}
+	}
+
+	return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+				      struct rq *rq)
+{
+	struct rq *lowest_rq = NULL;
+	int cpu;
+	int tries;
 
-		if (!lowest_rq)
+	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+		cpu = find_lowest_rq(task);
+
+		if (cpu == -1)
 			break;
 
+		lowest_rq = cpu_rq(cpu);
+
 		/* if the prio of this runqueue changed, try again */
-		if (double_lock_balance(this_rq, lowest_rq)) {
+		if (double_lock_balance(rq, lowest_rq)) {
 			/*
 			 * We had to unlock the run queue. In
 			 * the mean time, task could have
 			 * migrated already or had its affinity changed.
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
-			if (unlikely(task_rq(task) != this_rq ||
+			if (unlikely(task_rq(task) != rq ||
 				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-				     task_running(this_rq, task) ||
+				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 				spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;