Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c | 49 ++++++++++++++++++++++++++++++++++++-------------
1 files changed, 36 insertions, 13 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 52d88f193afc..61d198845f00 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -296,29 +296,36 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 }
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);
 
 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 {
-	int       cpu;
-	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
 	int       lowest_prio = -1;
+	int       lowest_cpu  = -1;
 	int       count       = 0;
+	int       cpu;
 
-	cpus_clear(*lowest_mask);
-	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
+	cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);
 
 	/*
 	 * Scan each rq for the lowest prio.
 	 */
-	for_each_cpu_mask(cpu, *valid_mask) {
+	for_each_cpu_mask(cpu, *lowest_mask) {
 		struct rq *rq = cpu_rq(cpu);
 
 		/* We look for lowest RT prio or non-rt CPU */
 		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-			if (count)
+			/*
+			 * if we already found a low RT queue
+			 * and now we found this non-rt queue
+			 * clear the mask and set our bit.
+			 * Otherwise just return the queue as is
+			 * and the count==1 will cause the algorithm
+			 * to use the first bit found.
+			 */
+			if (lowest_cpu != -1) {
 				cpus_clear(*lowest_mask);
-			cpu_set(rq->cpu, *lowest_mask);
+				cpu_set(rq->cpu, *lowest_mask);
+			}
 			return 1;
 		}
 
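The count==1 remark in the new comment describes the contract with this function's caller (presumably find_lowest_rq(), which is not part of this patch): a return value of 1 means "take the first set bit of the mask, no tie-breaking needed". Below is a minimal, hypothetical sketch of that consuming side, assuming the first_cpu() cpumask helper of this era; the function name and the fallback tie-break are illustrative only.

	/*
	 * Hypothetical consumer sketch -- not part of this patch. It leans
	 * on the invariant the new code establishes: when find_lowest_cpus()
	 * returns 1 with the mask untouched, every CPU scanned before the
	 * non-rt one has already been cleared from the mask (see the second
	 * hunk), so the first set bit is exactly the non-rt CPU that
	 * triggered the early return.
	 */
	static int pick_push_target(struct task_struct *task)
	{
		cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
		int count = find_lowest_cpus(task, lowest_mask);

		if (!count)
			return -1;	/* no suitable runqueue */

		/* one candidate (or the non-rt fast path): first bit wins */
		if (count == 1)
			return first_cpu(*lowest_mask);

		/*
		 * Several equally-low runqueues remain; a real caller would
		 * tie-break here (e.g. by cache locality). The simplest
		 * fallback is again the lowest-numbered set bit.
		 */
		return first_cpu(*lowest_mask);
	}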
@@ -328,13 +335,29 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 			if (rq->rt.highest_prio > lowest_prio) {
 				/* new low - clear old data */
 				lowest_prio = rq->rt.highest_prio;
-				if (count) {
-					cpus_clear(*lowest_mask);
-					count = 0;
-				}
+				lowest_cpu = cpu;
+				count = 0;
 			}
-			cpu_set(rq->cpu, *lowest_mask);
 			count++;
+		} else
+			cpu_clear(cpu, *lowest_mask);
+	}
+
+	/*
+	 * Clear out all the set bits that represent
+	 * runqueues that were of higher prio than
+	 * the lowest_prio.
+	 */
+	if (lowest_cpu > 0) {
+		/*
+		 * Perhaps we could add another cpumask op to
+		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+		 * Then that could be optimized to use memset and such.
+		 */
+		for_each_cpu_mask(cpu, *lowest_mask) {
+			if (cpu >= lowest_cpu)
+				break;
+			cpu_clear(cpu, *lowest_mask);
 		}
 	}
 
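The cpu_zero_bits() mentioned in the final comment is only a musing; no such cpumask op exists at this point. A sketch of what it might look like, keeping the semantics of the open-coded loop it would replace (clear every bit below a given index); the name and placement are hypothetical:

	/*
	 * Hypothetical helper, as mused about in the comment above --
	 * not an existing kernel API. Clears bits [0, nrbits) of the
	 * mask, which is equivalent to the for_each_cpu_mask() loop
	 * that clears only the set bits below lowest_cpu. The memset
	 * optimization the comment alludes to would zero the whole
	 * unsigned longs below nrbits and mask off the partial word.
	 */
	static inline void cpu_zero_bits(cpumask_t *mask, int nrbits)
	{
		int cpu;

		for (cpu = 0; cpu < nrbits; cpu++)
			cpu_clear(cpu, *mask);
	}

With such a helper, the cleanup pass above would collapse to a single call: if (lowest_cpu > 0) cpu_zero_bits(lowest_mask, lowest_cpu);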