author     Gregory Haskins <ghaskins@novell.com>   2008-01-25 15:08:13 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-25 15:08:13 -0500
commit     06f90dbd7610d51549004ea9c2ada337831eb292
tree       952ce91bf158cee7fc3df0553496be524bc38b78 /kernel
parent     17b3279b48835eb522d842eae16f541da3729c8a
sched: RT-balance, optimize
We can cheaply track the number of bits set in the cpumask for the lowest
priority CPUs. Therefore, compute the mask's weight and use it to skip
the optimal domain search logic when there is only one CPU available.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
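
To make the idea concrete, here is a small stand-alone C sketch of the same pattern. It is an illustration only, not the kernel code: the names (find_lowest, pick_target, expensive_search) and the toy bitmask type are invented for this example. The routine that builds the candidate mask already visits every member, so counting them as it goes is effectively free, and the caller can use that count to skip the costly selection step when there is nothing to choose between. As in the patch, a numerically larger priority value is treated as the better target.

    /*
     * Stand-alone user-space sketch of the counting pattern -- not kernel
     * code.  All names here are invented for illustration.
     */
    #include <stdio.h>

    #define NR_CPUS 8

    struct mask { unsigned long bits; };

    static void mask_clear(struct mask *m)        { m->bits = 0; }
    static void mask_set(struct mask *m, int cpu) { m->bits |= 1UL << cpu; }
    static int  mask_first(const struct mask *m)  { return __builtin_ctzl(m->bits); }

    /*
     * Collect every CPU whose priority value ties for the best one seen.
     * Counting the members while the mask is built costs nothing extra,
     * so return the count instead of a simple found/not-found flag.
     */
    static int find_lowest(const int prio[NR_CPUS], struct mask *lowest)
    {
            int best = -1;
            int count = 0;
            int cpu;

            mask_clear(lowest);
            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (prio[cpu] < best)
                            continue;       /* worse than the current best */
                    if (prio[cpu] > best) {
                            /* new best - drop the old candidates */
                            best = prio[cpu];
                            if (count) {
                                    mask_clear(lowest);
                                    count = 0;
                            }
                    }
                    mask_set(lowest, cpu);
                    count++;
            }
            return count;
    }

    /* Stand-in for a topology-aware search among tied candidates. */
    static int expensive_search(const struct mask *m)
    {
            return mask_first(m);
    }

    static int pick_target(const int prio[NR_CPUS])
    {
            struct mask lowest;
            int count = find_lowest(prio, &lowest);

            if (!count)
                    return -1;                      /* no targets found */
            if (count == 1)
                    return mask_first(&lowest);     /* single choice - skip the search */
            return expensive_search(&lowest);
    }

    int main(void)
    {
            int prio[NR_CPUS] = { 3, 7, 7, 2, 5, 7, 1, 0 };

            printf("target cpu: %d\n", pick_target(prio));
            return 0;
    }

The patch below has the same shape: find_lowest_cpus() now returns how many CPUs it placed in *lowest_mask, and find_lowest_rq() returns first_cpu(*lowest_mask) directly whenever that count is 1.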
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched_rt.c | 25
1 file changed, 18 insertions, 7 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 72c81322fb9a..52d88f193afc 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -303,7 +303,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 	int cpu;
 	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
 	int lowest_prio = -1;
-	int ret = 0;
+	int count = 0;
 
 	cpus_clear(*lowest_mask);
 	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
@@ -316,7 +316,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 
 		/* We look for lowest RT prio or non-rt CPU */
 		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-			if (ret)
+			if (count)
 				cpus_clear(*lowest_mask);
 			cpu_set(rq->cpu, *lowest_mask);
 			return 1;
@@ -328,14 +328,17 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
 			if (rq->rt.highest_prio > lowest_prio) {
 				/* new low - clear old data */
 				lowest_prio = rq->rt.highest_prio;
-				cpus_clear(*lowest_mask);
+				if (count) {
+					cpus_clear(*lowest_mask);
+					count = 0;
+				}
 			}
 			cpu_set(rq->cpu, *lowest_mask);
-			ret = 1;
+			count++;
 		}
 	}
 
-	return ret;
+	return count;
 }
 
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
@@ -359,9 +362,17 @@ static int find_lowest_rq(struct task_struct *task)
 	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	int count = find_lowest_cpus(task, lowest_mask);
 
-	if (!find_lowest_cpus(task, lowest_mask))
-		return -1;
+	if (!count)
+		return -1; /* No targets found */
+
+	/*
+	 * There is no sense in performing an optimal search if only one
+	 * target is found.
+	 */
+	if (count == 1)
+		return first_cpu(*lowest_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the