author     Mike Travis <travis@sgi.com>    2009-01-11 00:58:11 -0500
committer  Ingo Molnar <mingo@elte.hu>     2009-01-11 13:13:22 -0500
commit     d38b223c86db3162dc85b5a1997ac8a210e1660b (patch)
tree       3e232926a5fce47e910e4356e833be43b7b03592 /kernel/sched_rt.c
parent     c90e785be2fd9dfaef1f030d0314e44052553736 (diff)
cpumask: reduce stack usage in find_lowest_rq
Impact: reduce stack usage, cleanup
Use a cpumask_var_t in find_lowest_rq() and clean up other old
cpumask_t calls.
Signed-off-by: Mike Travis <travis@sgi.com>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  36
1 file changed, 22 insertions, 14 deletions
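For readers unfamiliar with the API being adopted: with CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and alloc_cpumask_var() moves the mask off the kernel stack; otherwise it is an ordinary embedded cpumask and the alloc/free calls cost nothing. A minimal sketch of that pattern follows; example_pick() and its arguments are hypothetical and not part of this patch.

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper illustrating the cpumask_var_t pattern the patch uses. */
static int example_pick(const struct cpumask *candidates)
{
	cpumask_var_t tmp;
	int cpu;

	/* Allocation can fail when CONFIG_CPUMASK_OFFSTACK=y, so handle it. */
	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return -1;

	/* Work in the allocated mask instead of an on-stack cpumask_t. */
	cpumask_and(tmp, candidates, cpu_online_mask);

	cpu = cpumask_first(tmp);
	if (cpu >= nr_cpu_ids)
		cpu = -1;

	free_cpumask_var(tmp);
	return cpu;
}

As in the patch below, GFP_ATOMIC avoids a sleeping allocation in scheduler context, and an allocation failure simply falls back to skipping the extra work rather than returning an error.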
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b796..da932f4c8524 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
-
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
-
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
+
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
+
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*