author	Ingo Molnar <mingo@elte.hu>	2009-03-26 16:39:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-27 12:28:43 -0400
commit	6e15cf04860074ad032e88c306bea656bbdd0f22 (patch)
tree	c346383bb7563e8d66b2f4a502f875b259c34870 /kernel/sched_rt.c
parent	be0ea69674ed95e1e98cb3687a241badc756d228 (diff)
parent	60db56422043aaa455ac7f858ce23c273220f9d9 (diff)
Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
arch/parisc/kernel/irq.c
arch/x86/include/asm/fixmap_64.h
arch/x86/include/asm/setup.h
kernel/irq/handle.c
Semantic merge:
arch/x86/include/asm/fixmap.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c79dc7844012..299d012b4394 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1122,12 +1122,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
 	first = cpumask_first(mask);
@@ -1143,6 +1144,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1175,19 +1177,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
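The hunks above replace the cpumask_t temporary that used to live on the kernel stack with a cpumask_var_t obtained from alloc_cpumask_var(), freed on every exit path, and switch pick_optimal_cpu() to the const struct cpumask * / cpumask_test_cpu() accessors. A minimal sketch of that pattern, with hypothetical function and variable names (not code from this commit), might look like:

/*
 * Sketch only: example_pick_cpu(), "allowed", "online" and "preferred"
 * are made-up names illustrating the cpumask_var_t allocate/use/free
 * pattern the diff above converts to.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int example_pick_cpu(int preferred,
			    const struct cpumask *allowed,
			    const struct cpumask *online)
{
	cpumask_var_t tmp;
	int cpu = -1;

	/* GFP_ATOMIC: the caller may be in a non-sleeping context */
	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return -1;

	/* Intersect the two masks into the heap-backed temporary */
	cpumask_and(tmp, allowed, online);

	/* cpumask_test_cpu() replaces the old cpu_isset(cpu, *mask) style */
	if (preferred != -1 && cpumask_test_cpu(preferred, tmp))
		cpu = preferred;
	else if (!cpumask_empty(tmp))
		cpu = cpumask_first(tmp);

	/* Free on every exit path, mirroring the hunks above */
	free_cpumask_var(tmp);
	return cpu;
}

With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and the bitmap is heap-allocated, so NR_CPUS-sized masks no longer consume kernel stack on large systems; without that option it compiles down to an ordinary on-stack cpumask and alloc_cpumask_var() always succeeds.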