author     Anton Blanchard <anton@samba.org>                  2010-04-26 11:32:38 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-05-06 03:41:53 -0400
commit     64fe220c13440a12d0bd8e32ebdf679e869e3ce3 (patch)
tree       64b8b01866506d1ae733e6da617b7317fa436f85
parent     af831e1e44619a7429eba8ece4eba8f977ee7c4f (diff)
powerpc/cpumask: Convert xics driver to new cpumask API
Use the new cpumask API and add some comments to clarify how get_irq_server
works.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
 arch/powerpc/platforms/pseries/xics.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)
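For readers tracking the conversion, the heart of the patch is that the old three-step lookup (an on-stack temporary, cpus_and(), first_cpu(), then a compare against NR_CPUS) collapses into a single call under the new API. A minimal sketch of the mapping, not part of the patch itself; the wrapper name first_online_cpu_in() is hypothetical, while the cpumask calls are the real kernel API:

/*
 * Old API: masks passed by value, "nothing found" signalled by a
 * return value of NR_CPUS, and an on-stack cpumask_t temporary
 * needed to hold the intersection:
 *
 *	cpumask_t tmp = CPU_MASK_NONE;
 *	cpus_and(tmp, cpu_online_map, cpumask);
 *	server = first_cpu(tmp);
 *	if (server < NR_CPUS)
 *		...
 *
 * New API: masks passed as const struct cpumask *, no temporary,
 * "nothing found" signalled by a value >= nr_cpu_ids (the runtime
 * number of cpu ids, not the compile-time maximum).
 */
static int first_online_cpu_in(const struct cpumask *cpumask)
{
	int server = cpumask_first_and(cpu_online_mask, cpumask);

	return (server < nr_cpu_ids) ? server : -1;
}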
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8b4616..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
 			  unsigned int strict_check)
 {
-	int server;
-	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t tmp = CPU_MASK_NONE;
 
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
-
-		server = first_cpu(tmp);
+	if (!cpumask_equal(cpumask, cpu_all_mask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-		if (server < NR_CPUS)
+		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
 
 		if (strict_check)
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	/*
+	 * Workaround issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+	server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 		return -1;
 	}
 
-	/*
-	 * For the moment only implement delivery to all cpus or one cpu.
-	 * Get current irq_server for the given irq
-	 */
-	irq_server = get_irq_server(virq, *cpumask, 1);
+	irq_server = get_irq_server(virq, cpumask, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
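A note on the call-site changes above: the old signature took a cpumask_t by value, so callers had to dereference the affinity pointer (*cpumask, *(irq_to_desc(virq)->affinity)) and every call copied a full NR_CPUS-bit mask onto the stack. With const struct cpumask * the callers simply hand over the pointer. A sketch of the contrast under those assumptions; old_helper() and new_helper() are hypothetical stand-ins for the two signatures:

/* Hypothetical helpers contrasting the two calling conventions. */
static int old_helper(cpumask_t mask);			/* copies NR_CPUS bits per call */
static int new_helper(const struct cpumask *mask);	/* copies one pointer */

static void example(unsigned int virq)
{
	old_helper(*(irq_to_desc(virq)->affinity));	/* old: dereference and copy */
	new_helper(irq_to_desc(virq)->affinity);	/* new: pass the pointer through */
}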