author     Andrew Morton <akpm@osdl.org>	2006-03-23 06:01:05 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:17 -0500
commit     394e3902c55e667945f6f1c2bdbc59842cce70f7
tree       f4bca0bdc0c291fda6f6949265aacec0669b9084 /arch/i386/kernel/io_apic.c
parent     63872f87a151413100678f110d1556026002809e
[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs, we must not
touch the percpu data for not-possible CPUs at all.  The correct way of
doing this is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found
very few instances of this bug, if any.  But the patch converts lots of
open-coded tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
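For illustration, the shape of the conversion is roughly as follows (a
minimal sketch; do_work() is a hypothetical stand-in, not code from this
patch):

	int i;

	/* Before: open-coded scan of every index up to NR_CPUS, with an
	 * explicit online check inside the loop body. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		do_work(i);
	}

	/* After: the helper macro hides the check and visits only
	 * online CPUs. */
	for_each_online_cpu(i) {
		do_work(i);
	}

Note that in this kernel era for_each_cpu() iterated over all possible
CPUs (it was later renamed for_each_possible_cpu()); the patch uses it
where per-cpu state must be cleared or freed regardless of whether the
CPU is currently online.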
Diffstat (limited to 'arch/i386/kernel/io_apic.c')
-rw-r--r--  arch/i386/kernel/io_apic.c  |  22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd1c60cfd294..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }