author     Andrew Morton <akpm@osdl.org>            2006-03-23 06:01:05 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-03-23 10:38:17 -0500
commit     394e3902c55e667945f6f1c2bdbc59842cce70f7 (patch)
tree       f4bca0bdc0c291fda6f6949265aacec0669b9084 /arch/i386/kernel
parent     63872f87a151413100678f110d1556026002809e (diff)
[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs, we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this
is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.
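
To make the pattern concrete, here is a minimal sketch of the conversion
(illustrative only; do_something() is a hypothetical stand-in, and
for_each_cpu() is the 2.6.16-era spelling of what was later renamed
for_each_possible_cpu()):

	/* Open-coded scan: runs the index all the way to NR_CPUS, so any
	 * per-CPU access made before (or despite) the cpu_online() test can
	 * touch slots for CPUs that can never exist -- exactly what breaks
	 * once percpu memory is allocated only for possible CPUs. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		do_something(i);
	}

	/* Preferred helpers: the iterator itself skips invalid CPUs. */
	for_each_cpu(i)			/* every possible CPU */
		do_something(i);

	for_each_online_cpu(i)		/* only currently-online CPUs */
		do_something(i);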
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/kernel')

-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c |  4 +---
-rw-r--r--  arch/i386/kernel/io_apic.c                 | 22 +++++++++-------------
-rw-r--r--  arch/i386/kernel/nmi.c                     |  4 ++--

3 files changed, 12 insertions, 18 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..3d5110b65cc3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd1c60cfd294..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
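
One detail worth calling out in the failed: hunk above: the cleanup loop now
visits every possible CPU and NULLs each pointer after freeing it.  That is
safe because irq_cpu_data is a static array (never-allocated entries start out
NULL) and kfree(NULL) is a no-op; clearing the pointer additionally makes the
cleanup idempotent.  A minimal user-space analogue, with hypothetical names:

	#include <stdlib.h>

	#define MAX_SLOTS 8			/* stand-in for NR_CPUS */

	struct slot {
		unsigned long *irq_delta;	/* may never have been allocated */
	};

	static struct slot slots[MAX_SLOTS];	/* static, so every pointer starts NULL */

	static void cleanup_all(void)
	{
		int i;

		for (i = 0; i < MAX_SLOTS; i++) {
			free(slots[i].irq_delta);	/* free(NULL) is a defined no-op */
			slots[i].irq_delta = NULL;	/* running cleanup twice stays safe */
		}
	}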
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1db34effdd8d..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*