author		Andrew Morton <akpm@osdl.org>	2006-03-23 06:01:05 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:17 -0500
commit		394e3902c55e667945f6f1c2bdbc59842cce70f7 (patch)
tree		f4bca0bdc0c291fda6f6949265aacec0669b9084
parent		63872f87a151413100678f110d1556026002809e (diff)
[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs, we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this
is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.
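
For reference, a minimal, self-contained userspace sketch of the pattern being
converted.  cpu_online_map, cpu_online() and for_each_online_cpu() below are
simplified stand-ins so the example builds on its own; they are not the
kernel's real cpumask-based definitions, and for_each_cpu() has the same shape
over the possible-CPU map:

#include <stdio.h>

#define NR_CPUS 8

/* Stand-in online map: CPUs 0, 1 and 3 online (example data only). */
static unsigned long cpu_online_map = 0x0b;
#define cpu_online(cpu)	(((cpu_online_map) >> (cpu)) & 1UL)

/* Simplified stand-in for the kernel's helper macro. */
#define for_each_online_cpu(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (!cpu_online(cpu)) continue; else

int main(void)
{
	int j;

	/* Old, open-coded pattern that this patch removes: */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			printf("CPU%d ", j);
	printf("\n");

	/* Preferred helper; visits exactly the same CPUs: */
	for_each_online_cpu(j)
		printf("CPU%d ", j);
	printf("\n");

	return 0;
}

Both loops print "CPU0 CPU1 CPU3"; the cpu_possible()/for_each_cpu() cases in
the patch convert the same way.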
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
42 files changed, 137 insertions(+), 222 deletions(-)
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 30deaf1b728a..b504def3e346 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 27ab4c30aac6..11fa326a8f62 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	switch (i) {
 	case 0:
 		seq_printf(p, " ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 
 		seq_putc(p, '\n');
 		break;
@@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
 #endif
 
 		level = group->sources[ix]->level - frv_irq_levels;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..3d5110b65cc3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd1c60cfd294..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1db34effdd8d..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 0493e8b8ec49..1accce50c2c7 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
-		if (!cpu_online(i))
-			continue;
-
+	for_each_online_cpu(i) {
 		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
 		if (!cpu_msrs[i].counters) {
 			success = 0;
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 1ce63926a3c0..a4634b06f675 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7d93992e462c..3dd76b3d2967 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 06ed90752424..78d171bfa331 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
+	for_each_online_cpu(i)
+		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 73e5e52781d8..2854ac4c9be1 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq)
 {
 	int cpu, i;
 
-	for (cpu = 0; cpu <= NR_CPUS; cpu++) {
+	for_each_online_cpu(cpu) {
 		struct slice_data *si = cpu_data[cpu].data;
 
-		if (!cpu_online(cpu))
-			continue;
-
 		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
 			if (si->level_to_irq[i] == irq) {
 				*cpunum = cpu;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 25564b7ca6bb..d6ac1c60a471 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv)
 	if ( argc == 1 ){
 
 #ifdef DUMP_MORE_STATE
-		for(i=0; i<NR_CPUS; i++) {
+		for_each_online_cpu(i) {
 			int cpus_per_line = 4;
-			if(cpu_online(i)) {
+
 			if (j++ % cpus_per_line)
 				printk(" %3d",i);
 			else
 				printk("\n %3d",i);
-			}
 		}
 		printk("\n");
 #else
@@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv)
 	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
 		printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
 #ifdef DUMP_MORE_STATE
-		for(i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
@@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv)
 	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
 #ifdef DUMP_MORE_STATE
 		printk("\nCPUSTATE CPUID\n");
-		for (i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 24dc8117b822..771a59cbd213 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,9 +135,8 @@ skip:
 #ifdef CONFIG_TAU_INT
 		if (tau_initialized){
 			seq_puts(p, "TAU: ");
-			for (j = 0; j < NR_CPUS; j++)
-				if (cpu_online(j))
-					seq_printf(p, "%10u ", tau_interrupts(j));
+			for_each_online_cpu(j)
+				seq_printf(p, "%10u ", tau_interrupts(j));
 			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
 		}
 #endif
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index be12041c0fc5..c1d62bf11f29 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
 		unsigned long bogosum = 0;
 		int i;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i))
-				bogosum += loops_per_jiffy;
+		for_each_online_cpu(i)
+			bogosum += loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP && CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index db72a92943bf..dc2770df25b3 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -272,9 +272,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d64a9bf3474..1065d87fc279 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg)
 	if (num_online_cpus() < 2)
 		return;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (target == MSG_ALL
 		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
 		    || target == i) {
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index c08ab432e958..53e9deacee82 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v)
 		/* Show summary information */
 #ifdef CONFIG_SMP
 		unsigned long bogosum = 0;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i))
-				bogosum += cpu_data[i].loops_per_jiffy;
+		for_each_online_cpu(i)
+			bogosum += cpu_data[i].loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP */
@@ -712,9 +711,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7dbe00c76c6b..d52d6d211d9f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
-	for(i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL|GFP_DMA,
 					 sizeof(void*) == 8 ? 1 : 0);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6883c00728cb..b56e79632f24 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index a067a34e0b64..c0e79843f580 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,9 +404,8 @@ static int __init topology_init(void)
 {
 	int cpu_id;
 
-	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
-		if (cpu_possible(cpu_id))
-			register_cpu(&cpu[cpu_id], cpu_id, NULL);
+	for_each_cpu(cpu_id)
+		register_cpu(&cpu[cpu_id], cpu_id, NULL);
 
 	return 0;
 }
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
index 9fc2b71dbd84..d69879c0e063 100644
--- a/arch/sh64/kernel/irq.c
+++ b/arch/sh64/kernel/irq.c
@@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 410b9a72aba9..4c60a6ef54a9 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++) {
-		if (cpu_online(j))
-			seq_printf(p, "%10u ",
+	for_each_online_cpu(j) {
+		seq_printf(p, "%10u ",
 			    kstat_cpu(cpu_logical_map(j)).irqs[i]);
 	}
 #endif
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index c6e721d8f477..ea5682ce7031 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for(i = 0; i < NR_CPUS; i++) {
-		if (cpu_possible(i))
-			load_profile_irq(i, lvl14_resolution / multiplier);
+	for_each_cpu(i) {
+		load_profile_irq(i, lvl14_resolution / multiplier);
 		prof_multiplier(i) = multiplier;
 	}
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m,
-				   "Cpu%dBogo\t: %lu.%02lu\n",
-				   i,
-				   cpu_data(i).udelay_val/(500000/HZ),
-				   (cpu_data(i).udelay_val/(5000/HZ))%100);
+	for_each_online_cpu(i) {
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n",
+			   i,
+			   cpu_data(i).udelay_val/(500000/HZ),
+			   (cpu_data(i).udelay_val/(5000/HZ))%100);
 	}
 }
 
@@ -288,8 +286,6 @@ void smp_info(struct seq_file *m)
 	int i;
 
 	seq_printf(m, "State:\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m, "CPU%d\t\t: online\n", i);
-	}
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d\t\t: online\n", i);
 }
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 52621348a56c..cea7fc6fc6e5 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i);
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (x = 0; x < NR_CPUS; x++) {
-		if (cpu_online(x))
-			seq_printf(p, "%10u ",
-				   kstat_cpu(cpu_logical_map(x)).irqs[i]);
-	}
+	for_each_online_cpu(x)
+		seq_printf(p, "%10u ",
+			   kstat_cpu(cpu_logical_map(x)).irqs[i]);
 #endif
 	seq_printf(p, "%c %s",
 		(action->flags & SA_INTERRUPT) ? '+' : ' ',
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 4219dd2ce3a2..41bb9596be48 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void)
 	} else {
 		unsigned long bogosum = 0;
 
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map)) {
-				bogosum += cpu_data(i).udelay_val;
-				smp_highest_cpu = i;
-			}
-		}
+		for_each_present_cpu(i) {
+			bogosum += cpu_data(i).udelay_val;
+			smp_highest_cpu = i;
+		}
 		SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index fbbd8a474c4c..1dde312eebda 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void)
 		cpu_present_map = cpumask_of_cpu(smp_processor_id());
 	} else {
 		unsigned long bogosum = 0;
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map))
-				bogosum += cpu_data(i).udelay_val;
-		}
+		for_each_present_cpu(i)
+			bogosum += cpu_data(i).udelay_val;
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount + 1,
 			bogosum/(500000/HZ),
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8c93ba655b33..e505a4125e35 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++) {
-			if (!cpu_online(j))
-				continue;
+		for_each_online_cpu(j) {
 			seq_printf(p, "%10u ",
 				   kstat_cpu(j).irqs[i]);
 		}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 373a701c90a5..1b6e2ade1008 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -57,25 +57,21 @@ void smp_info(struct seq_file *m)
 	int i;
 
 	seq_printf(m, "State:\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m,
-				   "CPU%d:\t\tonline\n", i);
-	}
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d:\t\tonline\n", i);
 }
 
 void smp_bogo(struct seq_file *m)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			seq_printf(m,
-				   "Cpu%dBogo\t: %lu.%02lu\n"
-				   "Cpu%dClkTck\t: %016lx\n",
-				   i, cpu_data(i).udelay_val / (500000/HZ),
-				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
-				   i, cpu_data(i).clock_tick);
+	for_each_online_cpu(i)
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n"
+			   "Cpu%dClkTck\t: %016lx\n",
+			   i, cpu_data(i).udelay_val / (500000/HZ),
+			   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
+			   i, cpu_data(i).clock_tick);
 }
 
 void __init smp_store_cpu_info(int id)
@@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	unsigned long bogosum = 0;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			bogosum += cpu_data(i).udelay_val;
-	}
+	for_each_online_cpu(i)
+		bogosum += cpu_data(i).udelay_val;
 	printk("Total of %ld processors activated "
 	       "(%lu.%02lu BogoMIPS).\n",
 	       (long) num_online_cpus(),
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 30d2a1e545fe..d8bd0b345b1e 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j=0; j<NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ",
-				   kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 
@@ -68,15 +65,13 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
 		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_printf(p, "LOC: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
 		seq_putc(p, '\n');
 #endif
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5bf17e41cd2d..66c009e10bac 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
+	for_each_online_cpu(cpu) {
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
 			endflag = 1;
 			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4cbf6d91571f..51f9bed455fa 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -113,9 +111,8 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 690a1aae0b34..0c13795dca38 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 
 	memset(stats, 0, sizeof(struct net_device_stats));
 
-	for (i=0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct net_device_stats *lb_stats;
 
-		if (!cpu_possible(i))
-			continue;
 		lb_stats = &per_cpu(loopback_stats, i);
 		stats->rx_bytes += lb_stats->rx_bytes;
 		stats->tx_bytes += lb_stats->tx_bytes;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 78193e4bbdb5..330d3869b41e 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,9 +38,8 @@ void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_online_cpu(i)
 		vfree(cpu_buffer[i].buffer);
-	}
 }
 
 int alloc_cpu_buffers(void)
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 8955720a2c6b..713e6a7505d0 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
 		while (j < xstats[i].endpoint) {
 			val = 0;
 			/* sum over all cpus */
-			for (c = 0; c < NR_CPUS; c++) {
-				if (!cpu_possible(c)) continue;
+			for_each_cpu(c)
 				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
-			}
 			len += sprintf(buffer + len, " %u", val);
 			j++;
 		}
 		buffer[len++] = '\n';
 	}
 	/* extra precision counters */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i)) continue;
+	for_each_cpu(i) {
 		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
 		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
 		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index a02564972420..7079cc837210 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		for (c = 0; c < NR_CPUS; c++) {
-			if (!cpu_possible(c)) continue;
+		for_each_cpu(c) {
 			preempt_disable();
 			/* save vn_active, it's a universal truth! */
 			vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f92482cc96c..0c017fc181c1 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			mm->context[i] = 0;
+	for_each_online_cpu(i)
+		mm->context[i] = 0;
 	if (tsk != current)
 		task_thread_info(tsk)->pcb.ptbr
 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index eb740e280d9c..420ccde6b916 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
 	cpumask_t node_cpu_mask = CPU_MASK_NONE;
 	int cpu;
 
-	for(cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
 			cpu_set(cpu, node_cpu_mask);
 	}
 
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 9044aeb37828..78cf45547e31 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset[__i], \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset[__i], \
+		       (src), (size)); \
 } while (0)
 #else /* ! SMP */
 
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index e31922c50e53..464301cd0d03 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -27,10 +27,9 @@
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 123fcaca295e..e10ed87094f0 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset[__i], \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset[__i], \
+		       (src), (size)); \
 } while (0)
 
 #else /* ! SMP */
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index aea4e51e7cd1..82032e159a76 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 #else /* ! SMP */
 
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f75..4405b4adeaba 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index eef5ccdcd731..fd647fde5ec1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -149,22 +149,16 @@ struct disk_attribute {
 ({ \
 	typeof(gendiskp->dkstats->field) res = 0; \
 	int i; \
-	for (i=0; i < NR_CPUS; i++) { \
-		if (!cpu_possible(i)) \
-			continue; \
+	for_each_cpu(i) \
 		res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
-	} \
 	res; \
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
 	int i;
-	for (i=0; i < NR_CPUS; i++) {
-		if (cpu_possible(i)) {
-			memset(per_cpu_ptr(gendiskp->dkstats, i), value,
-				sizeof (struct disk_stats));
-		}
-	}
+	for_each_cpu(i)
+		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
+			sizeof (struct disk_stats));
 }
 
 #else