path: root/arch/x86_64/kernel/irq.c
author	Andrew Morton <akpm@osdl.org>	2006-03-23 06:01:05 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:17 -0500
commit	394e3902c55e667945f6f1c2bdbc59842cce70f7 (patch)
tree	f4bca0bdc0c291fda6f6949265aacec0669b9084 /arch/x86_64/kernel/irq.c
parent	63872f87a151413100678f110d1556026002809e (diff)
[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this is
to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/irq.c')
-rw-r--r--	arch/x86_64/kernel/irq.c	21
1 files changed, 8 insertions, 13 deletions
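
For reference, a minimal sketch of the conversion pattern this patch applies across the tree; the wrapper function show_cpu_header() is an illustrative, made-up name, not a symbol touched by the patch:

#include <linux/cpumask.h>
#include <linux/seq_file.h>

/* Illustrative helper, not part of the patch. */
static void show_cpu_header(struct seq_file *p)
{
	int j;

	/* Before: open-coded walk over every slot up to NR_CPUS,
	 * filtering on cpu_online(j) by hand. */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "CPU%d ", j);

	/* After: the helper macro iterates over online CPUs only,
	 * so the explicit cpu_online() test goes away. */
	for_each_online_cpu(j)
		seq_printf(p, "CPU%d ", j);
}

The two loops print the same output; the second form is shorter and cannot accidentally index a CPU that is outside the online map.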
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 30d2a1e545f..d8bd0b345b1 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ",
-					kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 
@@ -68,15 +65,13 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
 		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_printf(p, "LOC: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
 		seq_putc(p, '\n');
 #endif
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
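
The changelog's correctness point concerns per-CPU data rather than this file's online-CPU display loops. A hedged sketch of why the iteration helper matters once per-CPU areas exist only for possible CPUs; foo_count and total_foo_count() are made-up names for illustration:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Illustrative per-CPU counter, not a symbol from this patch. */
static DEFINE_PER_CPU(unsigned long, foo_count);

static unsigned long total_foo_count(void)
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * Once per-CPU memory is allocated only for possible CPUs,
	 * dereferencing per_cpu(foo_count, cpu) for a not-possible cpu
	 * is a bug.  for_each_cpu() here is the 2.6.16-era macro that
	 * walks cpu_possible_map (later renamed for_each_possible_cpu()),
	 * so it only visits CPUs whose per-CPU area actually exists.
	 */
	for_each_cpu(cpu)
		sum += per_cpu(foo_count, cpu);

	return sum;
}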