author     Thomas Gleixner <tglx@linutronix.de>   2008-10-16 05:32:24 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-10-16 10:53:29 -0400
commit     6b39ba771e3c78d00e0abcebad270bd4212b28bc (patch)
tree       98c0b1598eff3882faaef3b0f2ee72dcbe2f0ea0
parent     c0c168ca26b54a4a6ad34fc813fe00f275fbc94c (diff)
x86: unify show_interrupts() and proc helpers

show_interrupts() and proc helpers are basically the same for 32 and 64 bit.
Move them to a shared source file.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/Makefile |   2
-rw-r--r--  arch/x86/kernel/irq.c    | 166
-rw-r--r--  arch/x86/kernel/irq_32.c | 146
-rw-r--r--  arch/x86/kernel/irq_64.c | 132
4 files changed, 167 insertions, 279 deletions
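Note on the approach (not part of the commit): the shared arch/x86/kernel/irq.c below works because both architectures expose their per-CPU interrupt counters through one irq_stats(x) accessor, &per_cpu(irq_stat, x) on 32-bit and cpu_pda(x) on 64-bit, with identical field names behind it. The following stand-alone C sketch shows the same pattern in miniature; every name in it (fake_*, FAKE_32BIT, NR_FAKE_CPUS) is illustrative, not a kernel symbol.

/*
 * Stand-alone sketch of the unification pattern used by irq.c below.
 * One accessor macro hides where the per-CPU counters live, so the
 * printing code is written exactly once.
 */
#include <stdio.h>

#define NR_FAKE_CPUS 2

struct fake_irq_stats {
	unsigned int __nmi_count;
	unsigned int apic_timer_irqs;
};

#ifdef FAKE_32BIT
/* "32-bit" flavour: counters in a plain per-CPU array of stats. */
static struct fake_irq_stats fake_irq_stat[NR_FAKE_CPUS];
# define irq_stats(cpu)	(&fake_irq_stat[cpu])
#else
/* "64-bit" flavour: counters embedded in a PDA-like per-CPU struct. */
static struct fake_pda {
	struct fake_irq_stats stats;
} fake_pda[NR_FAKE_CPUS];
# define irq_stats(cpu)	(&fake_pda[cpu].stats)
#endif

int main(void)
{
	int cpu;

	/* Shared code only ever dereferences irq_stats(cpu)->field. */
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		printf("CPU%d  NMI: %u  LOC: %u\n", cpu,
		       irq_stats(cpu)->__nmi_count,
		       irq_stats(cpu)->apic_timer_irqs);
	return 0;
}

Compiling with or without -DFAKE_32BIT switches the storage layout without touching main(), which is the property the patch relies on when it folds the duplicated code out of irq_32.c and irq_64.c.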
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index cb6ade443f00..d7e5a58ee22f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o		:= $(nostackp)
 
 obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y			+= traps.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o
 obj-y			+= setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
new file mode 100644
index 000000000000..3b9128498976
--- /dev/null
+++ b/arch/x86/kernel/irq.c
@@ -0,0 +1,166 @@
+/*
+ * Common interrupt code for 32 and 64 bit
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/smp.h>
+
+atomic_t irq_err_count;
+
+#ifdef CONFIG_X86_32
+# define irq_stats(x)		(&per_cpu(irq_stat,x))
+#else
+# define irq_stats(x)		cpu_pda(x)
+#endif
+/*
+ * /proc/interrupts printing:
+ */
+static int show_other_interrupts(struct seq_file *p)
+{
+	int j;
+
+	seq_printf(p, "NMI: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
+	seq_printf(p, " Non-maskable interrupts\n");
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "LOC: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
+	seq_printf(p, " Local timer interrupts\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "RES: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, " Rescheduling interrupts\n");
+	seq_printf(p, "CAL: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+	seq_printf(p, " Function call interrupts\n");
+	seq_printf(p, "TLB: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, " TLB shootdowns\n");
+#endif
+#ifdef CONFIG_X86_MCE
+	seq_printf(p, "TRM: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
+	seq_printf(p, " Thermal event interrupts\n");
+# ifdef CONFIG_X86_64
+	seq_printf(p, "THR: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
+	seq_printf(p, " Threshold APIC interrupts\n");
+# endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	seq_printf(p, "SPU: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
+	seq_printf(p, " Spurious interrupts\n");
+#endif
+	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC)
+	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+	return 0;
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	unsigned long flags, any_count = 0;
+	int i = *(loff_t *) v, j;
+	struct irqaction *action;
+	struct irq_desc *desc;
+
+	if (i > nr_irqs)
+		return 0;
+
+	if (i == nr_irqs)
+		return show_other_interrupts(p);
+
+	/* print header */
+	if (i == 0) {
+		seq_printf(p, " ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%-8d",j);
+		seq_putc(p, '\n');
+	}
+
+	desc = irq_to_desc(i);
+	spin_lock_irqsave(&desc->lock, flags);
+#ifndef CONFIG_SMP
+	any_count = kstat_irqs(i);
+#else
+	for_each_online_cpu(j)
+		any_count |= kstat_irqs_cpu(i, j);
+#endif
+	action = desc->action;
+	if (!action && !any_count)
+		goto out;
+
+	seq_printf(p, "%3d: ", i);
+#ifndef CONFIG_SMP
+	seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#endif
+	seq_printf(p, " %8s", desc->chip->name);
+	seq_printf(p, "-%-8s", desc->name);
+
+	if (action) {
+		seq_printf(p, " %s", action->name);
+		while ((action = action->next) != NULL)
+			seq_printf(p, ", %s", action->name);
+	}
+
+	seq_putc(p, '\n');
+out:
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return 0;
+}
+
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = irq_stats(cpu)->__nmi_count;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->apic_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+	sum += irq_stats(cpu)->irq_resched_count;
+	sum += irq_stats(cpu)->irq_call_count;
+	sum += irq_stats(cpu)->irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+	sum += irq_stats(cpu)->irq_thermal_count;
+# ifdef CONFIG_X86_64
+	sum += irq_stats(cpu)->irq_threshold_count;
+#endif
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	sum += irq_stats(cpu)->irq_spurious_count;
+#endif
+	return sum;
+}
+
+u64 arch_irq_stat(void)
+{
+	u64 sum = atomic_read(&irq_err_count);
+
+#ifdef CONFIG_X86_IO_APIC
+	sum += atomic_read(&irq_mis_count);
+#endif
+	return sum;
+}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 8d525765a6c4..6d9bf3936c78 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -253,152 +253,6 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	return 1;
 }
 
-/*
- * Interrupt statistics:
- */
-
-atomic_t irq_err_count;
-
-/*
- * /proc/interrupts printing:
- */
-
-static int show_other_interrupts(struct seq_file *p)
-{
-	int j;
-
-	seq_printf(p, "NMI: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", nmi_count(j));
-	seq_printf(p, " Non-maskable interrupts\n");
-#ifdef CONFIG_X86_LOCAL_APIC
-	seq_printf(p, "LOC: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
-	seq_printf(p, " Local timer interrupts\n");
-#endif
-#ifdef CONFIG_SMP
-	seq_printf(p, "RES: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
-	seq_printf(p, "CAL: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
-	seq_printf(p, "TLB: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-	seq_printf(p, "TRM: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).irq_thermal_count);
-	seq_printf(p, " Thermal event interrupts\n");
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	seq_printf(p, "SPU: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(irq_stat,j).irq_spurious_count);
-	seq_printf(p, " Spurious interrupts\n");
-#endif
-	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC)
-	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-	return 0;
-}
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *) v, j;
-	struct irqaction *action;
-	struct irq_desc *desc;
-
-	if (i > nr_irqs)
-		return 0;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p);
-
-	/* print header */
-	if (i == 0) {
-		seq_printf(p, " ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	spin_lock_irqsave(&desc->lock, flags);
-#ifndef CONFIG_SMP
-	any_count = kstat_irqs(i);
-#else
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-#endif
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-	seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
-	seq_printf(p, " %8s", desc->chip->name);
-	seq_printf(p, "-%-8s", desc->name);
-
-	if (action) {
-		seq_printf(p, " %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = nmi_count(cpu);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
-#endif
-#ifdef CONFIG_SMP
-	sum += per_cpu(irq_stat, cpu).irq_resched_count;
-	sum += per_cpu(irq_stat, cpu).irq_call_count;
-	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
-#endif
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	u64 sum = atomic_read(&irq_err_count);
-
-#ifdef CONFIG_X86_IO_APIC
-	sum += atomic_read(&irq_mis_count);
-#endif
-	return sum;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <mach_apic.h>
 
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 4f374294f292..39ef7feb9ea4 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,8 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-atomic_t irq_err_count;
-
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -65,136 +63,6 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 
 /*
- * Generic, controller-independent functions:
- */
-
-static int show_other_interrupts(struct seq_file *p)
-{
-	int j;
-
-	seq_printf(p, "NMI: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-	seq_printf(p, " Non-maskable interrupts\n");
-	seq_printf(p, "LOC: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-	seq_printf(p, " Local timer interrupts\n");
-#ifdef CONFIG_SMP
-	seq_printf(p, "RES: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
-	seq_printf(p, "CAL: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
-	seq_printf(p, "TLB: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-	seq_printf(p, "TRM: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
-	seq_printf(p, " Thermal event interrupts\n");
-	seq_printf(p, "THR: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
-	seq_printf(p, " Threshold APIC interrupts\n");
-#endif
-	seq_printf(p, "SPU: ");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
-	seq_printf(p, " Spurious interrupts\n");
-	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-
-	return 0;
-}
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *) v, j;
-	struct irqaction *action;
-	struct irq_desc *desc;
-
-	if (i > nr_irqs)
-		return 0;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p);
-
-	/* print header */
-	if (i == 0) {
-		seq_printf(p, " ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	spin_lock_irqsave(&desc->lock, flags);
-#ifndef CONFIG_SMP
-	any_count = kstat_irqs(i);
-#else
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-#endif
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-	seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
-	seq_printf(p, " %8s", desc->chip->name);
-	seq_printf(p, "-%-8s", desc->name);
-
-	if (action) {
-		seq_printf(p, " %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = cpu_pda(cpu)->__nmi_count;
-
-	sum += cpu_pda(cpu)->apic_timer_irqs;
-#ifdef CONFIG_SMP
-	sum += cpu_pda(cpu)->irq_resched_count;
-	sum += cpu_pda(cpu)->irq_call_count;
-	sum += cpu_pda(cpu)->irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += cpu_pda(cpu)->irq_thermal_count;
-	sum += cpu_pda(cpu)->irq_threshold_count;
-#endif
-	sum += cpu_pda(cpu)->irq_spurious_count;
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	return atomic_read(&irq_err_count);
-}
-
-/*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
  * handlers).