diff options
Diffstat (limited to 'arch/i386/kernel/irq.c')
-rw-r--r-- | arch/i386/kernel/irq.c | 67
1 file changed, 54 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 73945a3c53c4..af115004aec5 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -15,6 +15,9 @@ | |||
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
18 | #include <linux/notifier.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/delay.h> | ||
18 | 21 | ||
19 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp; | 22 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp; |
20 | EXPORT_PER_CPU_SYMBOL(irq_stat); | 23 | EXPORT_PER_CPU_SYMBOL(irq_stat); |
@@ -210,9 +213,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
210 | 213 | ||
211 | if (i == 0) { | 214 | if (i == 0) { |
212 | seq_printf(p, " "); | 215 | seq_printf(p, " "); |
213 | for (j=0; j<NR_CPUS; j++) | 216 | for_each_cpu(j) |
214 | if (cpu_online(j)) | 217 | seq_printf(p, "CPU%d ",j); |
215 | seq_printf(p, "CPU%d ",j); | ||
216 | seq_putc(p, '\n'); | 218 | seq_putc(p, '\n'); |
217 | } | 219 | } |
218 | 220 | ||
@@ -225,9 +227,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
225 | #ifndef CONFIG_SMP | 227 | #ifndef CONFIG_SMP |
226 | seq_printf(p, "%10u ", kstat_irqs(i)); | 228 | seq_printf(p, "%10u ", kstat_irqs(i)); |
227 | #else | 229 | #else |
228 | for (j = 0; j < NR_CPUS; j++) | 230 | for_each_cpu(j) |
229 | if (cpu_online(j)) | 231 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
230 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
231 | #endif | 232 | #endif |
232 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 233 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
233 | seq_printf(p, " %s", action->name); | 234 | seq_printf(p, " %s", action->name); |
@@ -240,16 +241,14 @@ skip: | |||
240 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 241 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
241 | } else if (i == NR_IRQS) { | 242 | } else if (i == NR_IRQS) { |
242 | seq_printf(p, "NMI: "); | 243 | seq_printf(p, "NMI: "); |
243 | for (j = 0; j < NR_CPUS; j++) | 244 | for_each_cpu(j) |
244 | if (cpu_online(j)) | 245 | seq_printf(p, "%10u ", nmi_count(j)); |
245 | seq_printf(p, "%10u ", nmi_count(j)); | ||
246 | seq_putc(p, '\n'); | 246 | seq_putc(p, '\n'); |
247 | #ifdef CONFIG_X86_LOCAL_APIC | 247 | #ifdef CONFIG_X86_LOCAL_APIC |
248 | seq_printf(p, "LOC: "); | 248 | seq_printf(p, "LOC: "); |
249 | for (j = 0; j < NR_CPUS; j++) | 249 | for_each_cpu(j) |
250 | if (cpu_online(j)) | 250 | seq_printf(p, "%10u ", |
251 | seq_printf(p, "%10u ", | 251 | per_cpu(irq_stat,j).apic_timer_irqs); |
252 | per_cpu(irq_stat,j).apic_timer_irqs); | ||
253 | seq_putc(p, '\n'); | 252 | seq_putc(p, '\n'); |
254 | #endif | 253 | #endif |
255 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 254 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
@@ -259,3 +258,45 @@ skip: | |||
259 | } | 258 | } |
260 | return 0; | 259 | return 0; |
261 | } | 260 | } |
261 | |||
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

/*
 * fixup_irqs - retarget interrupts away from a CPU that is going offline.
 * @map: mask of the CPUs that will remain online.
 *
 * For every IRQ whose current affinity no longer intersects an online CPU
 * in @map, break the old affinity and point the IRQ at @map instead.
 * Called on the dying CPU with local interrupts disabled; briefly
 * re-enables them at the end to let already-pending interrupts drain.
 */
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;	/* warn only once about unmovable IRQs */

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		/* IRQ2 is the 8259 cascade; it cannot be retargeted. */
		if (irq == 2)
			continue;

		cpus_and(mask, irq_affinity[irq], map);
		if (any_online_cpu(mask) == NR_CPUS) {
			/* No surviving CPU in the old affinity set. */
			printk(KERN_INFO "Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].handler->set_affinity)
			irq_desc[irq].handler->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !warned) {
			/*
			 * Set the flag exactly once instead of the original
			 * !(warned++), which kept incrementing (and could
			 * eventually overflow) on every unmovable IRQ.
			 */
			warned = 1;
			printk(KERN_WARNING "Cannot set affinity for irq %i\n",
				irq);
		}
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient.  Give pending interrupts 1ms to
	   drain before this CPU is taken down. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif
302 | |||