aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2010-01-31 15:34:36 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2010-02-16 22:02:49 -0500
commit17081102a6e0fe32cf47cdbdf8f2e9ab55273b08 (patch)
tree6bbe3f22d4414598cdfdb69510f0679b90c68175 /arch/powerpc
parent89713ed10815401a1bfe12e3a076b64048381b56 (diff)
powerpc: Convert global "BAD" interrupt to per cpu spurious
I often get asked if BAD interrupts are really bad. On some boxes (eg IBM machines running a hypervisor) there are valid cases where we are presented with an interrupt that is not for us. These cases are common enough to show up as thousands of BAD interrupts a day. Tone them down by calling them spurious. Since they can be a significant cause of OS jitter, we may as well log them per cpu so we know where they are occurring. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/include/asm/hardirq.h4
-rw-r--r--arch/powerpc/kernel/irq.c19
2 files changed, 8 insertions, 15 deletions
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index cd2d4be882aa..3147a2970125 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -9,6 +9,7 @@ typedef struct {
9 unsigned int timer_irqs; 9 unsigned int timer_irqs;
10 unsigned int pmu_irqs; 10 unsigned int pmu_irqs;
11 unsigned int mce_exceptions; 11 unsigned int mce_exceptions;
12 unsigned int spurious_irqs;
12} ____cacheline_aligned irq_cpustat_t; 13} ____cacheline_aligned irq_cpustat_t;
13 14
14DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 15DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -25,7 +26,4 @@ static inline void ack_bad_irq(unsigned int irq)
25extern u64 arch_irq_stat_cpu(unsigned int cpu); 26extern u64 arch_irq_stat_cpu(unsigned int cpu);
26#define arch_irq_stat_cpu arch_irq_stat_cpu 27#define arch_irq_stat_cpu arch_irq_stat_cpu
27 28
28extern u64 arch_irq_stat(void);
29#define arch_irq_stat arch_irq_stat
30
31#endif /* _ASM_POWERPC_HARDIRQ_H */ 29#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 710505240f2f..9ae77e52f9d3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -77,7 +77,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
77EXPORT_PER_CPU_SYMBOL(irq_stat); 77EXPORT_PER_CPU_SYMBOL(irq_stat);
78 78
79int __irq_offset_value; 79int __irq_offset_value;
80static int ppc_spurious_interrupts;
81 80
82#ifdef CONFIG_PPC32 81#ifdef CONFIG_PPC32
83EXPORT_SYMBOL(__irq_offset_value); 82EXPORT_SYMBOL(__irq_offset_value);
@@ -201,6 +200,11 @@ static int show_other_interrupts(struct seq_file *p, int prec)
201 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 200 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
202 seq_printf(p, " Local timer interrupts\n"); 201 seq_printf(p, " Local timer interrupts\n");
203 202
203 seq_printf(p, "%*s: ", prec, "SPU");
204 for_each_online_cpu(j)
205 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
206 seq_printf(p, " Spurious interrupts\n");
207
204 seq_printf(p, "%*s: ", prec, "CNT"); 208 seq_printf(p, "%*s: ", prec, "CNT");
205 for_each_online_cpu(j) 209 for_each_online_cpu(j)
206 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 210 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
@@ -211,8 +215,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
211 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 215 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
212 seq_printf(p, " Machine check exceptions\n"); 216 seq_printf(p, " Machine check exceptions\n");
213 217
214 seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);
215
216 return 0; 218 return 0;
217} 219}
218 220
@@ -282,13 +284,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
282 284
283 sum += per_cpu(irq_stat, cpu).pmu_irqs; 285 sum += per_cpu(irq_stat, cpu).pmu_irqs;
284 sum += per_cpu(irq_stat, cpu).mce_exceptions; 286 sum += per_cpu(irq_stat, cpu).mce_exceptions;
285 287 sum += per_cpu(irq_stat, cpu).spurious_irqs;
286 return sum;
287}
288
289u64 arch_irq_stat(void)
290{
291 u64 sum = ppc_spurious_interrupts;
292 288
293 return sum; 289 return sum;
294} 290}
@@ -404,8 +400,7 @@ void do_IRQ(struct pt_regs *regs)
404 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 400 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
405 handle_one_irq(irq); 401 handle_one_irq(irq);
406 else if (irq != NO_IRQ_IGNORE) 402 else if (irq != NO_IRQ_IGNORE)
407 /* That's not SMP safe ... but who cares ? */ 403 __get_cpu_var(irq_stat).spurious_irqs++;
408 ppc_spurious_interrupts++;
409 404
410 irq_exit(); 405 irq_exit();
411 set_irq_regs(old_regs); 406 set_irq_regs(old_regs);