-rw-r--r--  arch/powerpc/include/asm/hardirq.h   9
-rw-r--r--  arch/powerpc/kernel/irq.c           35
-rw-r--r--  arch/powerpc/kernel/time.c           2
-rw-r--r--  arch/powerpc/kernel/traps.c          4
4 files changed, 50 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 9bf3467581b1..cd2d4be882aa 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,6 +6,9 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
+	unsigned int timer_irqs;
+	unsigned int pmu_irqs;
+	unsigned int mce_exceptions;
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -19,4 +22,10 @@ static inline void ack_bad_irq(unsigned int irq)
 	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
 }
 
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu	arch_irq_stat_cpu
+
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat	arch_irq_stat
+
 #endif /* _ASM_POWERPC_HARDIRQ_H */
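
Note on the header changes above: each extern declaration is paired with a self-referential #define so the generic /proc/stat code can detect at preprocessing time that powerpc supplies its own arch_irq_stat_cpu()/arch_irq_stat(). A minimal sketch of the consumer-side fallback, assuming the usual pattern in fs/proc/stat.c (not part of this patch):

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu)	0	/* arch provides no extra per-CPU counts */
#endif
#ifndef arch_irq_stat
#define arch_irq_stat()		0	/* arch provides no extra global counts */
#endif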
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index b9cbb4570048..710505240f2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -196,6 +196,21 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	}
 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
 
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
+	seq_printf(p, "  Local timer interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "CNT");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+	seq_printf(p, "  Performance monitoring interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "MCE");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+	seq_printf(p, "  Machine check exceptions\n");
+
 	seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);
 
 	return 0;
@@ -258,6 +273,26 @@ out:
 	return 0;
 }
 
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+
+	sum += per_cpu(irq_stat, cpu).pmu_irqs;
+	sum += per_cpu(irq_stat, cpu).mce_exceptions;
+
+	return sum;
+}
+
+u64 arch_irq_stat(void)
+{
+	u64 sum = ppc_spurious_interrupts;
+
+	return sum;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
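
For context on how the two new helpers in irq.c get used: /proc/stat folds arch_irq_stat_cpu() into each CPU's interrupt total and adds arch_irq_stat() once at the end, so the timer, PMU and machine-check counts (plus the spurious count) show up in the "intr" line. A rough sketch under those assumptions; total_system_irqs() is a hypothetical helper, and the real loop in fs/proc/stat.c also walks every registered IRQ:

static u64 total_system_irqs(void)
{
	unsigned int cpu;
	u64 sum = 0;

	/* Per-CPU extras: timer + PMU + machine check on powerpc. */
	for_each_possible_cpu(cpu)
		sum += arch_irq_stat_cpu(cpu);

	/* Global extras: spurious interrupts. */
	return sum + arch_irq_stat();
}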
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ed1c0f58344a..1b16b9a3e49a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 	trace_timer_interrupt_entry(regs);
 
+	__get_cpu_var(irq_stat).timer_irqs++;
+
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
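
The __get_cpu_var() increment added to timer_interrupt() bumps the counter belonging to the CPU that took the decrementer exception. It is roughly equivalent to the more explicit form below (a sketch only; count_timer_irq() is a hypothetical name, and the shorthand is safe here because the handler runs with interrupts disabled on the local CPU):

static inline void count_timer_irq(void)
{
	/* Index the per-CPU stats by the current processor id. */
	per_cpu(irq_stat, smp_processor_id()).timer_irqs++;
}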
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0a320dbd950a..895da29e7db8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -483,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
 
+	__get_cpu_var(irq_stat).mce_exceptions++;
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
@@ -965,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
+	__get_cpu_var(irq_stat).pmu_irqs++;
+
 	perf_irq(regs);
 }
 
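
Taken together, the increments in time.c and traps.c are exactly the fields summed by arch_irq_stat_cpu(), so the per-CPU contribution to /proc/stat always matches the rows printed in /proc/interrupts. A hypothetical self-check illustrating that invariant, ignoring concurrent updates (check_irq_stat() is not part of the patch):

static void check_irq_stat(unsigned int cpu)
{
	irq_cpustat_t *s = &per_cpu(irq_stat, cpu);

	/* The helper reads the same three fields the handlers increment. */
	WARN_ON(arch_irq_stat_cpu(cpu) !=
		(u64)s->timer_irqs + s->pmu_irqs + s->mce_exceptions);
}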