about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
-rw-r--r--arch/parisc/include/asm/hardirq.h36
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/kernel/irq.c48
-rw-r--r--arch/parisc/kernel/smp.c3
-rw-r--r--arch/parisc/mm/init.c2
5 files changed, 86 insertions(+), 4 deletions(-)
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 0d68184a76cb..a9c0fb195253 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,11 +1,45 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <asm-generic/hardirq.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	unsigned int kernel_stack_usage;
+#endif
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+	/*
+	 * irq_tlb_count is double-counted in irq_call_count, so it must be
+	 * subtracted from irq_call_count when displaying irq_call_count
+	 */
+	unsigned int irq_tlb_count;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
+
+#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
 
 #endif /* _PARISC_HARDIRQ_H */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 242f06a5fbd8..064015547d1e 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -112,7 +112,6 @@ struct cpuinfo_parisc {
 	unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
 #ifdef CONFIG_SMP
 	unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
-	unsigned long ipi_count;    /* number ipi Interrupts */
 #endif
 	unsigned long bh_count;     /* number of times bh was invoked */
 	unsigned long prof_counter; /* per CPU profiling support */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 810f9cf89e48..a237e32ede19 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -152,6 +152,40 @@ static struct irq_chip cpu_interrupt_type = {
 	.irq_retrigger	= NULL,
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)	(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_printf(p, "  Kernel stack usage\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, "  Rescheduling interrupts\n");
+	seq_printf(p, "%*s: ", prec, "CAL");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
+					irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  Function call interrupts\n");
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  TLB shootdowns\n");
+#endif
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -219,6 +253,9 @@ int show_interrupts(struct seq_file *p, void *v)
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
 	return 0;
 }
 
@@ -340,13 +377,22 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	/* Our stack starts directly behind the thread_info struct. */
 	unsigned long stack_start = (unsigned long) current_thread_info();
 	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
 	if (regs->sr[7])
 		return;
 
-	if (likely((sp - stack_start) < (THREAD_SIZE - STACK_MARGIN)))
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
 		return;
 
 	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index fd1bb1519c2b..218e20bff9d2 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -127,7 +127,7 @@ ipi_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 
 	/* Count this now; we may make a call that never returns. */
-	p->ipi_count++;
+	inc_irq_stat(irq_call_count);
 
 	mb();	/* Order interrupt and bit testing. */
 
@@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
 
 			case IPI_RESCHEDULE:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
+				inc_irq_stat(irq_resched_count);
 				scheduler_ipi();
 				break;
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 157b931e7b09..ce939ac8622b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,6 +1069,7 @@ void flush_tlb_all(void)
 {
 	int do_recycle;
 
+	inc_irq_stat(irq_tlb_count);
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1089,6 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
+	inc_irq_stat(irq_tlb_count);
 	spin_lock(&sid_lock);
 	flush_tlb_all_local(NULL);
 	recycle_sids();