author		Helge Deller <deller@gmx.de>	2013-05-10 17:24:01 -0400
committer	Helge Deller <deller@gmx.de>	2013-05-11 15:10:15 -0400
commit		416821d3d68164909b2cbcf398e4ba0797f5f8a2 (patch)
tree		c7ba229007acf58d5b04763177aa04b5ddca98d9 /arch
parent		2dbd3cac87250a0d44e07acc86c4224a08522709 (diff)
parisc: implement irq stacks - part 2 (v2)
This patch fixes a few build issues which were introduced with the last irq stack patch, e.g. the combination of the stack overflow check and the irq stack. Furthermore, we now do proper locking and change the irq bh handler to use the irq stack as well.

In /proc/interrupts one can now monitor how large the irq stack has grown and how often it was preferred over the kernel stack. IRQ stacks are now enabled by default to make sure that we do not overflow the kernel stack by accident.

Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/Kconfig	2
-rw-r--r--	arch/parisc/include/asm/hardirq.h	9
-rw-r--r--	arch/parisc/include/asm/processor.h	3
-rw-r--r--	arch/parisc/kernel/irq.c	101
-rw-r--r--	arch/parisc/mm/init.c	4
5 files changed, 102 insertions, 17 deletions
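
For illustration, with CONFIG_DEBUG_STACKOVERFLOW and CONFIG_IRQSTACKS enabled, the two new rows this patch adds would show up in /proc/interrupts next to the existing STK row. The counter values below are made up for the example; only the layout follows the seq_printf() format strings in arch_show_interrupts() in the irq.c diff:

             CPU0       CPU1
  STK:       6288       6320   Kernel stack usage
  IST:       1824       1728   Interrupt stack usage
  ISC:      54321      49876   Interrupt stack usage counter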
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index cad060f288cf..6507dabdd5dd 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -245,7 +245,7 @@ config SMP
 
 config IRQSTACKS
 	bool "Use separate kernel stacks when processing interrupts"
-	default n
+	default y
 	help
 	  If you say Y here the kernel will use separate kernel stacks
 	  for handling hard and soft interrupts. This can help avoid
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 12373c4dabab..c19f7138ba48 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -11,10 +11,18 @@
 #include <linux/threads.h>
 #include <linux/irq.h>
 
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
 	unsigned int __softirq_pending;
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	unsigned int kernel_stack_usage;
+#ifdef CONFIG_IRQSTACKS
+	unsigned int irq_stack_usage;
+	unsigned int irq_stack_counter;
+#endif
 #endif
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
 #define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member)	__this_cpu_inc(irq_stat.member)
 #define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
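
A note on __ARCH_HAS_DO_SOFTIRQ: defining it tells the generic softirq code that the architecture supplies its own do_softirq() entry point (added to arch/parisc/kernel/irq.c below), so pending bottom halves also run on the irq stack. In kernels of this vintage the generic implementation in kernel/softirq.c is compiled out roughly like this (a from-memory sketch, not the verbatim source):

  /* kernel/softirq.c (sketch): the generic entry point exists only
   * when the architecture does not override it. */
  #ifndef __ARCH_HAS_DO_SOFTIRQ
  asmlinkage void do_softirq(void)
  {
  	__u32 pending;
  	unsigned long flags;
  
  	if (in_interrupt())
  		return;
  
  	local_irq_save(flags);
  	pending = local_softirq_pending();
  	if (pending)
  		__do_softirq();	/* runs on the current stack */
  	local_irq_restore(flags);
  }
  #endif

The parisc do_softirq() added below is the same skeleton, with __do_softirq() routed through execute_on_irq_stack().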
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 064015547d1e..cfbc43929cf6 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -63,10 +63,13 @@
  */
 #ifdef __KERNEL__
 
+#include <linux/spinlock_types.h>
+
 #define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
 
 union irq_stack_union {
 	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	raw_spinlock_t lock;
 };
 
 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
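
Because of the union, the new raw_spinlock_t shares storage with the lowest words of the irq stack; execute_on_irq_stack() (below) therefore starts the usable stack just past the lock, rounded up to a 64-byte boundary. A minimal userspace sketch of that address arithmetic, with an int as a hypothetical stand-in for raw_spinlock_t:

  #include <stdint.h>
  #include <stdio.h>
  
  #define IRQ_STACK_SIZE	(4096 << 2)	/* 16k, as in processor.h */
  #define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
  
  union irq_stack_union {
  	unsigned long stack[IRQ_STACK_SIZE / sizeof(unsigned long)];
  	int lock;	/* stand-in for raw_spinlock_t */
  };
  
  static union irq_stack_union demo_stack;
  
  int main(void)
  {
  	uintptr_t base = (uintptr_t)&demo_stack.stack;
  	/* Skip the lock overlaying the stack bottom, then align,
  	 * mirroring ALIGN(irq_stack + sizeof(...), 64) in irq.c. */
  	uintptr_t sp = ALIGN_UP(base + sizeof(demo_stack.lock), 64);
  
  	printf("stack base:      %p\n", (void *)base);
  	printf("first usable sp: %p (lock preserved below it)\n", (void *)sp);
  	return 0;
  }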
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e255db0bb761..55237a70e197 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "STK");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-	seq_printf(p, " Kernel stack usage\n");
+	seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, " Interrupt stack usage\n");
+	seq_printf(p, "%*s: ", prec, "ISC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
+	seq_puts(p, " Interrupt stack usage counter\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
+	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
+	seq_puts(p, " Function call interrupts\n");
 #endif
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
+	seq_puts(p, " TLB shootdowns\n");
 	return 0;
 }
 
@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	unsigned long sp = regs->gr[30];
 	unsigned long stack_usage;
 	unsigned int *last_usage;
+	int cpu = smp_processor_id();
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 
 	/* calculate kernel stack usage */
 	stack_usage = sp - stack_start;
-	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+		"(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
 
 	if (unlikely(stack_usage > *last_usage))
 		*last_usage = stack_usage;
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
398 "(sp:%lx, stk bottom-top:%lx-%lx)\n", 433 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
399 current->comm, sp, stack_start, stack_start + THREAD_SIZE); 434 current->comm, sp, stack_start, stack_start + THREAD_SIZE);
400 435
436#ifdef CONFIG_IRQSTACKS
437panic_check:
438#endif
401 if (sysctl_panic_on_stackoverflow) 439 if (sysctl_panic_on_stackoverflow)
402 panic("low stack detected by irq handler - check messages\n"); 440 panic("low stack detected by irq handler - check messages\n");
403#endif 441#endif
404} 442}
405 443
406#ifdef CONFIG_IRQSTACKS 444#ifdef CONFIG_IRQSTACKS
407DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); 445DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
446 .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
447 };
408 448
409static void execute_on_irq_stack(void *func, unsigned long param1) 449static void execute_on_irq_stack(void *func, unsigned long param1)
410{ 450{
411 unsigned long *irq_stack_start; 451 union irq_stack_union *union_ptr;
412 unsigned long irq_stack; 452 unsigned long irq_stack;
413 int cpu = smp_processor_id(); 453 raw_spinlock_t *irq_stack_in_use;
414 454
415 irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; 455 union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
416 irq_stack = (unsigned long) irq_stack_start; 456 irq_stack = (unsigned long) &union_ptr->stack;
417 irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ 457 irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
458 64); /* align for stack frame usage */
418 459
419 BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */ 460 /* We may be called recursive. If we are already using the irq stack,
420 *irq_stack_start = 1; 461 * just continue to use it. Use spinlocks to serialize
462 * the irq stack usage.
463 */
464 irq_stack_in_use = &union_ptr->lock;
465 if (!raw_spin_trylock(irq_stack_in_use)) {
466 void (*direct_call)(unsigned long p1) = func;
467
468 /* We are using the IRQ stack already.
469 * Do direct call on current stack. */
470 direct_call(param1);
471 return;
472 }
421 473
422 /* This is where we switch to the IRQ stack. */ 474 /* This is where we switch to the IRQ stack. */
423 call_on_stack(param1, func, irq_stack); 475 call_on_stack(param1, func, irq_stack);
424 476
425 *irq_stack_start = 0; 477 __inc_irq_stat(irq_stack_counter);
478
479 /* free up irq stack usage. */
480 do_raw_spin_unlock(irq_stack_in_use);
481}
482
483asmlinkage void do_softirq(void)
484{
485 __u32 pending;
486 unsigned long flags;
487
488 if (in_interrupt())
489 return;
490
491 local_irq_save(flags);
492
493 pending = local_softirq_pending();
494
495 if (pending)
496 execute_on_irq_stack(__do_softirq, 0);
497
498 local_irq_restore(flags);
426} 499}
427#endif /* CONFIG_IRQSTACKS */ 500#endif /* CONFIG_IRQSTACKS */
428 501
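The raw_spin_trylock() pattern above replaces the old BUG_ON() recursion check: a nested caller that finds the per-cpu lock held simply runs the handler on whatever stack it is already on. The same idiom, reduced to a self-contained userspace program with a pthread spinlock standing in for raw_spinlock_t (function names here are hypothetical, for illustration only; build with -lpthread):

  #include <pthread.h>
  #include <stdio.h>
  
  static pthread_spinlock_t stack_in_use;
  
  static void run_on_dedicated_stack(void (*func)(unsigned long),
  				   unsigned long param);
  
  static void handler(unsigned long level)
  {
  	printf("handler at nesting level %lu\n", level);
  	if (level > 0)
  		/* Re-enter while the lock is held: falls back to a
  		 * direct call, as in execute_on_irq_stack(). */
  		run_on_dedicated_stack(handler, level - 1);
  }
  
  static void run_on_dedicated_stack(void (*func)(unsigned long),
  				   unsigned long param)
  {
  	if (pthread_spin_trylock(&stack_in_use)) {
  		/* Lock busy: already "on the irq stack", call directly. */
  		func(param);
  		return;
  	}
  	/* In the kernel, call_on_stack() would switch the stack pointer
  	 * here; the demo just calls the function. */
  	func(param);
  	pthread_spin_unlock(&stack_in_use);
  }
  
  int main(void)
  {
  	pthread_spin_init(&stack_in_use, PTHREAD_PROCESS_PRIVATE);
  	run_on_dedicated_stack(handler, 2);
  	return 0;
  }
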
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ce939ac8622b..1c965642068b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
 {
 	int do_recycle;
 
-	inc_irq_stat(irq_tlb_count);
+	__inc_irq_stat(irq_tlb_count);
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
-	inc_irq_stat(irq_tlb_count);
+	__inc_irq_stat(irq_tlb_count);
 	spin_lock(&sid_lock);
 	flush_tlb_all_local(NULL);
 	recycle_sids();