path: root/arch/parisc/kernel
author    Helge Deller <deller@gmx.de>  2013-05-10 17:24:01 -0400
committer Helge Deller <deller@gmx.de>  2013-05-11 15:10:15 -0400
commit    416821d3d68164909b2cbcf398e4ba0797f5f8a2 (patch)
tree      c7ba229007acf58d5b04763177aa04b5ddca98d9 /arch/parisc/kernel
parent    2dbd3cac87250a0d44e07acc86c4224a08522709 (diff)
parisc: implement irq stacks - part 2 (v2)
This patch fixes a few build issues which were introduced with the last irq stack
patch, e.g. the combination of the stack overflow check and the irq stack.
Furthermore, we now do proper locking and change the irq bottom-half handler to use
the irq stack as well. In /proc/interrupts one can now monitor how large the irq
stack has grown and how often it was preferred over the kernel stack.

IRQ stacks are now enabled by default, just to make sure that we do not overflow
the kernel stack by accident.

Signed-off-by: Helge Deller <deller@gmx.de>
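For illustration, the new IST and ISC rows added by this patch appear in
/proc/interrupts next to the existing STK row, one column per online CPU. The
excerpt below only shows the layout produced by arch_show_interrupts(); the
counts are hypothetical:

     STK:       5344       4960   Kernel stack usage
     IST:       2624       3072   Interrupt stack usage
     ISC:        187        203   Interrupt stack usage counter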
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/irq.c  101
1 file changed, 87 insertions(+), 14 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e255db0bb761..55237a70e197 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "STK");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-	seq_printf(p, " Kernel stack usage\n");
+	seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, " Interrupt stack usage\n");
+	seq_printf(p, "%*s: ", prec, "ISC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
+	seq_puts(p, " Interrupt stack usage counter\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
+	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
+	seq_puts(p, " Function call interrupts\n");
 #endif
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
+	seq_puts(p, " TLB shootdowns\n");
 	return 0;
 }
 
@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	unsigned long sp = regs->gr[30];
 	unsigned long stack_usage;
 	unsigned int *last_usage;
+	int cpu = smp_processor_id();
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 
 	/* calculate kernel stack usage */
 	stack_usage = sp - stack_start;
-	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+			"(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
 
 	if (unlikely(stack_usage > *last_usage))
 		*last_usage = stack_usage;
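To summarize the control flow this hunk adds: if the stack pointer is more than
THREAD_SIZE away from the task's kernel stack base, the CPU cannot be running on
that stack, so the usage check is redone against the per-CPU irq stack base
instead, with STACK_MARGIN as the warning threshold. The stand-alone sketch below
mirrors that decision in plain userspace C; the constants, addresses and the
classify() helper are illustrative only and are not kernel code.

    #include <stdio.h>

    #define THREAD_SIZE_DEMO    (16 * 1024)   /* illustrative sizes only */
    #define IRQ_STACK_SIZE_DEMO (16 * 1024)
    #define STACK_MARGIN_DEMO   1024

    /* Classify which stack 'sp' lives on and report its usage, mirroring the
     * two-stage check in stack_overflow_check(). */
    static void classify(unsigned long sp, unsigned long kstack_start,
                         unsigned long irqstack_start)
    {
            unsigned long usage = sp - kstack_start;

            if (usage <= THREAD_SIZE_DEMO) {
                    printf("kernel stack, %lu bytes used\n", usage);
                    return;
            }

            /* sp is not within the task stack; assume we run on the irq stack */
            usage = sp - irqstack_start;
            if (usage >= IRQ_STACK_SIZE_DEMO - STACK_MARGIN_DEMO)
                    printf("irq stack, %lu bytes used -- about to overflow\n", usage);
            else
                    printf("irq stack, %lu bytes used\n", usage);
    }

    int main(void)
    {
            /* hypothetical stack base addresses for demonstration only */
            unsigned long kstack = 0x10000000, irqstack = 0x20000000;

            classify(kstack + 0x1200, kstack, irqstack);   /* on the kernel stack */
            classify(irqstack + 0x3e80, kstack, irqstack); /* on the irq stack */
            return 0;
    }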
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
398 "(sp:%lx, stk bottom-top:%lx-%lx)\n", 433 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
399 current->comm, sp, stack_start, stack_start + THREAD_SIZE); 434 current->comm, sp, stack_start, stack_start + THREAD_SIZE);
400 435
436#ifdef CONFIG_IRQSTACKS
437panic_check:
438#endif
401 if (sysctl_panic_on_stackoverflow) 439 if (sysctl_panic_on_stackoverflow)
402 panic("low stack detected by irq handler - check messages\n"); 440 panic("low stack detected by irq handler - check messages\n");
403#endif 441#endif
404} 442}
405 443
406#ifdef CONFIG_IRQSTACKS 444#ifdef CONFIG_IRQSTACKS
407DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); 445DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
446 .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
447 };
408 448
409static void execute_on_irq_stack(void *func, unsigned long param1) 449static void execute_on_irq_stack(void *func, unsigned long param1)
410{ 450{
411 unsigned long *irq_stack_start; 451 union irq_stack_union *union_ptr;
412 unsigned long irq_stack; 452 unsigned long irq_stack;
413 int cpu = smp_processor_id(); 453 raw_spinlock_t *irq_stack_in_use;
414 454
415 irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; 455 union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
416 irq_stack = (unsigned long) irq_stack_start; 456 irq_stack = (unsigned long) &union_ptr->stack;
417 irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ 457 irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
458 64); /* align for stack frame usage */
418 459
419 BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */ 460 /* We may be called recursive. If we are already using the irq stack,
420 *irq_stack_start = 1; 461 * just continue to use it. Use spinlocks to serialize
462 * the irq stack usage.
463 */
464 irq_stack_in_use = &union_ptr->lock;
465 if (!raw_spin_trylock(irq_stack_in_use)) {
466 void (*direct_call)(unsigned long p1) = func;
467
468 /* We are using the IRQ stack already.
469 * Do direct call on current stack. */
470 direct_call(param1);
471 return;
472 }
421 473
422 /* This is where we switch to the IRQ stack. */ 474 /* This is where we switch to the IRQ stack. */
423 call_on_stack(param1, func, irq_stack); 475 call_on_stack(param1, func, irq_stack);
424 476
425 *irq_stack_start = 0; 477 __inc_irq_stat(irq_stack_counter);
478
479 /* free up irq stack usage. */
480 do_raw_spin_unlock(irq_stack_in_use);
481}
482
483asmlinkage void do_softirq(void)
484{
485 __u32 pending;
486 unsigned long flags;
487
488 if (in_interrupt())
489 return;
490
491 local_irq_save(flags);
492
493 pending = local_softirq_pending();
494
495 if (pending)
496 execute_on_irq_stack(__do_softirq, 0);
497
498 local_irq_restore(flags);
426} 499}
427#endif /* CONFIG_IRQSTACKS */ 500#endif /* CONFIG_IRQSTACKS */
428 501
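The heart of the locking change is the trylock-or-fall-back pattern in
execute_on_irq_stack(): the per-CPU raw spinlock doubles as an "irq stack in
use" flag, so a nested invocation simply runs the handler on whatever stack it
is already on instead of hitting the old BUG_ON(). Below is a minimal userspace
analogue of that pattern, sketched with a C11 atomic flag in place of the
kernel's raw spinlock and a plain function call in place of call_on_stack();
everything in it is illustrative and none of it is kernel API.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag irq_stack_in_use = ATOMIC_FLAG_INIT;
    static unsigned int irq_stack_counter; /* analogue of irq_stat.irq_stack_counter */

    static void handler(unsigned long arg)
    {
            printf("handler(%lu) running\n", arg);
    }

    /* Run 'func' on the (simulated) irq stack if it is free, otherwise call it
     * directly, mirroring execute_on_irq_stack() after this patch. */
    static void execute_on_irq_stack_demo(void (*func)(unsigned long),
                                          unsigned long param1)
    {
            if (atomic_flag_test_and_set(&irq_stack_in_use)) {
                    /* Stack already in use (recursion): call on the current stack. */
                    func(param1);
                    return;
            }

            /* Here the kernel would switch stacks via call_on_stack(). */
            func(param1);
            irq_stack_counter++;

            atomic_flag_clear(&irq_stack_in_use);
    }

    int main(void)
    {
            execute_on_irq_stack_demo(handler, 1);
            execute_on_irq_stack_demo(handler, 2);
            printf("irq stack used %u times\n", irq_stack_counter);
            return 0;
    }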