author		Helge Deller <deller@gmx.de>	2013-05-24 17:27:35 -0400
committer	Helge Deller <deller@gmx.de>	2013-05-24 17:29:01 -0400
commit		d96b51ec14650b490ab98e738bcc02309396e5bc (patch)
tree		bcc82f90634f689e629ed692f2fa6f1654d3cbbf /arch/parisc/kernel
parent		2c2d32bed1a1bb6121494965b31badb280f04b0e (diff)
parisc: fix irq stack on UP and SMP
The logic to detect whether the irq stack is already in use with
raw_spin_trylock() is wrong, because it generates a "trylock failure
on UP" error message with CONFIG_SMP=n and CONFIG_DEBUG_SPINLOCK=y.

arch_spin_trylock() can't be used either, since in the CONFIG_SMP=n
case no atomic protection is given and we are reentrant here. A mutex
didn't work either and would bring more overhead by turning off
interrupts.

So, let's use the fastest path for parisc, which is the ldcw
instruction.

Counting how often the irq stack was used is pretty useless, so just
drop that piece of code.

Signed-off-by: Helge Deller <deller@gmx.de>
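To make the locking pattern concrete: ldcw atomically loads a word and
writes zero back in a single instruction, and on PA-RISC 1.x its operand
must be 16-byte aligned, which is why the union below carries a slock[4]
array and why __ldcw_align() is used to pick the aligned word. A minimal
sketch of the resulting trylock/release pattern, assuming the __ldcw()
and __ldcw_align() helpers from asm/ldcw.h; union_ptr stands for the
per-cpu irq_stack_union pointer and the handle_irq_on_*() functions are
hypothetical placeholders:

	/* Sketch only: __ldcw() returns the old word value (1 = stack
	 * free) and leaves 0 behind, so a nonzero return means we now
	 * own the per-cpu irq stack. */
	volatile unsigned int *lock;

	lock = (volatile unsigned int *) __ldcw_align(union_ptr);
	if (__ldcw(lock)) {
		handle_irq_on_irq_stack();	/* switched to irq stack */
		*lock = 1;			/* release: a plain store */
	} else {
		/* word was already 0: we are recursing while the irq
		 * stack is in use, so stay on the current stack. */
		handle_irq_on_current_stack();
	}

A reentrant caller simply reads 0 and falls through to the direct call,
so no interrupts need to be disabled and no SMP-only spinlock debug
paths are exercised.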
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--	arch/parisc/kernel/irq.c	41	++++++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 9c2d953f3de5..2e6443b1e922 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>
 
 #include <asm/smp.h>
+#include <asm/ldcw.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -172,10 +172,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
 	seq_puts(p, " Interrupt stack usage\n");
-	seq_printf(p, "%*s: ", prec, "ISC");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
-	seq_puts(p, " Interrupt stack usage counter\n");
 # endif
 #endif
 #ifdef CONFIG_SMP
@@ -384,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
 	return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	volatile unsigned int slock[4];
+	volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.slock = { 1,1,1,1 },
+	};
+#endif
+
+
 int sysctl_panic_on_stackoverflow = 1;
 
 static inline void stack_overflow_check(struct pt_regs *regs)
@@ -450,27 +464,26 @@ panic_check:
 }
 
 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
-	};
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
 
 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
 	union irq_stack_union *union_ptr;
 	unsigned long irq_stack;
-	raw_spinlock_t *irq_stack_in_use;
+	volatile unsigned int *irq_stack_in_use;
 
 	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
 	irq_stack = (unsigned long) &union_ptr->stack;
-	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
 			 64); /* align for stack frame usage */
 
 	/* We may be called recursive. If we are already using the irq stack,
 	 * just continue to use it. Use spinlocks to serialize
 	 * the irq stack usage.
 	 */
-	irq_stack_in_use = &union_ptr->lock;
-	if (!raw_spin_trylock(irq_stack_in_use)) {
+	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+	if (!__ldcw(irq_stack_in_use)) {
 		void (*direct_call)(unsigned long p1) = func;
 
 		/* We are using the IRQ stack already.
@@ -482,10 +495,8 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	/* This is where we switch to the IRQ stack. */
 	call_on_stack(param1, func, irq_stack);
 
-	__inc_irq_stat(irq_stack_counter);
-
 	/* free up irq stack usage. */
-	do_raw_spin_unlock(irq_stack_in_use);
+	*irq_stack_in_use = 1;
 }
 
 asmlinkage void do_softirq(void)