author     Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
commit     bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree       216fdc1cbef450ca688135c5b8969169482d9a48 /arch/blackfin/kernel/irqchip.c
parent     3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent     657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
Diffstat (limited to 'arch/blackfin/kernel/irqchip.c')
-rw-r--r--  arch/blackfin/kernel/irqchip.c  112
1 file changed, 47 insertions(+), 65 deletions(-)
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 401bd32aa499..4b5fd36187d9 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,36 +38,15 @@
 #include <asm/pda.h>
 
 static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
 void ack_bad_irq(unsigned int irq)
 {
 	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 
-static struct irq_chip bad_chip = {
-	.ack = dummy_mask_unmask_irq,
-	.mask = dummy_mask_unmask_irq,
-	.unmask = dummy_mask_unmask_irq,
-};
-
 static struct irq_desc bad_irq_desc = {
-	.status = IRQ_DISABLED,
-	.chip = &bad_chip,
 	.handle_irq = handle_bad_irq,
-	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -75,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
 #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
 #endif
 
+#ifdef CONFIG_PROC_FS
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -106,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	return 0;
 }
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
-	unsigned short pending, other_ints;
 #endif
-	old_regs = set_irq_regs(regs);
 
-	/*
-	 * Some hardware gives randomly wrong interrupts.  Rather
-	 * than crashing, do something sensible.
-	 */
-	if (irq >= NR_IRQS)
-		desc = &bad_irq_desc;
-
-	irq_enter();
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
 	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
+	long sp = __get_SP() & (THREAD_SIZE - 1);
 
 	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
 		dump_stack();
-		printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
-				" only %ld bytes free\n",
-			__func__, irq, sp - sizeof(struct thread_info));
-		}
+		pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+			irq, sp - sizeof(struct thread_info));
 	}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
 #endif
-	generic_handle_irq(irq);
 
 #ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+	unsigned short pending, other_ints;
+
 	/*
 	 * If we're the only interrupt running (ignoring IRQ15 which
 	 * is for syscalls), lower our priority to IRQ14 so that
@@ -163,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
-#endif  /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	check_stack_overflow(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts.  Rather
+	 * than crashing, do something sensible.
+	 */
+	if (irq >= NR_IRQS)
+		handle_bad_irq(irq, &bad_irq_desc);
+	else
+		generic_handle_irq(irq);
+
+	maybe_lower_to_irq14();
+
 	irq_exit();
 
 	set_irq_regs(old_regs);
@@ -171,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	spin_lock_init(&irq_controller_lock);
-	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-		*desc = bad_irq_desc;
-	}
-
 	init_arch_irq();
 
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
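
The check_stack_overflow() helper introduced above relies on the kernel stack being THREAD_SIZE bytes and THREAD_SIZE-aligned, so masking the stack pointer with THREAD_SIZE - 1 yields the offset into the current stack, with struct thread_info sitting at the bottom. Below is a minimal user-space sketch of just that arithmetic; THREAD_SIZE, THREAD_INFO and the sample stack-pointer values are illustrative assumptions, not the real Blackfin constants.

/*
 * Standalone sketch (not kernel code) of the headroom check performed by
 * check_stack_overflow(): assumes a THREAD_SIZE-byte, THREAD_SIZE-aligned
 * stack with a thread_info-sized reservation at its base.
 */
#include <stdio.h>

#define THREAD_SIZE  8192UL                 /* assumed stack size/alignment */
#define THREAD_INFO  128UL                  /* stand-in for sizeof(struct thread_info) */
#define STACK_WARN   (THREAD_SIZE / 8)      /* warn threshold, as in the kernel default */

static void report_headroom(unsigned long sp)
{
	/* Offset of SP within its THREAD_SIZE-aligned stack area. */
	unsigned long offset = sp & (THREAD_SIZE - 1);

	if (offset < THREAD_INFO + STACK_WARN)
		printf("sp=0x%lx: possible stack overflow, only %ld bytes free\n",
		       sp, (long)(offset - THREAD_INFO));
	else
		printf("sp=0x%lx: %lu bytes above thread_info, ok\n",
		       sp, offset - THREAD_INFO);
}

int main(void)
{
	report_headroom(0x1000f00UL);  /* plenty of room left */
	report_headroom(0x1000180UL);  /* close to the thread_info area: warns */
	return 0;
}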
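
The maybe_lower_to_irq14() helper keeps the existing pending & (pending - 1) test from the old asm_do_IRQ() body: clearing the lowest set bit leaves zero exactly when at most one interrupt bit is pending, which is what decides whether to drop to IRQ14. A small standalone C sketch of just that bit test follows; only_one_pending() is a hypothetical name used here for illustration, not something from the driver.

/*
 * Plain C illustration of the single-pending-interrupt test:
 * pending & (pending - 1) clears the lowest set bit, so the result is
 * zero exactly when zero or one bit is set in 'pending'.
 */
#include <stdio.h>

static int only_one_pending(unsigned short pending)
{
	return (pending & (pending - 1)) == 0;
}

int main(void)
{
	printf("%d\n", only_one_pending(0x0040)); /* one bit set  -> 1 */
	printf("%d\n", only_one_pending(0x0044)); /* two bits set -> 0 */
	printf("%d\n", only_one_pending(0x0000)); /* nothing set  -> 1 */
	return 0;
}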