author     Mike Frysinger <vapier@gentoo.org>   2009-06-15 06:10:03 -0400
committer  Mike Frysinger <vapier@gentoo.org>   2009-06-22 21:15:40 -0400
commit     26579216f3cdf1ae05f0af8412b444870a167510 (patch)
tree       6774806c22c4914430930e522367e9df8c1192af /arch/blackfin
parent     a200ad22bb15fe01cf222fa631687876baad5e01 (diff)
Blackfin: redo handling of bad irqs
With the common IRQ code initializing much more of the irq_desc state, we
can't blindly initialize it ourselves to the local bad_irq state. If we
do, we end up wrongly clobbering many fields. So punt most of the bad irq
code as the common layers will handle the default state, and simply call
handle_bad_irq() directly when the IRQ we are processing is invalid.
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
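
For reference, the new dispatch at the heart of asm_do_IRQ() reduces to roughly the sketch below (simplified from the hunks that follow: the stack-overflow debug check and the non-IPIPE pending-interrupt bookkeeping are elided, and the exit path is assumed to follow the usual irq_exit()/set_irq_regs() pairing):

asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/* Out-of-range vectors go straight to the generic bad-IRQ handler. */
	if (irq >= NR_IRQS)
		handle_bad_irq(irq, &bad_irq_desc);
	else
		generic_handle_irq(irq);

	/* ... remaining checks elided (see diff below) ... */

	irq_exit();             /* assumed: pairs with irq_enter() above */
	set_irq_regs(old_regs);
}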
Diffstat (limited to 'arch/blackfin')
-rw-r--r--  arch/blackfin/kernel/irqchip.c  53
1 files changed, 11 insertions, 42 deletions
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e935bb31..36bba3027735 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
 #include <asm/pda.h>
 
 static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
 void ack_bad_irq(unsigned int irq)
 {
 	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 
-static struct irq_chip bad_chip = {
-	.ack = dummy_mask_unmask_irq,
-	.mask = dummy_mask_unmask_irq,
-	.unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
 static struct irq_desc bad_irq_desc = {
-	.status = IRQ_DISABLED,
-	.chip = &bad_chip,
 	.handle_irq = handle_bad_irq,
-	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-	.kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -119,21 +96,13 @@ __attribute__((l1_text))
 #endif
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
-	struct pt_regs *old_regs;
-	struct irq_desc *desc = irq_desc + irq;
 #ifndef CONFIG_IPIPE
 	unsigned short pending, other_ints;
 #endif
-	old_regs = set_irq_regs(regs);
-
-	/*
-	 * Some hardware gives randomly wrong interrupts.  Rather
-	 * than crashing, do something sensible.
-	 */
-	if (irq >= NR_IRQS)
-		desc = &bad_irq_desc;
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	irq_enter();
+
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
 	{
@@ -149,7 +118,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	}
 	}
 #endif
-	generic_handle_irq(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts.  Rather
+	 * than crashing, do something sensible.
+	 */
+	if (irq >= NR_IRQS)
+		handle_bad_irq(irq, &bad_irq_desc);
+	else
+		generic_handle_irq(irq);
 
 #ifndef CONFIG_IPIPE
 	/*
@@ -173,14 +150,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	spin_lock_init(&irq_controller_lock);
-	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-		*desc = bad_irq_desc;
-	}
-
 	init_arch_irq();
 
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND