Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	| 124
1 file changed, 67 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index feff792ed0f9..f7f376ea7b17 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,6 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
+#include <linux/perf_counter.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -117,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 	if (!en)
 		return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
 		/*
 		 * Do we need to disable preemption here? Not really: in the
@@ -134,6 +136,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
 	}
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	if (test_perf_counter_pending()) {
 		clear_perf_counter_pending();
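
Note: the hunk above depends on powerpc's lazy interrupt-disable scheme. raw_local_irq_restore() returns early while interrupts stay soft-disabled, and on a real re-enable it replays work latched in the meantime: a pending perf counter interrupt, and (on iSeries, now compiled in only under CONFIG_PPC_STD_MMU_64, where the lppaca/PACA structures exist) pending hypervisor events. A minimal user-space sketch of this latch-and-replay pattern follows; every name in it is illustrative, not a kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model only, not the kernel's implementation. */
static bool soft_disabled;
static bool event_pending;

static void raise_event(void)
{
	if (soft_disabled)
		event_pending = true;	/* latch it, handle later */
	else
		printf("event handled immediately\n");
}

static void local_irq_restore_model(bool enable)
{
	if (!enable)
		return;			/* still masked: nothing to replay */

	soft_disabled = false;
	if (event_pending) {		/* mirrors test_perf_counter_pending() */
		event_pending = false;	/* mirrors clear_perf_counter_pending() */
		printf("replaying event latched while soft-disabled\n");
	}
}

int main(void)
{
	soft_disabled = true;
	raise_event();			/* latched, not handled */
	local_irq_restore_model(true);	/* replayed here */
	return 0;
}
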
@@ -253,77 +256,84 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-void do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
 	struct thread_info *curtp, *irqtp;
-#endif
+	unsigned long saved_sp_limit;
+	struct irq_desc *desc;
 
-	irq_enter();
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[smp_processor_id()];
+
+	if (curtp == irqtp) {
+		/* We're already on the irq stack, just handle it */
+		generic_handle_irq(irq);
+		return;
+	}
+
+	desc = irq_desc + irq;
+	saved_sp_limit = current->thread.ksp_limit;
+
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context. */
+	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+		(curtp->preempt_count & SOFTIRQ_MASK);
+
+	current->thread.ksp_limit = (unsigned long)irqtp +
+		_ALIGN_UP(sizeof(struct thread_info), 16);
+
+	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+	current->thread.ksp_limit = saved_sp_limit;
+	irqtp->task = NULL;
+
+	/* Set any flag that may have been set on the
+	 * alternate stack
+	 */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
+#endif
 
+static inline void check_stack_overflow(void)
+{
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 2KB free? */
-	{
-		long sp;
+	long sp;
 
-		sp = __get_SP() & (THREAD_SIZE-1);
+	sp = __get_SP() & (THREAD_SIZE-1);
 
-		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-				sp - sizeof(struct thread_info));
-			dump_stack();
-		}
+	/* check for stack overflow: is there less than 2KB free? */
+	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+		printk("do_IRQ: stack overflow: %ld\n",
+			sp - sizeof(struct thread_info));
+		dump_stack();
 	}
 #endif
+}
 
-	/*
-	 * Every platform is required to implement ppc_md.get_irq.
-	 * This function will either return an irq number or NO_IRQ to
-	 * indicate there are no more pending.
-	 * The value NO_IRQ_IGNORE is for buggy hardware and means that this
-	 * IRQ has already been handled. -- Tom
-	 */
-	irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq;
 
-	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-		/* Switch to the irq stack to handle this */
-		curtp = current_thread_info();
-		irqtp = hardirq_ctx[smp_processor_id()];
-		if (curtp != irqtp) {
-			struct irq_desc *desc = irq_desc + irq;
-			void *handler = desc->handle_irq;
-			unsigned long saved_sp_limit = current->thread.ksp_limit;
-			if (handler == NULL)
-				handler = &__do_IRQ;
-			irqtp->task = curtp->task;
-			irqtp->flags = 0;
-
-			/* Copy the softirq bits in preempt_count so that the
-			 * softirq checks work in the hardirq context.
-			 */
-			irqtp->preempt_count =
-				(irqtp->preempt_count & ~SOFTIRQ_MASK) |
-				(curtp->preempt_count & SOFTIRQ_MASK);
+	irq_enter();
 
-			current->thread.ksp_limit = (unsigned long)irqtp +
-				_ALIGN_UP(sizeof(struct thread_info), 16);
-			call_handle_irq(irq, desc, irqtp, handler);
-			current->thread.ksp_limit = saved_sp_limit;
-			irqtp->task = NULL;
+	check_stack_overflow();
 
+	irq = ppc_md.get_irq();
 
-			/* Set any flag that may have been set on the
-			 * alternate stack
-			 */
-			if (irqtp->flags)
-				set_bits(irqtp->flags, &curtp->flags);
-		} else
-#endif
-			generic_handle_irq(irq);
-	} else if (irq != NO_IRQ_IGNORE)
+	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+		handle_one_irq(irq);
+	else if (irq != NO_IRQ_IGNORE)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
 
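
Note on the refactor above: do_IRQ() is split so the irq-stack switch lives in handle_one_irq() and the 2KB red-zone test in check_stack_overflow(). Behaviour is otherwise unchanged, except that a NULL desc->handle_irq no longer falls back to __do_IRQ. The one subtle piece of bookkeeping is the preempt_count merge: the irq stack carries its own thread_info, so the SOFTIRQ bits of the interrupted context are copied across before the handler runs, keeping in_softirq()-style checks truthful on the alternate stack (current->thread.ksp_limit is likewise re-pointed at the irq stack for the duration, then restored). A standalone sketch of that mask arithmetic follows; the bit layout and constants below are assumptions for illustration, the real ones come from the kernel's hardirq.h:

#include <stdio.h>

/* Assumed layout for the demo, not the kernel's exact one. */
#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)
#define HARDIRQ_SHIFT	16

int main(void)
{
	/* Interrupted context: softirq count of 1 (in_softirq() true). */
	unsigned long curtp_count = 1UL << SOFTIRQ_SHIFT;

	/* IRQ stack's thread_info: one hardirq level, stale softirq bits. */
	unsigned long irqtp_count = (1UL << HARDIRQ_SHIFT) | (3UL << SOFTIRQ_SHIFT);

	/* Same expression as handle_one_irq(): drop the irq stack's old
	 * SOFTIRQ bits, then import the interrupted context's, so softirq
	 * checks answer correctly while running on the irq stack. */
	irqtp_count = (irqtp_count & ~SOFTIRQ_MASK) |
		      (curtp_count & SOFTIRQ_MASK);

	printf("merged preempt_count = %#lx\n", irqtp_count);	/* 0x10100 */
	return 0;
}
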