Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--  arch/powerpc/kernel/irq.c  123
1 file changed, 66 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 844d3f882a15..f7f376ea7b17 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -118,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
         if (!en)
                 return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
         if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                 /*
                  * Do we need to disable preemption here? Not really: in the
@@ -135,6 +136,7 @@ notrace void raw_local_irq_restore(unsigned long en)
                 if (local_paca->lppaca_ptr->int_dword.any_int)
                         iseries_handle_interrupts();
         }
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
         if (test_perf_counter_pending()) {
                 clear_perf_counter_pending();
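
Note: the two hunks above bracket the iSeries soft-interrupt replay in raw_local_irq_restore() with CONFIG_PPC_STD_MMU_64. iSeries is a 64-bit hash-MMU platform, and local_paca->lppaca_ptr presumably only exists on such configurations, so the guard keeps other MMU builds compiling. A minimal sketch of the guarded region as it reads after the patch (surrounding code elided):

#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /* unchanged body: replay any interrupt the hypervisor
                 * latched while interrupts were soft-disabled */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
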
@@ -254,77 +256,84 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-void do_IRQ(struct pt_regs *regs)
-{
-        struct pt_regs *old_regs = set_irq_regs(regs);
-        unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
         struct thread_info *curtp, *irqtp;
-#endif
+        unsigned long saved_sp_limit;
+        struct irq_desc *desc;
 
-        irq_enter();
+        /* Switch to the irq stack to handle this */
+        curtp = current_thread_info();
+        irqtp = hardirq_ctx[smp_processor_id()];
+
+        if (curtp == irqtp) {
+                /* We're already on the irq stack, just handle it */
+                generic_handle_irq(irq);
+                return;
+        }
+
+        desc = irq_desc + irq;
+        saved_sp_limit = current->thread.ksp_limit;
+
+        irqtp->task = curtp->task;
+        irqtp->flags = 0;
+
+        /* Copy the softirq bits in preempt_count so that the
+         * softirq checks work in the hardirq context. */
+        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+                (curtp->preempt_count & SOFTIRQ_MASK);
+
+        current->thread.ksp_limit = (unsigned long)irqtp +
+                _ALIGN_UP(sizeof(struct thread_info), 16);
+
+        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+        current->thread.ksp_limit = saved_sp_limit;
+        irqtp->task = NULL;
+
+        /* Set any flag that may have been set on the
+         * alternate stack
+         */
+        if (irqtp->flags)
+                set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+        generic_handle_irq(irq);
+}
+#endif
 
+static inline void check_stack_overflow(void)
+{
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
-        /* Debugging check for stack overflow: is there less than 2KB free? */
-        {
-                long sp;
+        long sp;
 
-                sp = __get_SP() & (THREAD_SIZE-1);
+        sp = __get_SP() & (THREAD_SIZE-1);
 
-                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-                        printk("do_IRQ: stack overflow: %ld\n",
-                                sp - sizeof(struct thread_info));
-                        dump_stack();
-                }
-        }
+        /* check for stack overflow: is there less than 2KB free? */
+        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+                printk("do_IRQ: stack overflow: %ld\n",
+                        sp - sizeof(struct thread_info));
+                dump_stack();
+        }
 #endif
+}
 
-        /*
-         * Every platform is required to implement ppc_md.get_irq.
-         * This function will either return an irq number or NO_IRQ to
-         * indicate there are no more pending.
-         * The value NO_IRQ_IGNORE is for buggy hardware and means that this
-         * IRQ has already been handled. -- Tom
-         */
-        irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+        struct pt_regs *old_regs = set_irq_regs(regs);
+        unsigned int irq;
 
-        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-                /* Switch to the irq stack to handle this */
-                curtp = current_thread_info();
-                irqtp = hardirq_ctx[smp_processor_id()];
-                if (curtp != irqtp) {
-                        struct irq_desc *desc = irq_desc + irq;
-                        void *handler = desc->handle_irq;
-                        unsigned long saved_sp_limit = current->thread.ksp_limit;
-                        if (handler == NULL)
-                                handler = &__do_IRQ;
-                        irqtp->task = curtp->task;
-                        irqtp->flags = 0;
-
-                        /* Copy the softirq bits in preempt_count so that the
-                         * softirq checks work in the hardirq context.
-                         */
-                        irqtp->preempt_count =
-                                (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                                (curtp->preempt_count & SOFTIRQ_MASK);
+        irq_enter();
 
-                        current->thread.ksp_limit = (unsigned long)irqtp +
-                                _ALIGN_UP(sizeof(struct thread_info), 16);
-                        call_handle_irq(irq, desc, irqtp, handler);
-                        current->thread.ksp_limit = saved_sp_limit;
-                        irqtp->task = NULL;
+        check_stack_overflow();
 
-                        /* Set any flag that may have been set on the
-                         * alternate stack
-                         */
-                        if (irqtp->flags)
-                                set_bits(irqtp->flags, &curtp->flags);
-                } else
-#endif
-                        generic_handle_irq(irq);
-        } else if (irq != NO_IRQ_IGNORE)
+        irq = ppc_md.get_irq();
+
+        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+                handle_one_irq(irq);
+        else if (irq != NO_IRQ_IGNORE)
                 /* That's not SMP safe ... but who cares ? */
                 ppc_spurious_interrupts++;
 
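
Note: the hunk above splits do_IRQ() into three pieces. The irq-stack switch moves into handle_one_irq(), which falls back to a bare generic_handle_irq() when CONFIG_IRQSTACKS is off or when we are already on the irq stack; the softirq bits of preempt_count are still copied across so that in-softirq checks keep working on the alternate stack. The 2KB stack-depth check moves into check_stack_overflow(), an empty inline unless CONFIG_DEBUG_STACKOVERFLOW is set. Two behavioural details: the NULL-handler fallback to __do_IRQ is dropped (desc->handle_irq is now passed to call_handle_irq() unconditionally), and the old comment describing ppc_md.get_irq()/NO_IRQ_IGNORE is removed. Reassembled for readability, the resulting do_IRQ() should read roughly as below; everything past the spurious-interrupt accounting is outside this hunk and assumed unchanged:

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        irq_enter();

        check_stack_overflow();

        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
                handle_one_irq(irq);
        else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;

        /* ... irq_exit() and the rest of the function, outside this hunk ... */
}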