Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c | 101
1 file changed, 45 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..c7cb8c232d2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-			       (curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,57 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp, *sirqtp;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,28 +587,22 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
 static inline void do_softirq_onstack(void)
 {
 	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit = current->thread.ksp_limit;
 
 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
 	irqtp->flags = 0;
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
-	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;
 
 	/* Set any flag that may have been set on the