Diffstat (limited to 'arch/sparc64/kernel/irq.c'):

 arch/sparc64/kernel/irq.c | 163
 1 file changed, 69 insertions(+), 94 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 11e645c9ec50..0fb1738a4f3f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -68,11 +68,7 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  * access to this structure takes a TLB miss it could cause
  * the 5-level sparc v9 trap stack to overflow.
  */
-struct irq_work_struct {
-	unsigned int	irq_worklists[16];
-};
-struct irq_work_struct __irq_work[NR_CPUS];
-#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
+#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
 
 static struct irqaction *irq_action[NR_IRQS+1];
 
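The 16-entry per-PIL worklist array is gone: each cpu now keeps exactly one device-interrupt worklist, and it lives in that cpu's trap_block[] entry so trap-level code can reach it through a fixed, already-mapped per-cpu area. A sketch of the assumed layout follows; only irq_worklist is guaranteed by this patch, the struct name and surrounding members are assumptions:

	/* Assumed shape of the per-cpu trap area the new macro indexes. */
	struct trap_per_cpu {
		/* ... trap-time scratch fields ... */
		unsigned int	irq_worklist;	/* head of pending ino_bucket chain */
	};
	extern struct trap_per_cpu trap_block[NR_CPUS];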
@@ -91,10 +87,8 @@ static void register_irq_proc (unsigned int irq);
  */
 #define put_ino_in_irqaction(action, irq) \
 	action->flags &= 0xffffffffffffUL; \
-	if (__bucket(irq) == &pil0_dummy_bucket) \
-		action->flags |= 0xdeadUL << 48; \
-	else \
-		action->flags |= __irq_ino(irq) << 48;
+	action->flags |= __irq_ino(irq) << 48;
+
 #define get_ino_in_irqaction(action)	(action->flags >> 48)
 
 #define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
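With the dummy bucket gone there is no longer a 0xdead sentinel INO: the top 16 bits of action->flags always carry the real INO. The round trip, written out as hypothetical helpers (not in the patch, purely illustrative):

	/* Bits 63:48 hold the INO, bits 47:0 the SA_* flag bits. */
	static unsigned long pack_ino(unsigned long flags, unsigned long ino)
	{
		flags &= 0xffffffffffffUL;	/* keep the low 48 flag bits */
		flags |= ino << 48;		/* stash the INO up top      */
		return flags;
	}

	static unsigned long unpack_ino(unsigned long flags)
	{
		return flags >> 48;		/* get_ino_in_irqaction()    */
	}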
@@ -251,15 +245,6 @@ void disable_irq(unsigned int irq)
 	}
 }
 
-/* The timer is the one "weird" interrupt which is generated by
- * the CPU %tick register and not by some normal vectored interrupt
- * source.  To handle this special case, we use this dummy INO bucket.
- */
-static struct irq_desc pil0_dummy_desc;
-static struct ino_bucket pil0_dummy_bucket = {
-	.irq_info = &pil0_dummy_desc,
-};
-
 static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
 			    unsigned long iclr, unsigned long imap,
 			    struct ino_bucket *bucket)
@@ -276,15 +261,7 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 	struct ino_bucket *bucket;
 	int ino;
 
-	if (pil == 0) {
-		if (iclr != 0UL || imap != 0UL) {
-			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
-				    iclr, imap);
-			prom_halt();
-		}
-		return __irq(&pil0_dummy_bucket);
-	}
-
+	BUG_ON(pil == 0);
 	BUG_ON(tlb_type == hypervisor);
 
 	/* RULE: Both must be specified in all other cases. */
@@ -371,7 +348,7 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 	__asm__ __volatile__("wrpr %0, %1, %%pstate"
 			     : : "r" (pstate), "i" (PSTATE_IE));
-	ent = irq_work(smp_processor_id(), bucket->pil);
+	ent = irq_work(smp_processor_id());
 	bucket->irq_chain = *ent;
 	*ent = __irq(bucket);
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
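The insert still runs with PSTATE_IE cleared, because the very same worklist is consumed from the trap vector on this cpu; only the list selection changed, from (cpu, pil) to cpu alone. Ignoring the %pstate fencing, the operation is an ordinary intrusive-stack push (hypothetical helper, irq_work() and __irq() as defined above):

	static void worklist_push(struct ino_bucket *bucket)
	{
		unsigned int *ent = irq_work(smp_processor_id());

		bucket->irq_chain = *ent;	/* old head chains behind us   */
		*ent = __irq(bucket);		/* this bucket is the new head */
	}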
@@ -437,7 +414,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	if (unlikely(!bucket->irq_info))
 		return -ENODEV;
 
-	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
+	if (irqflags & SA_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
 		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
@@ -465,12 +442,9 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	}
 
 	bucket->flags |= IBF_ACTIVE;
-	pending = 0;
-	if (bucket != &pil0_dummy_bucket) {
-		pending = bucket->pending;
-		if (pending)
-			bucket->pending = 0;
-	}
+	pending = bucket->pending;
+	if (pending)
+		bucket->pending = 0;
 
 	action->handler = handler;
 	action->flags = irqflags;
@@ -487,13 +461,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	/* We ate the IVEC already, this makes sure it does not get lost. */
 	if (pending) {
 		atomic_bucket_insert(bucket);
-		set_softint(1 << bucket->pil);
+		set_softint(1 << PIL_DEVICE_IRQ);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
-	if (bucket != &pil0_dummy_bucket)
-		register_irq_proc(__irq_ino(irq));
+	register_irq_proc(__irq_ino(irq));
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
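Since every vectored device interrupt is now queued on the one per-cpu worklist, replaying a swallowed IVEC means raising the single shared device PIL rather than the bucket's own level. PIL_DEVICE_IRQ is expected to come from asm/pil.h; the numeric value below is an assumption for illustration:

	#define PIL_DEVICE_IRQ	5	/* assumed value; see asm/pil.h */

	/* Replay path: queue the bucket, then kick the shared softint level. */
	atomic_bucket_insert(bucket);
	set_softint(1 << PIL_DEVICE_IRQ);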
@@ -533,7 +506,9 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irqaction *action;
 	struct ino_bucket *bucket;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int ent, i;
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
@@ -549,42 +524,39 @@ void free_irq(unsigned int irq, void *dev_id)
 	spin_lock_irqsave(&irq_action_lock, flags);
 
 	bucket = __bucket(irq);
-	if (bucket != &pil0_dummy_bucket) {
-		struct irq_desc *desc = bucket->irq_info;
-		int ent, i;
+	desc = bucket->irq_info;
 
-		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-			struct irqaction *p = &desc->action[i];
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];
 
-			if (p == action) {
-				desc->action_active_mask &= ~(1 << i);
-				break;
-			}
+		if (p == action) {
+			desc->action_active_mask &= ~(1 << i);
+			break;
 		}
+	}
 
-		if (!desc->action_active_mask) {
-			unsigned long imap = bucket->imap;
-
-			/* This unique interrupt source is now inactive. */
-			bucket->flags &= ~IBF_ACTIVE;
+	if (!desc->action_active_mask) {
+		unsigned long imap = bucket->imap;
 
-			/* See if any other buckets share this bucket's IMAP
-			 * and are still active.
-			 */
-			for (ent = 0; ent < NUM_IVECS; ent++) {
-				struct ino_bucket *bp = &ivector_table[ent];
-				if (bp != bucket &&
-				    bp->imap == imap &&
-				    (bp->flags & IBF_ACTIVE) != 0)
-					break;
-			}
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
 
-			/* Only disable when no other sub-irq levels of
-			 * the same IMAP are active.
-			 */
-			if (ent == NUM_IVECS)
-				disable_irq(irq);
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			struct ino_bucket *bp = &ivector_table[ent];
+			if (bp != bucket &&
+			    bp->imap == imap &&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
 		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
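The shared-IMAP scan is unchanged in substance, just de-indented now that the dummy-bucket guard is gone (the hoisted desc/ent/i declarations are what make that possible). Several INO buckets can map to one IMAP register, so the hardware source may only be disabled once the last active bucket on that IMAP is freed. The loop amounts to a predicate like this (hypothetical helper, not in the patch):

	/* Returns nonzero if some other active bucket still uses imap. */
	static int imap_still_in_use(struct ino_bucket *bucket, unsigned long imap)
	{
		int ent;

		for (ent = 0; ent < NUM_IVECS; ent++) {
			struct ino_bucket *bp = &ivector_table[ent];

			if (bp != bucket &&
			    bp->imap == imap &&
			    (bp->flags & IBF_ACTIVE) != 0)
				return 1;
		}
		return 0;
	}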
@@ -625,7 +597,7 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */
 
-static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
+static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 {
 	struct irq_desc *desc = bp->irq_info;
 	unsigned char flags = bp->flags;
@@ -676,51 +648,54 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 
 		/* Test and add entropy */
 		if (random & SA_SAMPLE_RANDOM)
-			add_interrupt_randomness(irq);
+			add_interrupt_randomness(bp->pil);
 	}
 out:
 	bp->flags &= ~IBF_INPROGRESS;
 }
 
+#ifndef CONFIG_SMP
+extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
+
+void timer_irq(int irq, struct pt_regs *regs)
+{
+	unsigned long clr_mask = 1 << irq;
+	unsigned long tick_mask = tick_ops->softint_mask;
+
+	if (get_softint() & tick_mask) {
+		irq = 0;
+		clr_mask = tick_mask;
+	}
+	clear_softint(clr_mask);
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+	timer_interrupt(irq, NULL, regs);
+	irq_exit();
+}
+#endif
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	struct ino_bucket *bp;
 	int cpu = smp_processor_id();
 
-#ifndef CONFIG_SMP
-	/*
-	 * Check for TICK_INT on level 14 softint.
+	/* XXX at this point we should be able to assert that
+	 * XXX irq is PIL_DEVICE_IRQ...
 	 */
-	{
-		unsigned long clr_mask = 1 << irq;
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if ((irq == 14) && (get_softint() & tick_mask)) {
-			irq = 0;
-			clr_mask = tick_mask;
-		}
-		clear_softint(clr_mask);
-	}
-#else
 	clear_softint(1 << irq);
-#endif
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
 
 	/* Sliiiick... */
-#ifndef CONFIG_SMP
-	bp = ((irq != 0) ?
-	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
-	      &pil0_dummy_bucket);
-#else
-	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
-#endif
+	bp = __bucket(xchg32(irq_work(cpu), 0));
 	while (bp) {
 		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
+		kstat_this_cpu.irqs[bp->pil]++;
+
 		bp->irq_chain = 0;
-		process_bucket(irq, bp, regs);
+		process_bucket(bp, regs);
 		bp = nbp;
 	}
 	irq_exit();
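Two changes land here. On UP kernels the %tick timer gets its own entry point, timer_irq(), instead of masquerading as PIL 0 through the dummy bucket; and handler_irq() shrinks to a pure drain of the single per-cpu worklist, charging statistics to each bucket's own PIL as it walks. The xchg32 detaches the whole chain atomically, so later IVECs queue behind a fresh, empty head while this cpu processes its private copy. Restated as a hypothetical helper with the intent spelled out:

	static void drain_device_worklist(int cpu, struct pt_regs *regs)
	{
		/* One atomic exchange grabs the entire pending chain. */
		struct ino_bucket *bp = __bucket(xchg32(irq_work(cpu), 0));

		while (bp) {
			struct ino_bucket *nbp = __bucket(bp->irq_chain);

			kstat_this_cpu.irqs[bp->pil]++;	/* charge the bucket's PIL */
			bp->irq_chain = 0;		/* unlink before handling  */
			process_bucket(bp, regs);
			bp = nbp;
		}
	}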
@@ -929,7 +904,7 @@ void init_irqwork_curcpu(void)
 {
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
+	trap_block[cpu].irq_worklist = 0;
 }
 
 static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)