author     Kevin D. Kissell <kevink@mips.com>     2007-08-03 13:38:03 -0400
committer  Ralf Baechle <ralf@linux-mips.org>     2007-10-11 18:45:57 -0400
commit     f571eff0a24ed97a919f2b61bb4afdeab4b43002 (patch)
tree       e9d6c597fafca02720f000cf795a37f2d163f10f /arch/mips/kernel/smtc.c
parent     bbf25010f1a6b761914430f5fca081ec8c7accd1 (diff)
[MIPS] IRQ Affinity Support for SMTC on Malta Platform
Signed-off-by: Kevin D. Kissell <kevink@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/smtc.c')
-rw-r--r--  arch/mips/kernel/smtc.c  63
1 file changed, 63 insertions, 0 deletions
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef1..fe22387d58b1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -606,6 +606,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 	return setup_irq(irq, new);
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	/*
+	 * If a "fast path" cache of quickly decodable affinity state
+	 * is maintained, this is where it gets done, on a call up
+	 * from the platform affinity code.
+	 */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+	int target;
+
+	/*
+	 * OK wise guy, now figure out how to get the IRQ
+	 * to be serviced on an authorized "CPU".
+	 *
+	 * Ideally, to handle the situation where an IRQ has multiple
+	 * eligible CPUs, we would maintain state per IRQ that would
+	 * allow a fair distribution of service requests.  Since the
+	 * expected use model is any-or-only-one, for simplicity
+	 * and efficiency, we just pick the easiest one to find.
+	 */
+
+	target = first_cpu(irq_desc[irq].affinity);
+
+	/*
+	 * We depend on the platform code to have correctly processed
+	 * IRQ affinity change requests to ensure that the IRQ affinity
+	 * mask has been purged of bits corresponding to nonexistent and
+	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
+	 * connected to the physical interrupt input for the interrupt
+	 * in question.  Otherwise we have a nasty problem with interrupt
+	 * mask management.  This is best handled in non-performance-critical
+	 * platform IRQ affinity setting code, to minimize interrupt-time
+	 * checks.
+	 */
+
+	/* If no one is eligible, service locally */
+	if (target >= NR_CPUS) {
+		do_IRQ_no_affinity(irq);
+		return;
+	}
+
+	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 /*
  * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
  * Within a VPE one TC can interrupt another by different approaches.
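
The comment in smtc_forward_irq() above pushes all mask hygiene out to the platform's affinity-setting path: by the time an interrupt is forwarded, the affinity mask must already be free of offline or nonexistent CPUs and of TCs bound to a VPE that is not wired to the interrupt. As a rough illustration only, not the platform code from this commit (which lives outside the file shown here), the sketch below shows what such a sanitizing hook might look like against the 2.6.23-era APIs. The name plat_set_irq_affinity and the assumption that the physical interrupt is routed to VPE 0 are illustrative.

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Hypothetical platform affinity hook (illustrative only): purge the
 * requested mask of offline CPUs and of TCs bound to any VPE other
 * than the one assumed to take the physical interrupt (VPE 0 here),
 * then record it where smtc_forward_irq() will look it up.
 */
static void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	cpumask_t tmask;
	int cpu;

	/* Drop CPUs that are offline or nonexistent. */
	cpus_and(tmask, affinity, cpu_online_map);

	/* Drop TCs bound to a VPE other than the one wired to the IRQ. */
	for_each_cpu_mask(cpu, tmask) {
		if (cpu_data[cpu].vpe_id != 0)
			cpu_clear(cpu, tmask);
	}

	/* Publish the cleaned mask where smtc_forward_irq() reads it. */
	irq_desc[irq].affinity = tmask;

	/* Let SMTC refresh any fast-path affinity cache it maintains. */
	smtc_set_irq_affinity(irq, tmask);
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

Doing this work at affinity-change time is what keeps the interrupt-time path cheap. The second hunk below adds the receiving side of the forwarding path in ipi_decode().
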
@@ -830,6 +884,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 			break;
 		}
 		break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	case IRQ_AFFINITY_IPI:
+		/*
+		 * Accept a "forwarded" interrupt that was initially
+		 * taken by a TC who doesn't have affinity for the IRQ.
+		 */
+		do_IRQ_no_affinity((int)arg_copy);
+		break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 	default:
 		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 		break;
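
Putting the two hunks together: a TC that takes a physical interrupt it has no affinity for hands it off with smtc_forward_irq(), and the chosen target TC later sees an IRQ_AFFINITY_IPI in ipi_decode() and services the original IRQ through do_IRQ_no_affinity(). A minimal, hypothetical sketch of the take side follows; the name smtc_dispatch_irq is invented for illustration, and the calls (cpu_isset, smp_processor_id, irq_desc[].affinity, do_IRQ) are assumed from the kernel of this era rather than taken from the commit.

/*
 * Hypothetical take-side dispatch (illustrative only): if the TC that
 * actually took the interrupt is not in the IRQ's affinity mask,
 * forward it; otherwise service it directly.
 */
static void smtc_dispatch_irq(unsigned int irq)
{
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {
		smtc_forward_irq(irq);
		return;
	}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	do_IRQ(irq);
}

The cost of a misdirected interrupt is therefore one IPI round trip, which is why the commit leans on the affinity-setting path to keep the mask clean instead of adding checks at interrupt time.
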