Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/i8259.c	 3
-rw-r--r--	arch/mips/kernel/smtc.c		63
2 files changed, 66 insertions(+), 0 deletions(-)
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 3a2d255361bc..4f4359bfd180 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -39,6 +39,9 @@ static struct irq_chip i8259A_chip = {
 	.disable = disable_8259A_irq,
 	.unmask = enable_8259A_irq,
 	.mask_ack = mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	.set_affinity = plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
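The new .set_affinity hook delegates to plat_set_irq_affinity(), which this patch leaves to platform code; the smtc.c comment below also relies on that code having purged the affinity mask of ineligible TCs. A minimal sketch of such a routine, assuming a board where external interrupts are wired to VPE 0 (the vpe_id test and the mask-purging policy here are illustrative assumptions, not part of this diff):

	/*
	 * Hypothetical platform hook: trim the requested affinity to
	 * TCs that are online and bound to the VPE (here VPE 0) that
	 * receives the physical interrupt, record the result, then
	 * notify the SMTC layer.
	 */
	void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
	{
		cpumask_t tmask = affinity;
		int cpu;

		for_each_cpu_mask(cpu, affinity) {
			if (!cpu_online(cpu) || cpu_data[cpu].vpe_id != 0)
				cpu_clear(cpu, tmask);	/* ineligible TC */
		}
		irq_desc[irq].affinity = tmask;

		/* Let the SMTC layer refresh any fast-path affinity state. */
		smtc_set_irq_affinity(irq, tmask);
	}

Doing the purge here keeps the cost out of interrupt context, which is exactly the trade-off the smtc_forward_irq() comment below asks for.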
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef1..fe22387d58b1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -606,6 +606,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 	return setup_irq(irq, new);
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	/*
+	 * If a "fast path" cache of quickly decodable affinity state
+	 * is maintained, this is where it gets done, on a call up
+	 * from the platform affinity code.
+	 */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+	int target;
+
+	/*
+	 * OK wise guy, now figure out how to get the IRQ
+	 * to be serviced on an authorized "CPU".
+	 *
+	 * Ideally, to handle the situation where an IRQ has multiple
+	 * eligible CPUs, we would maintain state per IRQ that would
+	 * allow a fair distribution of service requests. Since the
+	 * expected use model is any-or-only-one, for simplicity
+	 * and efficiency, we just pick the easiest one to find.
+	 */
+
+	target = first_cpu(irq_desc[irq].affinity);
+
+	/*
+	 * We depend on the platform code to have correctly processed
+	 * IRQ affinity change requests to ensure that the IRQ affinity
+	 * mask has been purged of bits corresponding to nonexistent and
+	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
+	 * connected to the physical interrupt input for the interrupt
+	 * in question. Otherwise we have a nasty problem with interrupt
+	 * mask management. This is best handled in non-performance-critical
+	 * platform IRQ affinity setting code, to minimize interrupt-time
+	 * checks.
+	 */
+
+	/* If no one is eligible, service locally */
+	if (target >= NR_CPUS) {
+		do_IRQ_no_affinity(irq);
+		return;
+	}
+
+	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 /*
  * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
  * Within a VPE one TC can interrupt another by different approaches.
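smtc_forward_irq() collapses the (pre-purged) mask to a single target with first_cpu() and pushes the interrupt across as an IPI; the check that decides between forwarding and local service lives in the dispatch path, outside this hunk. A hypothetical sketch of that decision (the helper name and its placement are assumptions):

	/*
	 * Hypothetical dispatch-side check: a TC that takes the
	 * interrupt but is not in the IRQ's affinity mask hands it
	 * off via smtc_forward_irq() rather than servicing it.
	 */
	static inline void smtc_dispatch_irq(unsigned int irq)
	{
		if (cpu_isset(smp_processor_id(), irq_desc[irq].affinity))
			do_IRQ_no_affinity(irq);	/* eligible: service here */
		else
			smtc_forward_irq(irq);		/* forward as an IPI */
	}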
@@ -830,6 +884,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 			break;
 		}
 		break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	case IRQ_AFFINITY_IPI:
+		/*
+		 * Accept a "forwarded" interrupt that was initially
+		 * taken by a TC that doesn't have affinity for the IRQ.
+		 */
+		do_IRQ_no_affinity((int)arg_copy);
+		break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 	default:
 		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 		break;
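The forwarded IRQ number travels in the IPI message's pointer-sized argument, which is why ipi_decode() casts arg_copy back to int before calling do_IRQ_no_affinity(). A sketch of the message layout assumed by that round trip (field names follow the smtc_ipi structure used elsewhere in this file; treat them as assumptions if they differ):

	struct smtc_ipi {
		struct smtc_ipi *flink;	/* free/pending list linkage */
		int type;		/* e.g. IRQ_AFFINITY_IPI */
		void *arg;		/* payload; here, the IRQ number */
		int dest;		/* target "CPU" (TC) */
	};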