 arch/mips/Kconfig                        | 13
 arch/mips/kernel/i8259.c                 |  3
 arch/mips/kernel/smtc.c                  | 63
 arch/mips/mips-boards/malta/malta_smtc.c | 50
 include/asm-mips/irq.h                   | 67
 include/asm-mips/smtc_ipi.h              |  1
 6 files changed, 195 insertions(+), 2 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3b807b4bc7cd..b09eee2281c5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1378,6 +1378,19 @@ config MIPS_MT_SMTC_IM_BACKSTOP
 	  impact on interrupt service overhead. Disable it only if you know
 	  what you are doing.
 
+config MIPS_MT_SMTC_IRQAFF
+	bool "Support IRQ affinity API"
+	depends on MIPS_MT_SMTC
+	default n
+	help
+	  Enables the SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
+	  for the SMTC Linux kernel. Requires platform support, of which
+	  an example can be found in the MIPS kernel i8259 and Malta
+	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
+	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
+	  interrupt dispatch, and should be used only if you know what
+	  you are doing.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
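The help text above points at the standard procfs affinity interface. As a usage sketch only (not part of the patch), a userspace program could pin an IRQ to CPU 0; the IRQ number 16 and the mask value are arbitrary examples:

/* Usage sketch: with MIPS_MT_SMTC_IRQAFF enabled, affinity is driven
 * through the usual /proc/irq/N/smp_affinity files. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/16/smp_affinity", "w");

	if (!f) {
		perror("smp_affinity");
		return 1;
	}
	fprintf(f, "1\n");	/* hex CPU mask 0x1: allow only CPU 0 */
	return fclose(f) ? 1 : 0;
}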
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 3a2d255361bc..4f4359bfd180 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -39,6 +39,9 @@ static struct irq_chip i8259A_chip = {
 	.disable	= disable_8259A_irq,
 	.unmask		= enable_8259A_irq,
 	.mask_ack	= mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	.set_affinity	= plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
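For orientation, the hook registered above is reached through the descriptor's irq_chip. A minimal sketch of a caller, assuming the 2.6.23-era method signature this patch relies on (the helper name example_route_irq is made up; the kernel itself does the equivalent from the /proc/irq/N/smp_affinity write path):

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Hypothetical helper: hand an affinity mask to the chip method that
 * this patch wires up for the i8259. */
static void example_route_irq(unsigned int irq, cpumask_t mask)
{
	if (irq_desc[irq].chip->set_affinity)
		irq_desc[irq].chip->set_affinity(irq, mask);
}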
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef1..fe22387d58b1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -606,6 +606,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 	return setup_irq(irq, new);
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	/*
+	 * If a "fast path" cache of quickly decodable affinity state
+	 * is maintained, this is where it gets done, on a call up
+	 * from the platform affinity code.
+	 */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+	int target;
+
+	/*
+	 * OK wise guy, now figure out how to get the IRQ
+	 * to be serviced on an authorized "CPU".
+	 *
+	 * Ideally, to handle the situation where an IRQ has multiple
+	 * eligible CPUs, we would maintain state per IRQ that would
+	 * allow a fair distribution of service requests. Since the
+	 * expected use model is any-or-only-one, for simplicity
+	 * and efficiency, we just pick the easiest one to find.
+	 */
+
+	target = first_cpu(irq_desc[irq].affinity);
+
+	/*
+	 * We depend on the platform code to have correctly processed
+	 * IRQ affinity change requests to ensure that the IRQ affinity
+	 * mask has been purged of bits corresponding to nonexistent and
+	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
+	 * connected to the physical interrupt input for the interrupt
+	 * in question. Otherwise we have a nasty problem with interrupt
+	 * mask management. This is best handled in non-performance-critical
+	 * platform IRQ affinity setting code, to minimize interrupt-time
+	 * checks.
+	 */
+
+	/* If no one is eligible, service locally */
+	if (target >= NR_CPUS) {
+		do_IRQ_no_affinity(irq);
+		return;
+	}
+
+	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 /*
  * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
  * Within a VPE one TC can interrupt another by different approaches.
@@ -830,6 +884,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 			break;
 		}
 		break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	case IRQ_AFFINITY_IPI:
+		/*
+		 * Accept a "forwarded" interrupt that was initially
+		 * taken by a TC that does not have affinity for the IRQ.
+		 */
+		do_IRQ_no_affinity((int)arg_copy);
+		break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 	default:
 		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 		break;
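smtc_forward_irq() deliberately takes the first eligible CPU rather than keeping per-IRQ state. If the fairer distribution alluded to in its comment were ever wanted, one possible shape would be a per-IRQ rotor over the affinity mask, using the same era's cpumask helpers. This is an untested sketch that would sit in smtc.c next to the code above; the rotor array and helper name are invented:

/* Sketch only: rotate forwarding targets instead of always taking
 * first_cpu(). 'smtc_irq_rotor' and 'smtc_pick_target' are hypothetical. */
static int smtc_irq_rotor[NR_IRQS];

static int smtc_pick_target(unsigned int irq)
{
	cpumask_t mask = irq_desc[irq].affinity;
	int cpu = next_cpu(smtc_irq_rotor[irq], mask);

	if (cpu >= NR_CPUS)
		cpu = first_cpu(mask);		/* wrap back to the lowest set bit */
	if (cpu < NR_CPUS)
		smtc_irq_rotor[irq] = cpu;	/* remember for next time */
	return cpu;	/* a value >= NR_CPUS still means "nobody eligible" */
}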
diff --git a/arch/mips/mips-boards/malta/malta_smtc.c b/arch/mips/mips-boards/malta/malta_smtc.c
index ae05d058cb37..5c980f4a48fe 100644
--- a/arch/mips/mips-boards/malta/malta_smtc.c
+++ b/arch/mips/mips-boards/malta/malta_smtc.c
@@ -88,3 +88,53 @@ void __cpuinit prom_smp_finish(void)
 void prom_cpus_done(void)
 {
 }
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * IRQ affinity hook
+ */
+
+
+void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	cpumask_t tmask = affinity;
+	int cpu = 0;
+	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
+
+	/*
+	 * On the legacy Malta development board, all I/O interrupts
+	 * are routed through the 8259 and combined in a single signal
+	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
+	 * that signal is brought to IP2 of both VPEs. To avoid racing
+	 * concurrent interrupt service events, IP2 is enabled only on
+	 * one VPE, by convention VPE0. So long as no bits are ever
+	 * cleared in the affinity mask, there will never be any
+	 * interrupt forwarding. But as soon as a program or operator
+	 * sets affinity for one of the related IRQs, we need to make
+	 * sure that we don't ever try to forward across the VPE boundary,
+	 * at least not until we engineer a system where the interrupt
+	 * _ack() or _end() function can somehow know that it corresponds
+	 * to an interrupt taken on another VPE, and perform the appropriate
+	 * restoration of Status.IM state using MFTR/MTTR instead of the
+	 * normal local behavior. We also ensure that no attempt will
+	 * be made to forward to an offline "CPU".
+	 */
+
+	for_each_cpu_mask(cpu, affinity) {
+		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
+			cpu_clear(cpu, tmask);
+	}
+	irq_desc[irq].affinity = tmask;
+
+	if (cpus_empty(tmask))
+		/*
+		 * We could restore a default mask here, but the
+		 * runtime code can anyway deal with the null set
+		 */
+		printk(KERN_WARNING
+		       "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
+
+	/* Do any generic SMTC IRQ affinity setup */
+	smtc_set_irq_affinity(irq, tmask);
+}
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
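The loop above encodes the rule that matters for any SMTC platform with the same single-VPE interrupt routing: keep only online CPUs whose TC is bound to VPE0. As a sketch, that filtering step could be factored out for reuse; the helper name below is invented:

/* Sketch: the Malta filtering step as a standalone helper that other
 * platform plat_set_irq_affinity() implementations could share. */
static cpumask_t smtc_vpe0_online_mask(cpumask_t affinity)
{
	cpumask_t tmask = affinity;
	int cpu;

	for_each_cpu_mask(cpu, affinity) {
		/* Drop TCs bound to other VPEs and CPUs that are offline */
		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}
	return tmask;
}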
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 2cb52cf8bd4e..a58f0eecc68f 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -46,6 +46,38 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+#include <linux/cpumask.h>
+
+extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void smtc_forward_irq(unsigned int irq);
+
+/*
+ * IRQ affinity hook invoked at the beginning of interrupt dispatch
+ * if option is enabled.
+ *
+ * Up through Linux 2.6.22 (at least) cpumask operations are very
+ * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
+ * used a "fast path" per-IRQ-descriptor cache of affinity information
+ * to reduce latency.  As there is a project afoot to optimize the
+ * cpumask implementations, this version is optimistically assuming
+ * that cpumask.h macro overhead is reasonable during interrupt dispatch.
+ */
+#define IRQ_AFFINITY_HOOK(irq)						\
+do {									\
+	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+		smtc_forward_irq(irq);					\
+		irq_exit();						\
+		return;							\
+	}								\
+} while (0)
+
+#else /* Not doing SMTC affinity */
+
+#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
 /*
@@ -56,13 +88,27 @@ static inline void smtc_im_ack_irq(unsigned int irq)
  */
 #define __DO_IRQ_SMTC_HOOK(irq)						\
 do {									\
+	IRQ_AFFINITY_HOOK(irq);						\
 	if (irq_hwmask[irq] & 0x0000ff00)				\
 		write_c0_tccontext(read_c0_tccontext() &		\
 				~(irq_hwmask[irq] & 0x0000ff00));	\
+} while (0)
+
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
+do {									\
+	if (irq_hwmask[irq] & 0x0000ff00)				\
+		write_c0_tccontext(read_c0_tccontext() &		\
+				~(irq_hwmask[irq] & 0x0000ff00));	\
 } while (0)
+
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
+#define __DO_IRQ_SMTC_HOOK(irq)						\
+do {									\
+	IRQ_AFFINITY_HOOK(irq);						\
+} while (0)
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+
 #endif
 
 /*
@@ -81,6 +127,23 @@ do { \
 	irq_exit();							\
 } while (0)
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+
+#define do_IRQ_no_affinity(irq)						\
+do {									\
+	irq_enter();							\
+	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);				\
+	generic_handle_irq(irq);					\
+	irq_exit();							\
+} while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
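Taken together, the header now offers two dispatch paths: do_IRQ() checks affinity and may forward, while do_IRQ_no_affinity() skips the re-check for interrupts that arrive via the forwarding IPI. A rough illustration of how a platform dispatch routine sees this; the function name and IRQ number are hypothetical, and the comments paraphrase the macro bodies above rather than quoting them:

#include <linux/interrupt.h>
#include <asm/irq.h>

/* Illustration only: stands in for a platform IP2 interrupt dispatch path. */
static void example_ip2_dispatch(void)
{
	unsigned int irq = 16;	/* whatever IRQ the pending IP2 source maps to */

	/*
	 * Inside do_IRQ(), after irq_enter(), IRQ_AFFINITY_HOOK() checks
	 * whether the current TC is in irq_desc[irq].affinity.  If not,
	 * the IRQ is handed to smtc_forward_irq(), irq_exit() balances the
	 * entry, and dispatch returns without servicing the interrupt
	 * locally.  The target TC later completes it through
	 * do_IRQ_no_affinity(), which skips the affinity re-check and so
	 * cannot forward a second time.
	 */
	do_IRQ(irq);
}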
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index a52a4a7a36e0..e09131a6127d 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -34,6 +34,7 @@ struct smtc_ipi {
 
 #define LINUX_SMP_IPI 1
 #define SMTC_CLOCK_TICK 2
+#define IRQ_AFFINITY_IPI 3
 
 /*
  * A queue of IPI messages