Diffstat (limited to 'include/asm-mips/irq.h')
-rw-r--r--  include/asm-mips/irq.h | 67
1 file changed, 65 insertions, 2 deletions
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 2cb52cf8bd4e..a58f0eecc68f 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -46,6 +46,38 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+#include <linux/cpumask.h>
+
+extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void smtc_forward_irq(unsigned int irq);
+
+/*
+ * IRQ affinity hook invoked at the beginning of interrupt dispatch
+ * if option is enabled.
+ *
+ * Up through Linux 2.6.22 (at least) cpumask operations are very
+ * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
+ * used a "fast path" per-IRQ-descriptor cache of affinity information
+ * to reduce latency.  As there is a project afoot to optimize the
+ * cpumask implementations, this version is optimistically assuming
+ * that cpumask.h macro overhead is reasonable during interrupt dispatch.
+ */
+#define IRQ_AFFINITY_HOOK(irq) \
+do { \
+        if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
+                smtc_forward_irq(irq); \
+                irq_exit(); \
+                return; \
+        } \
+} while (0)
+
+#else /* Not doing SMTC affinity */
+
+#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
 /*
@@ -56,13 +88,27 @@ static inline void smtc_im_ack_irq(unsigned int irq)
  */
 #define __DO_IRQ_SMTC_HOOK(irq) \
 do { \
+        IRQ_AFFINITY_HOOK(irq); \
         if (irq_hwmask[irq] & 0x0000ff00) \
                 write_c0_tccontext(read_c0_tccontext() & \
                         ~(irq_hwmask[irq] & 0x0000ff00)); \
+} while (0)
+
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) \
+do { \
+        if (irq_hwmask[irq] & 0x0000ff00) \
+                write_c0_tccontext(read_c0_tccontext() & \
+                        ~(irq_hwmask[irq] & 0x0000ff00)); \
 } while (0)
+
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
+#define __DO_IRQ_SMTC_HOOK(irq) \
+do { \
+        IRQ_AFFINITY_HOOK(irq); \
+} while (0)
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+
 #endif
 
 /*
@@ -81,6 +127,23 @@ do { \
         irq_exit(); \
 } while (0)
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+
+#define do_IRQ_no_affinity(irq) \
+do { \
+        irq_enter(); \
+        __NO_AFFINITY_IRQ_SMTC_HOOK(irq); \
+        generic_handle_irq(irq); \
+        irq_exit(); \
+} while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
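The caller-side effect of IRQ_AFFINITY_HOOK() is worth spelling out, because the macro contains a bare "return". The sketch below is illustrative only and is not part of the patch: plat_irq_dispatch() stands in for whatever board-level routine decodes the pending interrupt and expands do_IRQ(), and the fixed IRQ number is a placeholder.

#include <linux/interrupt.h>
#include <asm/irq.h>

/*
 * Illustrative sketch, not part of this patch: how the affinity hook
 * behaves from the point of view of a board-level dispatcher.  The IRQ
 * number below is a placeholder; real code decodes it from the CP0
 * Cause/Status registers.
 */
asmlinkage void plat_irq_dispatch(void)
{
        unsigned int irq = 3;   /* placeholder IRQ number */

        /*
         * do_IRQ() expands __DO_IRQ_SMTC_HOOK(), which now begins with
         * IRQ_AFFINITY_HOOK().  If the current TC is not set in
         * irq_desc[irq].affinity, the hook forwards the interrupt with
         * smtc_forward_irq(irq), balances irq_enter() with irq_exit(),
         * and executes "return" -- returning from plat_irq_dispatch()
         * itself.  Nothing placed after do_IRQ() would run in the
         * forwarding case, and the enclosing function must return void.
         */
        do_IRQ(irq);
}

One consequence of the macro form is that the "return" unwinds the dispatcher rather than the hook itself; an inline function could not redirect control flow out of its caller this way.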
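On the receiving side, do_IRQ_no_affinity() exists precisely so that a forwarded interrupt is not run through IRQ_AFFINITY_HOOK() a second time, which is the "pathological re-checking" the comment warns about. Below is a minimal sketch of that idea under stated assumptions: the helper name is hypothetical, and the actual forwarding/IPI plumbing lives in the SMTC core rather than in this header.

#include <asm/irq.h>

/*
 * Illustrative sketch, assuming a helper that runs on the TC which
 * receives a forwarded IRQ (e.g. via an SMTC IPI).  The helper name is
 * hypothetical; only do_IRQ_no_affinity()/do_IRQ() come from the
 * patched header.
 */
static void handle_forwarded_irq(unsigned int irq)
{
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
        /*
         * Skip the affinity check: the CPU that forwarded the IRQ has
         * already decided this TC should service it.  Re-checking could
         * forward the interrupt yet again if the affinity mask changed
         * in the meantime.
         */
        do_IRQ_no_affinity(irq);
#else
        do_IRQ(irq);
#endif
}

For completeness: plat_set_irq_affinity() is declared with the same shape as the .set_affinity hook of struct irq_chip in this kernel generation (void, taking an unsigned int IRQ and a cpumask_t), so a board port enabling CONFIG_MIPS_MT_SMTC_IRQAFF would typically install it as the .set_affinity method of its interrupt controller's irq_chip; that wiring is outside this header.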