author     Vineet Gupta <vgupta@synopsys.com>    2015-03-09 04:33:10 -0400
committer  Vineet Gupta <vgupta@synopsys.com>    2015-06-22 04:36:57 -0400
commit     eaf0ecc33f82b9c46528d1646575dd8caf586a3d
tree       36af85361641964d284aaa6fffd9b299d59579bd /arch/arc/kernel/mcip.c
parent     72d72880612705143ad32cf4ede0d6ae27e8b975
ARCv2: SMP: intc: IDU 2nd level intc for dynamic IRQ distribution
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/kernel/mcip.c')
-rw-r--r--  arch/arc/kernel/mcip.c  183
1 file changed, 182 insertions(+), 1 deletion(-)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index ad7e90b97f6e..30284e8de6ff 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -14,10 +14,10 @@
 #include <asm/mcip.h>
 
 static char smp_cpuinfo_buf[128];
+static int idu_detected;
 
 static DEFINE_RAW_SPINLOCK(mcip_lock);
 
-
 /*
  * Any SMP specific init any CPU does when it comes up.
  * Here we setup the CPU to enable Inter-Processor-Interrupts
@@ -150,6 +150,8 @@ void mcip_init_early_smp(void)
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.grtc, "GRTC"));
 
+	idu_detected = mp.idu;
+
 	if (mp.dbg) {
 		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
 		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
@@ -158,3 +160,182 @@ void mcip_init_early_smp(void)
 	if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
 		panic("kernel trying to use non-existent GRTC\n");
 }
+
+/***************************************************************************
+ * ARCv2 Interrupt Distribution Unit (IDU)
+ *
+ * Connects external "COMMON" IRQs to core intc, providing:
+ *  -dynamic routing (IRQ affinity)
+ *  -load balancing (Round Robin interrupt distribution)
+ *  -1:N distribution
+ *
+ * It physically resides in the MCIP hw block
+ */
+
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include "../../drivers/irqchip/irqchip.h"
+
+/*
+ * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
+ */
+static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
+{
+	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
+}
+
+static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
+			 unsigned int distr)
+{
+	union {
+		unsigned int word;
+		struct {
+			unsigned int distr:2, pad:2, lvl:1, pad2:27;
+		};
+	} data;
+
+	data.distr = distr;
+	data.lvl = lvl;
+	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
+}
+
+static void idu_irq_mask(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_unmask(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static int
+idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+{
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip idu_irq_chip = {
+	.name			= "MCIP IDU Intc",
+	.irq_mask		= idu_irq_mask,
+	.irq_unmask		= idu_irq_unmask,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= idu_irq_set_affinity,
+#endif
+
+};
+
+static int idu_first_irq;
+
+static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	unsigned int idu_irq;
+
+	idu_irq = core_irq - idu_first_irq;
+	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+}
+
+static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
+	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+	return 0;
+}
+
+static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
+			 const u32 *intspec, unsigned int intsize,
+			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
+	int distri = intspec[1];
+	unsigned long flags;
+
+	*out_type = IRQ_TYPE_NONE;
+
+	/* XXX: validate distribution scheme against online cpu mask */
+	if (distri == 0) {
+		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
+		raw_spin_lock_irqsave(&mcip_lock, flags);
+		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
+		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+		raw_spin_unlock_irqrestore(&mcip_lock, flags);
+	} else {
+		/*
+		 * DEST based distribution for Level Triggered intr can only
+		 * have 1 CPU, so generalize it to always contain 1 cpu
+		 */
+		int cpu = ffs(distri);
+
+		if (cpu != fls(distri))
+			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
+				hwirq, cpu);
+
+		raw_spin_lock_irqsave(&mcip_lock, flags);
+		idu_set_dest(hwirq, cpu);
+		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
+		raw_spin_unlock_irqrestore(&mcip_lock, flags);
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops idu_irq_ops = {
+	.xlate	= idu_irq_xlate,
+	.map	= idu_irq_map,
+};
+
+/*
+ * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
+ * [24, 23+C]: If C > 0 then "C" common IRQs
+ * [24+C, N]: Not statically assigned, private-per-core
+ */
+
+
+static int __init
+idu_of_init(struct device_node *intc, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	/* Read IDU BCR to confirm nr_irqs */
+	int nr_irqs = of_irq_count(intc);
+	int i, irq;
+
+	if (!idu_detected)
+		panic("IDU not detected, but DeviceTree using it");
+
+	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
+
+	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
+
+	/* Parent interrupts (core-intc) are already mapped */
+
+	for (i = 0; i < nr_irqs; i++) {
+		/*
+		 * Return parent uplink IRQs (towards core intc) 24,25,.....
+		 * this step has been done before already
+		 * however we need it to get the parent virq and set IDU handler
+		 * as first level isr
+		 */
+		irq = irq_of_parse_and_map(intc, i);
+		if (!i)
+			idu_first_irq = irq;
+
+		irq_set_handler_data(irq, domain);
+		irq_set_chained_handler(irq, idu_cascade_isr);
+	}
+
+	__mcip_cmd(CMD_IDU_ENABLE, 0);
+
+	return 0;
+}
+IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
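
The interrupt specifier consumed by idu_irq_xlate() above has two cells: cell 0 is the IDU common IRQ number, cell 1 the distribution mode (0 selects Round Robin across all online CPUs; any other value is treated as a 1-bit-per-core mask, of which only the first set bit, ffs(), is honoured for level-triggered interrupts). Below is a minimal stand-alone sketch of just that decode step; it is not part of the patch, and decode_idu_spec() and the printed strings are illustrative names only.

/*
 * Hypothetical user-space sketch (not kernel code): mirrors how
 * idu_irq_xlate() classifies the two specifier cells.
 */
#include <stdio.h>
#include <strings.h>		/* ffs() */

static void decode_idu_spec(unsigned int common_irq, unsigned int distri,
			    unsigned int online_cpus)
{
	if (distri == 0) {
		/* Round Robin: DEST mask covers every online core */
		unsigned int mask = (1u << online_cpus) - 1;
		printf("common irq %u: round robin, dest mask 0x%x\n",
		       common_irq, mask);
	} else {
		/* DEST mode: one CPU only, first set bit of the mask wins */
		int cpu = ffs(distri);
		printf("common irq %u: fixed dest, cpu %d\n", common_irq, cpu);
	}
}

int main(void)
{
	decode_idu_spec(0, 0, 4);	/* cells <0 0>: RR over 4 CPUs, mask 0xf */
	decode_idu_spec(2, 0x4, 4);	/* cells <2 4>: pinned, ffs(0x4) == 3 */
	return 0;
}

On the kernel side the same decision ends in idu_set_dest()/idu_set_mode(); the sketch only shows how a given cell pair from the DeviceTree would be classified.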