path: root/arch/powerpc/kernel/irq.c
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--  arch/powerpc/kernel/irq.c | 478
1 file changed, 478 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
new file mode 100644
index 000000000000..4b7940693f3d
--- /dev/null
+++ b/arch/powerpc/kernel/irq.c
@@ -0,0 +1,478 @@
/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 bits are defined), hence the weird
 * shifting and complement of the cached_irq_mask. I want to be able
 * to stuff this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_8xx to reduce code space and undefined function references.
 */
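/*
 * A minimal sketch of the convention described above (illustrative
 * only, not code from this file): because a set bit means "enabled"
 * and IRQ0 is the most-significant bit, masking IRQ n on 8xx amounts
 * to something like
 *
 *	ppc_cached_irq_mask[0] &= ~(1UL << (31 - irq));
 *	out_be32(simask_reg, ppc_cached_irq_mask[0]);
 *
 * where simask_reg is a hypothetical pointer to the SIU SMASK
 * register; the real 8xx PIC code lives elsewhere.
 */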

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#ifdef CONFIG_PPC64
#include <linux/kallsyms.h>
#endif

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC64
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
#endif

static int ppc_spurious_interrupts;

#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
extern void iSeries_smp_message_recv(struct pt_regs *);
#endif

#ifdef CONFIG_PPC32
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
int __irq_offset_value;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for (j = 0; j < NR_CPUS; j++)
				if (cpu_online(j))
					seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
			   atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
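/*
 * For reference, the formats above produce /proc/interrupts rows of
 * roughly this shape (counts and names purely illustrative; "%3d: "
 * for the irq, "%10u " per online CPU, then the controller typename,
 * Level/Edge, and the action names):
 *
 *	           CPU0       CPU1
 *	 16:      12345       6789    OpenPIC   Level     eth0
 *	BAD:          0
 */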

#ifdef CONFIG_HOTPLUG_CPU
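/*
 * CPU-hotplug helper: steer every interrupt whose affinity mask no
 * longer intersects the surviving-CPU map onto some online CPU.  The
 * final enable/mdelay/disable sequence gives interrupts still latched
 * at the dying CPU a chance to fire and be re-delivered before it
 * goes offline.
 */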
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_affinity[irq], map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].handler->set_affinity)
			irq_desc[irq].handler->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

#ifdef CONFIG_PPC_ISERIES
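/*
 * On legacy iSeries there is no directly visible interrupt
 * controller: the hypervisor hands us IPIs, lpEvents and deferred
 * decrementer ticks through flags in the paca and the lpEvent queue,
 * which this entry path drains.
 */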
void do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
		lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);

	irq_exit();

	if (lpaca->lppaca.int_dword.fields.decr_int) {
		lpaca->lppaca.int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
}

#else /* CONFIG_PPC_ISERIES */

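/*
 * Generic (non-iSeries) external interrupt entry: ask the platform
 * which interrupt is pending via ppc_md.get_irq() and dispatch it.
 * With CONFIG_IRQSTACKS the handler runs on a dedicated per-cpu
 * hardirq stack (set up in irq_ctx_init() below) instead of the
 * interrupted task's kernel stack, which bounds worst-case stack
 * depth.
 */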
void do_IRQ(struct pt_regs *regs)
{
	int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more interrupts pending.  The value
	 * -2 is for buggy hardware and means that this IRQ has
	 * already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call___do_IRQ(irq, regs, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			__do_IRQ(irq, regs);
	} else
#ifdef CONFIG_PPC32
		if (irq != -2)
#endif
			/* That's not SMP safe ... but who cares? */
			ppc_spurious_interrupts++;
	irq_exit();
}

#endif /* CONFIG_PPC_ISERIES */

void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
	static int once = 0;

	if (once)
		return;

	once++;

#endif
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
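/*
 * Worked example, assuming NR_IRQS = 512 and NUM_ISA_INTERRUPTS = 16
 * (values for illustration only): MAX_VIRT_IRQ = 512 - 16 - 1 = 495,
 * NR_VIRT_IRQS = 495 - 3 + 1 = 493, so a real_irq of 1000 would first
 * hash to (1000 % 493) + 3 = 17 before the linear probe below.
 */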
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

void
virt_irq_init(void)
{
	int i;
	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	if (ppc64_interrupt_controller == IC_CELL_PIC)
		return real_irq;	/* no mapping for iic either */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
		       "in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}
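/*
 * Hypothetical caller sketch (not from this file): a PIC setup path
 * would typically do
 *
 *	unsigned int virq = virt_irq_create_mapping(hw_irq);
 *	if (virq == NO_IRQ)
 *		return;			(mapping table full)
 *	virq = irq_offset_up(virq);	(apply the 16-entry ISA offset)
 *
 * and then hand virq to request_irq() and friends.
 */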

/*
 * In most cases we will get a hit on the very first slot checked in
 * the virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;

		if (virq >= MAX_VIRT_IRQ)
			virq = 0;

	} while (first_virq != virq);

	return NO_IRQ;

}

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
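/*
 * Note: on powerpc, current_thread_info() is derived from the stack
 * pointer, so the SOFTIRQ_OFFSET / HARDIRQ_OFFSET baselines set above
 * keep in_interrupt() and related checks reporting the right context
 * while code runs on these dedicated stacks.
 */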

void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curtp, *irqtp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curtp = current_thread_info();
		irqtp = softirq_ctx[smp_processor_id()];
		irqtp->task = curtp->task;
		call_do_softirq(irqtp);
		irqtp->task = NULL;
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */

static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */