Diffstat (limited to 'arch/sparc/kernel/irq_64.c')
-rw-r--r-- | arch/sparc/kernel/irq_64.c | 1101
1 file changed, 1101 insertions, 0 deletions
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
new file mode 100644
index 000000000000..a3ea2bcb95de
--- /dev/null
+++ b/arch/sparc/kernel/irq_64.c
@@ -0,0 +1,1101 @@
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

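/* Map (dev_handle, dev_ino) interrupt source pairs to small "virtual
 * IRQ" numbers.  Entry 0 is reserved so that a zero return from
 * virt_irq_alloc() can signal failure, which is why the allocation
 * scan below starts at entry 1.
 */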
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

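/* Compute the interrupt target ID (TID) to program into an IMAP
 * register.  Starfire needs an explicit translation, JBUS processors
 * (Jalapeno/Serrano) take the cpuid in the JBUS TID field, Safari-bus
 * processors (Cheetah/Cheetah+) split the cpuid into agent and node
 * IDs, and plain UPA systems take the cpuid in the UPA TID field.
 */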
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
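/* Pick a target CPU for an interrupt.  With a fully open affinity
 * mask we round-robin across the online cpus; otherwise we take the
 * first online cpu in the affinity mask, falling back to round-robin
 * if the mask contains no online cpus.
 */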
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

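	/* Recover the INO from the IMAP register's IGN and INO fields,
	 * then apply the bus-specific fixup offset to get the index
	 * into ivector_table.
	 */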
	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

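	/* The cookie handed to the hypervisor is the bucket's physical
	 * address with all bits inverted.  That forces the top bit on,
	 * which appears to be how the device mondo handler tells a
	 * cookie (negative) apart from a plain sysino.
	 */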
	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

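/* Switch to this cpu's dedicated hardirq stack unless we are already
 * running on it.  The new %sp is placed at the top of the stack,
 * below one full register-window save frame (192 bytes) and adjusted
 * by the sparc64 STACK_BIAS that all 64-bit stack pointers carry.
 */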
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
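	/* PSTATE_IE is cleared around the load/store pair so that the
	 * per-cpu worklist head is fetched and zeroed as a single unit
	 * with respect to incoming vector interrupts.
	 */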
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

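		/* As in set_hardirq_stack(): aim %sp at the top of the
		 * per-cpu softirq stack, below one register-window save
		 * frame and with the sparc64 stack bias applied.
		 */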
		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

static void unhandled_perf_irq(struct pt_regs *regs)
{
	unsigned long pcr, pic;

	read_pcr(pcr);
	read_pic(pic);

	write_pcr(0);

	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
	       smp_processor_id());
	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
	       smp_processor_id(), pcr, pic);
}

/* Almost a direct copy of the powerpc PMC code.  */
static DEFINE_SPINLOCK(perf_irq_lock);
static void *perf_irq_owner_caller; /* mostly for debugging */
static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;

/* Invoked from level 15 PIL handler in trap table.  */
void perfctr_irq(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	perf_irq(regs);
}

int register_perfctr_intr(void (*handler)(struct pt_regs *))
{
	int ret;

	if (!handler)
		return -EINVAL;

	spin_lock(&perf_irq_lock);
	if (perf_irq != unhandled_perf_irq) {
		printk(KERN_WARNING "register_perfctr_intr: "
		       "perf IRQ busy (reserved by caller %p)\n",
		       perf_irq_owner_caller);
		ret = -EBUSY;
		goto out;
	}

	perf_irq_owner_caller = __builtin_return_address(0);
	perf_irq = handler;

	ret = 0;
out:
	spin_unlock(&perf_irq_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_perfctr_intr);

void release_perfctr_intr(void (*handler)(struct pt_regs *))
{
	spin_lock(&perf_irq_lock);
	perf_irq_owner_caller = NULL;
	perf_irq = unhandled_perf_irq;
	spin_unlock(&perf_irq_lock);
}
EXPORT_SYMBOL_GPL(release_perfctr_intr);

#ifdef CONFIG_HOTPLUG_CPU
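/* Used by the CPU hotplug code: when a cpu is being taken down,
 * re-run the set_affinity hook of every active, non-per-cpu IRQ so
 * that irq_choose_cpu() picks a still-online target, then shut off
 * this cpu's tick interrupt.
 */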
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it.  */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later.  */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks
	 * at IRQ 14.  We turn both timers off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
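	/* sun4v mondo queue entries are 64 bytes apiece, so a queue
	 * whose byte-size mask is qmask holds (qmask + 1) / 64 entries.
	 */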
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

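	/* One page per cpu: the first 64 bytes hold the mondo data
	 * block used for cross calls, and the remainder holds the cpu
	 * list (one u16 per target cpu); the BUILD_BUG_ON below checks
	 * that the list fits.
	 */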
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor.  */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers; a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}