Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--	arch/sparc64/kernel/irq.c	1269
1 files changed, 1269 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 000000000000..a38cb5036df0
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,1269 @@
/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send an interrupt packet to the UltraSparc with the low
 * 5 (7 on Starfire) bits of the first data reg value holding the IRQ
 * identifier being delivered.  We must translate this into a non-vector
 * IRQ so we can set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists;
 * the PIL dispatch code uses CAS to get an atomic snapshot of the
 * list and clear it at the same time, so the IVEC handler does not
 * need to act atomically.
 */
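
/* Throughout this file, __irq() and __bucket() convert between an
 * ino_bucket pointer and the 32-bit IRQ cookie handed to drivers.
 * A minimal sketch of that mapping, assuming the asm/irq.h definitions
 * of this era (the buckets live in the TLB-locked kernel image, so a
 * 32-bit truncation of the pointer is sufficient and reversible):
 *
 *	#define __irq(bucket)	((unsigned int)(unsigned long)(bucket))
 *	#define __bucket(irq)	((struct ino_bucket *)(unsigned long)(irq))
 */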

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
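
/* For example, irq_work(2, 5) expands to
 * &(__irq_work[2].irq_worklists[5]), the head of CPU 2's list of
 * pending interrupt vectors at PIL 5; handler_irq() below atomically
 * snapshots and empties that list with xchg32().
 */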

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	do { \
		action->flags &= 0xffffffffffffUL; \
		if (__bucket(irq) == &pil0_dummy_bucket) \
			action->flags |= 0xdeadUL << 48; \
		else \
			action->flags |= __irq_ino(irq) << 48; \
	} while (0)
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)
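
/* The resulting layout of the 64-bit irqaction->flags word, as implied
 * by the macros above:
 *
 *	63            48 47                              0
 *	+---------------+--------------------------------+
 *	|      ino      |       SA_* handler flags       |
 *	+---------------+--------------------------------+
 *
 * The dummy timer bucket stores the marker 0xdead instead of a real ino.
 */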

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
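
/* A note on the pstate dance above: wrpr XORs its two source operands,
 * so "wrpr %0, PSTATE_IE, %%pstate" flips the interrupt-enable bit off
 * (spin_lock_irqsave on sparc64 raises %pil rather than clearing
 * PSTATE_IE, and vectored interrupt traps are not blocked by %pil, so
 * IE is still set when we get here).  Keeping IE clear while the bucket
 * is pushed keeps the IVEC handler in entry.S, which prepends to the
 * same per-cpu list, from interleaving with us; the final wrpr restores
 * the saved pstate.
 */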

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if (!handler)
		return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for (ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}

EXPORT_SYMBOL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}
	if (dev_id) {
		for ( ; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for (ent = 0; ent < 4; ent++) {
				if (vector[ent] == action)
					vector[ent] = NULL;
				else if (vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if (other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if (bp != bucket	&&
			    bp->imap == imap	&&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

out:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */

void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived; when registered
	 * for real, the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}

/* Tune this... */
#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP

static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask;
	unsigned int buddy, ticks;

	cpu_mask = get_smpaff_in_irqaction(ap);
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
			goto out;
		}
	}

	if (buddy == cpu)
		goto out;

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}

#endif

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned long random = 0;

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				int ret;

				ret = ap->handler(__irq(bp), ap->dev_id, regs);
				if (ret == IRQ_HANDLED)
					random |= ap->flags;
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						int ret;

						ret = ap->handler(__irq(bp),
								  ap->dev_id,
								  regs);
						if (ret == IRQ_HANDLED)
							random |= ap->flags;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	bucket->flags |= IBF_INPROGRESS;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	bucket->flags &= ~IBF_INPROGRESS;

	irq_exit();
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
	(0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
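
/* Decoding the two magic constants above: 0x10800000 is the SPARC
 * "ba" (branch always, no annul) opcode with a zero 22-bit word
 * displacement; SPARC_BRANCH() ORs in the word offset from inst_addr
 * to dest_addr.  0x01000000 is "sethi %hi(0), %g0", the canonical
 * SPARC nop, used below to fill the branch's delay slot.
 */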

static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register reserved IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register when already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path.  Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	/* Advance to the next online cpu so successive calls
	 * round-robin the retargeted IRQs.
	 */
	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12)
			continue;
		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif


struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
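	/* The two ldxa's below consume the pending vector data, and the
	 * stxa of %g0 to ASI_INTR_RECEIVE then clears the receive
	 * register's BUSY bit so further vectors can be accepted; the
	 * membar #Sync orders that store before anything that follows.
	 */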
	__asm__ __volatile__(
	"	mov	0x40, %%g2\n"
	"	ldxa	[%%g0] %0, %%g1\n"
	"	ldxa	[%%g2] %1, %%g1\n"
	"	stxa	%%g0, [%%g0] %0\n"
	"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}

void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals. */
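	/* While PSTATE_IG is set, the alternate "interrupt" global
	 * register file is live, so the mov below lands in the
	 * interrupt-global %g6; the IVEC trap handler in entry.S can
	 * then find this cpu's worklists directly through that register.
	 */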
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irqaction *ap = bp->irq_info;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
				    unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}