author     David S. Miller <davem@davemloft.net>  2006-06-20 04:21:57 -0400
committer  David S. Miller <davem@davemloft.net>  2006-06-20 04:21:57 -0400
commit     37cdcd9e82108f9b899f1631f66ade2e45738a6e (patch)
tree       452b4a106d767947664b99797640194c7483047e /arch/sparc64/kernel/irq.c
parent     c6387a48cf5958e43c201fc27a158c328927531a (diff)
[SPARC64]: Kill ino_bucket->pil
And reuse that struct member for virt_irq, which will be used in future
changesets for the implementation of mapping between real and virtual IRQ
numbers.

This nicely kills off a ton of SBUS and PCI controller PIL assignment code
which is no longer necessary.

Signed-off-by: David S. Miller <davem@davemloft.net>
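Note: a rough sketch of the struct reuse described above. The real struct ino_bucket is defined in a sparc64 header that is not part of this diff, so the surrounding members, their types and their ordering below are assumptions rather than the actual layout; the point is only that the slot which used to carry a fixed PIL now carries a virtual IRQ number.

/* Illustrative sketch only -- member types and ordering are assumptions,
 * not the real header.
 */
struct ino_bucket {
        unsigned int  irq_chain; /* next pending bucket in the per-CPU worklist */
        unsigned int  virt_irq;  /* reuses the storage of the old "pil" member  */
        unsigned int  flags;     /* IBF_* state bits                            */
        unsigned long iclr;      /* interrupt clear register address            */
        unsigned long imap;      /* interrupt map register address              */
        void         *irq_info;  /* per-IRQ descriptor                          */
};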
Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--  arch/sparc64/kernel/irq.c  70
1 file changed, 26 insertions, 44 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ad134bbc151c..f2668f2bed9c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -70,7 +70,7 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  */
 #define irq_work(__cpu)         &(trap_block[(__cpu)].irq_worklist)
 
-static struct irqaction *irq_action[NR_IRQS+1];
+static struct irqaction *irq_action[NR_IRQS];
 
 /* This only synchronizes entities which modify IRQ handler
  * state and some selected user-level spots that want to
@@ -116,12 +116,9 @@ int show_interrupts(struct seq_file *p, void *v)
                                    kstat_cpu(j).irqs[i]);
                 }
 #endif
-                seq_printf(p, " %s:%lx", action->name,
-                           get_ino_in_irqaction(action));
-                for (action = action->next; action; action = action->next) {
-                        seq_printf(p, ", %s:%lx", action->name,
-                                   get_ino_in_irqaction(action));
-                }
+                seq_printf(p, " %s", action->name);
+                for (action = action->next; action; action = action->next)
+                        seq_printf(p, ", %s", action->name);
                 seq_putc(p, '\n');
         }
 out_unlock:
@@ -245,48 +242,47 @@ void disable_irq(unsigned int irq)
         }
 }
 
-static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
+static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
                             unsigned long iclr, unsigned long imap,
                             struct ino_bucket *bucket)
 {
-        prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
-                    "(%d:%d:%016lx:%016lx), halting...\n",
-                    ino, bucket->pil, bucket->iclr, bucket->imap,
-                    pil, inofixup, iclr, imap);
+        prom_printf("IRQ: INO %04x (%016lx:%016lx) --> "
+                    "(%d:%016lx:%016lx), halting...\n",
+                    ino, bucket->iclr, bucket->imap,
+                    inofixup, iclr, imap);
         prom_halt();
 }
 
-unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 {
         struct ino_bucket *bucket;
         int ino;
 
-        BUG_ON(pil == 0);
         BUG_ON(tlb_type == hypervisor);
 
         /* RULE: Both must be specified in all other cases. */
         if (iclr == 0UL || imap == 0UL) {
-                prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
-                            pil, inofixup, iclr, imap);
+                prom_printf("Invalid build_irq %d %016lx %016lx\n",
+                            inofixup, iclr, imap);
                 prom_halt();
         }
 
         ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
         if (ino > NUM_IVECS) {
-                prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
-                            ino, pil, inofixup, iclr, imap);
+                prom_printf("Invalid INO %04x (%d:%016lx:%016lx)\n",
+                            ino, inofixup, iclr, imap);
                 prom_halt();
         }
 
         bucket = &ivector_table[ino];
         if (bucket->flags & IBF_ACTIVE)
                 build_irq_error("IRQ: Trying to build active INO bucket.\n",
-                                ino, pil, inofixup, iclr, imap, bucket);
+                                ino, inofixup, iclr, imap, bucket);
 
         if (bucket->irq_info) {
                 if (bucket->imap != imap || bucket->iclr != iclr)
                         build_irq_error("IRQ: Trying to reinit INO bucket.\n",
-                                        ino, pil, inofixup, iclr, imap, bucket);
+                                        ino, inofixup, iclr, imap, bucket);
 
                 goto out;
         }
@@ -302,14 +298,13 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
          */
         bucket->imap = imap;
         bucket->iclr = iclr;
-        bucket->pil = pil;
         bucket->flags = 0;
 
 out:
         return __irq(bucket);
 }
 
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
 {
         struct ino_bucket *bucket;
         unsigned long sysino;
@@ -328,7 +323,6 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsign
         bucket->imap = ~0UL - sysino;
         bucket->iclr = ~0UL - sysino;
 
-        bucket->pil = pil;
         bucket->flags = flags;
 
         bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
@@ -356,16 +350,12 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 
 static int check_irq_sharing(int pil, unsigned long irqflags)
 {
-        struct irqaction *action, *tmp;
+        struct irqaction *action;
 
         action = *(irq_action + pil);
         if (action) {
-                if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
-                        for (tmp = action; tmp->next; tmp = tmp->next)
-                                ;
-                } else {
+                if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ))
                         return -EBUSY;
-                }
         }
         return 0;
 }
@@ -425,12 +415,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
                  * installing a new handler, but is this really a problem,
                  * only the sysadmin is able to do this.
                  */
-                rand_initialize_irq(irq);
+                rand_initialize_irq(PIL_DEVICE_IRQ);
         }
 
         spin_lock_irqsave(&irq_action_lock, flags);
 
-        if (check_irq_sharing(bucket->pil, irqflags)) {
+        if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
                 spin_unlock_irqrestore(&irq_action_lock, flags);
                 return -EBUSY;
         }
@@ -454,7 +444,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
         put_ino_in_irqaction(action, irq);
         put_smpaff_in_irqaction(action, CPU_MASK_NONE);
 
-        append_irq_action(bucket->pil, action);
+        append_irq_action(PIL_DEVICE_IRQ, action);
 
         enable_irq(irq);
 
@@ -478,16 +468,15 @@ EXPORT_SYMBOL(request_irq);
 
 static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
 {
-        struct ino_bucket *bucket = __bucket(irq);
         struct irqaction *action, **pp;
 
-        pp = irq_action + bucket->pil;
+        pp = irq_action + PIL_DEVICE_IRQ;
         action = *pp;
         if (unlikely(!action))
                 return NULL;
 
         if (unlikely(!action->handler)) {
-                printk("Freeing free IRQ %d\n", bucket->pil);
+                printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
                 return NULL;
         }
 
@@ -648,7 +637,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 
         /* Test and add entropy */
         if (random & SA_SAMPLE_RANDOM)
-                add_interrupt_randomness(bp->pil);
+                add_interrupt_randomness(PIL_DEVICE_IRQ);
 out:
         bp->flags &= ~IBF_INPROGRESS;
 }
@@ -691,7 +680,7 @@ void handler_irq(int irq, struct pt_regs *regs)
         while (bp) {
                 struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
-                kstat_this_cpu.irqs[bp->pil]++;
+                kstat_this_cpu.irqs[bp->virt_irq]++;
 
                 bp->irq_chain = 0;
                 process_bucket(bp, regs);
@@ -817,16 +806,9 @@ static void distribute_irqs(void)
         spin_lock_irqsave(&irq_action_lock, flags);
         cpu = 0;
 
-        /*
-         * Skip the timer at [0], and very rare error/power intrs at [15].
-         * Also level [12], it causes problems on Ex000 systems.
-         */
         for (level = 1; level < NR_IRQS; level++) {
                 struct irqaction *p = irq_action[level];
 
-                if (level == 12)
-                        continue;
-
                 while(p) {
                         cpu = retarget_one_irq(p, cpu);
                         p = p->next;
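Note: from a caller's point of view the change boils down to dropping the PIL argument from build_irq(). A hypothetical bus-driver call site is sketched below; the variable names pil, inofixup, iclr and imap are placeholders, and the real SBUS/PCI controller updates live in other files outside this diff.

        /* before: every controller driver picked a PIL by hand */
        unsigned int irq = build_irq(pil, inofixup, iclr, imap);

        /* after: device interrupts all run at PIL_DEVICE_IRQ, so only the
         * INO fixup and the IMAP/ICLR register addresses are passed in
         */
        unsigned int irq = build_irq(inofixup, iclr, imap);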