author     David S. Miller <davem@davemloft.net>  2006-06-20 04:22:35 -0400
committer  David S. Miller <davem@davemloft.net>  2006-06-20 04:22:35 -0400
commit     8047e247c899f80c33a23ad7e9e250224f0d26a5 (patch)
tree       253a7ba3c902730928214df5c2b5630d7875cc11 /arch/sparc64
parent     37cdcd9e82108f9b899f1631f66ade2e45738a6e (diff)
[SPARC64]: Virtualize IRQ numbers.
Inspired by the PowerPC XICS interrupt support code.

All IRQs are virtualized in order to keep NR_IRQS from needing to be too large. Interrupts on sparc64 are arbitrary 11-bit values, but we don't need to define NR_IRQS as 2048 if we virtualize the IRQs.

As PCI and SBUS controller drivers build device IRQs, we divvy out virtual IRQ numbers incrementally, starting at 1. Zero is a special virtual IRQ reserved for the timer interrupt.

So device drivers all see virtual IRQs, and all the normal interfaces such as request_irq(), enable_irq(), etc. translate them into a real IRQ number in order to configure the IRQ.

At this point, knowledge of struct ino_bucket is almost entirely contained within arch/sparc64/kernel/irq.c. A few small bits in the PCI controller drivers still need to be swept away before we can move ino_bucket's definition out of asm-sparc64/irq.h and make it private to kernel/irq.c.

Signed-off-by: David S. Miller <davem@davemloft.net>
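For readers skimming the diff below, here is a minimal user-space sketch of the translation scheme the message describes. It reuses the names the patch introduces (virt_irq_alloc(), virt_to_real_irq(), virt_to_real_irq_table); the NR_IRQS value and the main() driver are illustrative assumptions, not kernel code.

	#include <stdio.h>

	/* Illustrative value: the kernel's BUILD_BUG_ON(NR_IRQS >= 256)
	 * guarantees a virtual IRQ fits in an unsigned char.
	 */
	#define NR_IRQS 255

	static unsigned int virt_to_real_irq_table[NR_IRQS];
	static unsigned char virt_irq_cur = 1;	/* virtual IRQ 0 is the timer */

	/* Hand out the next virtual IRQ for a real (11-bit) interrupt number. */
	static unsigned char virt_irq_alloc(unsigned int real_irq)
	{
		unsigned char ent = virt_irq_cur;

		if (ent >= NR_IRQS) {
			fprintf(stderr, "IRQ: Out of virtual IRQs.\n");
			return 0;
		}
		virt_irq_cur = ent + 1;
		virt_to_real_irq_table[ent] = real_irq;
		return ent;
	}

	/* What request_irq()/enable_irq()/etc. do internally: translate the
	 * driver-visible virtual IRQ back to the real one before touching
	 * hardware.
	 */
	static unsigned int virt_to_real_irq(unsigned char virt_irq)
	{
		return virt_to_real_irq_table[virt_irq];
	}

	int main(void)
	{
		unsigned char v = virt_irq_alloc(0x7e3);	/* some 11-bit value */

		printf("real 0x7e3 -> virt %u -> real 0x%x\n",
		       v, virt_to_real_irq(v));
		return 0;
	}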
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/irq.c         | 263
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c  |   6
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c   |  18
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c  |  86
-rw-r--r--  arch/sparc64/kernel/sbus.c        |   2
5 files changed, 231 insertions(+), 144 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f2668f2bed9c..49ad9cd0f8c8 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -70,7 +70,10 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)))
  */
 #define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
 
-static struct irqaction *irq_action[NR_IRQS];
+static struct irqaction timer_irq_action = {
+	.name = "timer",
+};
+static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };
 
 /* This only synchronizes entities which modify IRQ handler
  * state and some selected user-level spots that want to
@@ -79,6 +82,59 @@ static struct irqaction *irq_action[NR_IRQS];
  */
 static DEFINE_SPINLOCK(irq_action_lock);
 
+static unsigned int virt_to_real_irq_table[NR_IRQS];
+static unsigned char virt_irq_cur = 1;
+
+static unsigned char virt_irq_alloc(unsigned int real_irq)
+{
+	unsigned char ent;
+
+	BUILD_BUG_ON(NR_IRQS >= 256);
+
+	ent = virt_irq_cur;
+	if (ent >= NR_IRQS) {
+		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+		return 0;
+	}
+
+	virt_irq_cur = ent + 1;
+	virt_to_real_irq_table[ent] = real_irq;
+
+	return ent;
+}
+
+#if 0 /* Currently unused. */
+static unsigned char real_to_virt_irq(unsigned int real_irq)
+{
+	struct ino_bucket *bucket = __bucket(real_irq);
+
+	return bucket->virt_irq;
+}
+#endif
+
+static unsigned int virt_to_real_irq(unsigned char virt_irq)
+{
+	return virt_to_real_irq_table[virt_irq];
+}
+
+void irq_install_pre_handler(int virt_irq,
+			     void (*func)(struct ino_bucket *, void *, void *),
+			     void *arg1, void *arg2)
+{
+	unsigned int real_irq = virt_to_real_irq(virt_irq);
+	struct ino_bucket *bucket;
+	struct irq_desc *d;
+
+	if (unlikely(!real_irq))
+		return;
+
+	bucket = __bucket(real_irq);
+	d = bucket->irq_info;
+	d->pre_handler = func;
+	d->pre_handler_arg1 = arg1;
+	d->pre_handler_arg2 = arg2;
+}
+
 static void register_irq_proc (unsigned int irq);
 
 /*
@@ -164,14 +220,18 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
 	return tid;
 }
 
-/* Now these are always passed a true fully specified sun4u INO. */
-void enable_irq(unsigned int irq)
+void enable_irq(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = __bucket(irq);
+	unsigned int real_irq = virt_to_real_irq(virt_irq);
+	struct ino_bucket *bucket;
 	unsigned long imap, cpuid;
 
+	if (unlikely(!real_irq))
+		return;
+
+	bucket = __bucket(real_irq);
 	imap = bucket->imap;
-	if (imap == 0UL)
+	if (unlikely(imap == 0UL))
 		return;
 
 	preempt_disable();
@@ -182,7 +242,7 @@ void enable_irq(unsigned int irq)
 	cpuid = real_hard_smp_processor_id();
 
 	if (tlb_type == hypervisor) {
-		unsigned int ino = __irq_ino(irq);
+		unsigned int ino = __irq_ino(real_irq);
 		int err;
 
 		err = sun4v_intr_settarget(ino, cpuid);
@@ -211,34 +271,39 @@ void enable_irq(unsigned int irq)
 	preempt_enable();
 }
 
-/* This now gets passed true ino's as well. */
-void disable_irq(unsigned int irq)
+void disable_irq(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = __bucket(irq);
+	unsigned int real_irq = virt_to_real_irq(virt_irq);
+	struct ino_bucket *bucket;
 	unsigned long imap;
 
+	if (unlikely(!real_irq))
+		return;
+
+	bucket = __bucket(real_irq);
 	imap = bucket->imap;
-	if (imap != 0UL) {
-		if (tlb_type == hypervisor) {
-			unsigned int ino = __irq_ino(irq);
-			int err;
-
-			err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
-			if (err != HV_EOK)
-				printk("sun4v_intr_setenabled(%x): "
-				       "err(%d)\n", ino, err);
-		} else {
-			u32 tmp;
-
-			/* NOTE: We do not want to futz with the IRQ clear registers
-			 *       and move the state to IDLE, the SCSI code does call
-			 *       disable_irq() to assure atomicity in the queue cmd
-			 *       SCSI adapter driver code.  Thus we'd lose interrupts.
-			 */
-			tmp = upa_readl(imap);
-			tmp &= ~IMAP_VALID;
-			upa_writel(tmp, imap);
-		}
+	if (unlikely(imap == 0UL))
+		return;
+
+	if (tlb_type == hypervisor) {
+		unsigned int ino = __irq_ino(real_irq);
+		int err;
+
+		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setenabled(%x): "
+			       "err(%d)\n", ino, err);
+	} else {
+		u32 tmp;
+
+		/* NOTE: We do not want to futz with the IRQ clear registers
+		 *       and move the state to IDLE, the SCSI code does call
+		 *       disable_irq() to assure atomicity in the queue cmd
+		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
+		 */
+		tmp = upa_readl(imap);
+		tmp &= ~IMAP_VALID;
+		upa_writel(tmp, imap);
 	}
 }
 
@@ -253,14 +318,14 @@ static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
 	prom_halt();
 }
 
-unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
 {
 	struct ino_bucket *bucket;
 	int ino;
 
 	BUG_ON(tlb_type == hypervisor);
 
-	/* RULE: Both must be specified in all other cases. */
+	/* RULE: Both must be specified. */
 	if (iclr == 0UL || imap == 0UL) {
 		prom_printf("Invalid build_irq %d %016lx %016lx\n",
 			    inofixup, iclr, imap);
@@ -298,10 +363,12 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 	 */
 	bucket->imap = imap;
 	bucket->iclr = iclr;
-	bucket->flags = 0;
+	if (!bucket->virt_irq)
+		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
+	bucket->flags = flags;
 
 out:
-	return __irq(bucket);
+	return bucket->virt_irq;
 }
 
 unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
@@ -322,7 +389,8 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
 	 */
 	bucket->imap = ~0UL - sysino;
 	bucket->iclr = ~0UL - sysino;
-
+	if (!bucket->virt_irq)
+		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
 	bucket->flags = flags;
 
 	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
@@ -331,7 +399,7 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
 		prom_halt();
 	}
 
-	return __irq(bucket);
+	return bucket->virt_irq;
 }
 
 static void atomic_bucket_insert(struct ino_bucket *bucket)
@@ -390,37 +458,42 @@ static struct irqaction *get_action_slot(struct ino_bucket *bucket)
 	return NULL;
 }
 
-int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+int request_irq(unsigned int virt_irq,
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
 		unsigned long irqflags, const char *name, void *dev_id)
 {
 	struct irqaction *action;
-	struct ino_bucket *bucket = __bucket(irq);
+	struct ino_bucket *bucket;
 	unsigned long flags;
+	unsigned int real_irq;
 	int pending = 0;
 
+	real_irq = virt_to_real_irq(virt_irq);
+	if (unlikely(!real_irq))
+		return -EINVAL;
+
 	if (unlikely(!handler))
 		return -EINVAL;
 
+	bucket = __bucket(real_irq);
 	if (unlikely(!bucket->irq_info))
 		return -ENODEV;
 
 	if (irqflags & SA_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
-		 * outside of the atomic block. In SA_STATIC_ALLOC case,
-		 * random driver's kmalloc will fail, but it is safe.
-		 * If already initialized, random driver will not reinit.
-		 * Yes, this might clear the entropy pool if the wrong
-		 * driver is attempted to be loaded, without actually
-		 * installing a new handler, but is this really a problem,
-		 * only the sysadmin is able to do this.
-		 */
-		rand_initialize_irq(PIL_DEVICE_IRQ);
+		 * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if the wrong
+		 * driver is attempted to be loaded, without actually
+		 * installing a new handler, but is this really a problem,
+		 * only the sysadmin is able to do this.
+		 */
+		rand_initialize_irq(virt_irq);
 	}
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
+	if (check_irq_sharing(virt_irq, irqflags)) {
 		spin_unlock_irqrestore(&irq_action_lock, flags);
 		return -EBUSY;
 	}
@@ -441,12 +514,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
 	action->name = name;
 	action->next = NULL;
 	action->dev_id = dev_id;
-	put_ino_in_irqaction(action, irq);
+	put_ino_in_irqaction(action, __irq_ino(real_irq));
 	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
 
-	append_irq_action(PIL_DEVICE_IRQ, action);
+	append_irq_action(virt_irq, action);
 
-	enable_irq(irq);
+	enable_irq(virt_irq);
 
 	/* We ate the IVEC already, this makes sure it does not get lost. */
 	if (pending) {
@@ -456,7 +529,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
-	register_irq_proc(__irq_ino(irq));
+	register_irq_proc(virt_irq);
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
@@ -466,17 +539,17 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
 
 EXPORT_SYMBOL(request_irq);
 
-static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
+static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
 {
 	struct irqaction *action, **pp;
 
-	pp = irq_action + PIL_DEVICE_IRQ;
+	pp = irq_action + virt_irq;
 	action = *pp;
 	if (unlikely(!action))
 		return NULL;
 
 	if (unlikely(!action->handler)) {
-		printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
+		printk("Freeing free IRQ %d\n", virt_irq);
 		return NULL;
 	}
 
@@ -491,28 +564,33 @@ static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
 	return action;
 }
 
-void free_irq(unsigned int irq, void *dev_id)
+void free_irq(unsigned int virt_irq, void *dev_id)
 {
 	struct irqaction *action;
 	struct ino_bucket *bucket;
 	struct irq_desc *desc;
 	unsigned long flags;
+	unsigned int real_irq;
 	int ent, i;
 
+	real_irq = virt_to_real_irq(virt_irq);
+	if (unlikely(!real_irq))
+		return;
+
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = unlink_irq_action(irq, dev_id);
+	action = unlink_irq_action(virt_irq, dev_id);
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
 	if (unlikely(!action))
 		return;
 
-	synchronize_irq(irq);
+	synchronize_irq(virt_irq);
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	bucket = __bucket(irq);
+	bucket = __bucket(real_irq);
 	desc = bucket->irq_info;
 
 	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
@@ -545,7 +623,7 @@ void free_irq(unsigned int irq, void *dev_id)
 		 * the same IMAP are active.
 		 */
 		if (ent == NUM_IVECS)
-			disable_irq(irq);
+			disable_irq(virt_irq);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -554,10 +632,15 @@ void free_irq(unsigned int irq, void *dev_id)
 EXPORT_SYMBOL(free_irq);
 
 #ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
+void synchronize_irq(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = __bucket(irq);
+	unsigned int real_irq = virt_to_real_irq(virt_irq);
+	struct ino_bucket *bucket;
+
+	if (unlikely(!real_irq))
+		return;
 
+	bucket = __bucket(real_irq);
 #if 0
 	/* The following is how I wish I could implement this.
 	 * Unfortunately the ICLR registers are read-only, you can
@@ -616,7 +699,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 
 		action_mask &= ~mask;
 
-		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
+		if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
 			random |= p->flags;
 
 		if (!action_mask)
@@ -637,7 +720,7 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 
 	/* Test and add entropy */
 	if (random & SA_SAMPLE_RANDOM)
-		add_interrupt_randomness(PIL_DEVICE_IRQ);
+		add_interrupt_randomness(bp->virt_irq);
 out:
 	bp->flags &= ~IBF_INPROGRESS;
 }
@@ -657,7 +740,7 @@ void timer_irq(int irq, struct pt_regs *regs)
 	clear_softint(clr_mask);
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_this_cpu.irqs[0]++;
 	timer_interrupt(irq, NULL, regs);
 	irq_exit();
 }
@@ -1022,13 +1105,13 @@ void __init init_IRQ(void)
 			     : "g1");
 }
 
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir [NUM_IVECS];
+static struct proc_dir_entry *root_irq_dir;
+static struct proc_dir_entry *irq_dir[NR_IRQS];
 
 #ifdef CONFIG_SMP
 
-static int irq_affinity_read_proc (char *page, char **start, off_t off,
-			int count, int *eof, void *data)
+static int irq_affinity_read_proc(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
 {
 	struct ino_bucket *bp = ivector_table + (long)data;
 	struct irq_desc *desc = bp->irq_info;
@@ -1047,11 +1130,20 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
 	return len;
 }
 
-static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
+static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
 {
-	struct ino_bucket *bp = ivector_table + irq;
-	struct irq_desc *desc = bp->irq_info;
-	struct irqaction *ap = desc->action;
+	struct ino_bucket *bp;
+	struct irq_desc *desc;
+	struct irqaction *ap;
+	unsigned int real_irq;
+
+	real_irq = virt_to_real_irq(virt_irq);
+	if (unlikely(!real_irq))
+		return;
+
+	bp = __bucket(real_irq);
+	desc = bp->irq_info;
+	ap = desc->action;
 
 	/* Users specify affinity in terms of hw cpu ids.
 	 * As soon as we do this, handler_irq() might see and take action.
@@ -1060,13 +1152,16 @@ static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
 
 	/* Migration is simply done by the next cpu to service this
 	 * interrupt.
+	 *
+	 * XXX Broken, this doesn't happen anymore...
 	 */
 }
 
-static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
-			unsigned long count, void *data)
+static int irq_affinity_write_proc(struct file *file,
+				   const char __user *buffer,
+				   unsigned long count, void *data)
 {
-	int irq = (long) data, full_count = count, err;
+	int virt_irq = (long) data, full_count = count, err;
 	cpumask_t new_value;
 
 	err = cpumask_parse(buffer, count, new_value);
@@ -1080,7 +1175,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
 	if (cpus_empty(new_value))
 		return -EINVAL;
 
-	set_intr_affinity(irq, new_value);
+	set_intr_affinity(virt_irq, new_value);
 
 	return full_count;
 }
@@ -1089,18 +1184,18 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
 
 #define MAX_NAMELEN 10
 
-static void register_irq_proc (unsigned int irq)
+static void register_irq_proc(unsigned int virt_irq)
 {
 	char name [MAX_NAMELEN];
 
-	if (!root_irq_dir || irq_dir[irq])
+	if (!root_irq_dir || irq_dir[virt_irq])
 		return;
 
 	memset(name, 0, MAX_NAMELEN);
-	sprintf(name, "%x", irq);
+	sprintf(name, "%d", virt_irq);
 
 	/* create /proc/irq/1234 */
-	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+	irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);
 
 #ifdef CONFIG_SMP
 	/* XXX SMP affinity not supported on starfire yet. */
@@ -1112,7 +1207,7 @@ static void register_irq_proc (unsigned int irq)
 
 	if (entry) {
 		entry->nlink = 1;
-		entry->data = (void *)(long)irq;
+		entry->data = (void *)(long)virt_irq;
 		entry->read_proc = irq_affinity_read_proc;
 		entry->write_proc = irq_affinity_write_proc;
 	}
@@ -1120,7 +1215,7 @@ static void register_irq_proc (unsigned int irq)
 #endif
 }
 
-void init_irq_proc (void)
+void init_irq_proc(void)
 {
 	/* create /proc/irq */
 	root_irq_dir = proc_mkdir("irq", NULL);
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 5743e1316a93..f2d1097f541d 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -280,7 +280,6 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
 				     struct pci_dev *pdev,
 				     unsigned int ino)
 {
-	struct ino_bucket *bucket;
 	unsigned long imap, iclr;
 	unsigned long imap_off, iclr_off;
 	int inofixup = 0;
@@ -309,10 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
 	if ((ino & 0x20) == 0)
 		inofixup = ino & 0x03;
 
-	bucket = __bucket(build_irq(inofixup, iclr, imap));
-	bucket->flags |= IBF_PCI;
-
-	return __irq(bucket);
+	return build_irq(inofixup, iclr, imap, IBF_PCI);
 }
 
 /* PSYCHO error handling support. */
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index caa7aeed5d14..846c1205aa9a 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -544,10 +544,10 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
 				    struct pci_dev *pdev,
 				    unsigned int ino)
 {
-	struct ino_bucket *bucket;
 	unsigned long imap, iclr;
 	unsigned long imap_off, iclr_off;
 	int inofixup = 0;
+	int virt_irq;
 
 	ino &= PCI_IRQ_INO;
 	if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -573,23 +573,23 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
 	if ((ino & 0x20) == 0)
 		inofixup = ino & 0x03;
 
-	bucket = __bucket(build_irq(inofixup, iclr, imap));
-	bucket->flags |= IBF_PCI;
+	virt_irq = build_irq(inofixup, iclr, imap, IBF_PCI);
 
 	if (pdev) {
 		struct pcidev_cookie *pcp = pdev->sysdata;
 
 		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
 			struct pci_controller_info *p = pcp->pbm->parent;
-			struct irq_desc *d = bucket->irq_info;
 
-			d->pre_handler = sabre_wsync_handler;
-			d->pre_handler_arg1 = pdev;
-			d->pre_handler_arg2 = (void *)
-				p->pbm_A.controller_regs + SABRE_WRSYNC;
+			irq_install_pre_handler(virt_irq,
+						sabre_wsync_handler,
+						pdev,
+						(void *)
+						p->pbm_A.controller_regs +
+						SABRE_WRSYNC);
 		}
 	}
-	return __irq(bucket);
+	return virt_irq;
 }
 
 /* SABRE error handling support. */
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index ca49ef08236d..0c400b5fa5bd 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -270,25 +270,33 @@ static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
 	}
 }
 
+static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
+					unsigned int ino)
+{
+	ino &= PCI_IRQ_INO;
+	return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
+}
+
+static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
+					unsigned int ino)
+{
+	ino &= PCI_IRQ_INO;
+	return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
+}
+
 static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
 				     struct pci_dev *pdev,
 				     unsigned int ino)
 {
-	struct ino_bucket *bucket;
 	unsigned long imap, iclr;
-	unsigned long imap_off, iclr_off;
 	int ign_fixup;
+	int virt_irq;
 
 	ino &= PCI_IRQ_INO;
-	imap_off = schizo_imap_offset(ino);
 
 	/* Now build the IRQ bucket. */
-	imap = pbm->pbm_regs + imap_off;
-	imap += 4;
-
-	iclr_off = schizo_iclr_offset(ino);
-	iclr = pbm->pbm_regs + iclr_off;
-	iclr += 4;
+	imap = schizo_ino_to_imap(pbm, ino);
+	iclr = schizo_ino_to_iclr(pbm, ino);
 
 	/* On Schizo, no inofixup occurs.  This is because each
 	 * INO has it's own IMAP register.  On Psycho and Sabre
@@ -305,19 +313,17 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
 		ign_fixup = (1 << 6);
 	}
 
-	bucket = __bucket(build_irq(ign_fixup, iclr, imap));
-	bucket->flags |= IBF_PCI;
+	virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);
 
 	if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
-		struct irq_desc *p = bucket->irq_info;
-
-		p->pre_handler = tomatillo_wsync_handler;
-		p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
-				       (void *) 1 : (void *) 0);
-		p->pre_handler_arg2 = (void *) pbm->sync_reg;
+		irq_install_pre_handler(virt_irq,
+					tomatillo_wsync_handler,
+					((pbm->chip_version <= 4) ?
+					 (void *) 1 : (void *) 0),
+					(void *) pbm->sync_reg);
 	}
 
-	return __irq(bucket);
+	return virt_irq;
 }
 
 /* SCHIZO error handling support. */
@@ -358,7 +364,6 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
 static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
 {
 	struct pci_pbm_info *pbm;
-	struct ino_bucket *bucket;
 	unsigned long iclr;
 
 	/* Do not clear the interrupt for the other PCI bus.
@@ -376,11 +381,11 @@ static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
 	else
 		pbm = &p->pbm_A;
 
-	irq = schizo_irq_build(pbm, NULL,
-			       (pbm->portid << 6) | (irq & IMAP_INO));
-	bucket = __bucket(irq);
-	iclr = bucket->iclr;
+	schizo_irq_build(pbm, NULL,
+			 (pbm->portid << 6) | (irq & IMAP_INO));
 
+	iclr = schizo_ino_to_iclr(pbm,
+				  (pbm->portid << 6) | (irq & IMAP_INO));
 	upa_writel(ICLR_IDLE, iclr);
 }
 
@@ -1125,7 +1130,6 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 {
 	struct pci_pbm_info *pbm;
 	unsigned int irq;
-	struct ino_bucket *bucket;
 	u64 tmp, err_mask, err_no_mask;
 
 	/* Build IRQs and register handlers. */
@@ -1137,8 +1141,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
@@ -1150,8 +1153,7 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_CE_INO) + 4));
 
@@ -1164,8 +1166,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+						 SCHIZO_PCIERR_A_INO)));
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
 
@@ -1178,8 +1180,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+						 SCHIZO_PCIERR_B_INO)));
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
 
@@ -1191,8 +1193,8 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
+						 SCHIZO_SERR_INO)));
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));
 
@@ -1263,7 +1265,6 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 {
 	struct pci_pbm_info *pbm;
 	unsigned int irq;
-	struct ino_bucket *bucket;
 	u64 tmp, err_mask, err_no_mask;
 
 	/* Build IRQs and register handlers. */
@@ -1275,8 +1276,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
 	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
@@ -1287,8 +1287,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
 
 	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
@@ -1299,8 +1298,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
 
	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
@@ -1311,8 +1309,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
 
 	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
@@ -1323,8 +1320,7 @@ static void schizo_register_error_handlers(struct pci_controller_info *p)
 			    pbm->name);
 		prom_halt();
 	}
-	bucket = __bucket(irq);
-	tmp = upa_readl(bucket->imap);
+	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
 
 	/* Enable UE and CE interrupts for controller. */
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 8812417247d4..5544cf5d38b2 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 
 		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
 	}
-	return build_irq(sbus_level, iclr, imap);
+	return build_irq(sbus_level, iclr, imap, 0);
 }
 
 /* Error interrupt handling. */