author     Sam Ravnborg <sam@ravnborg.org>        2011-01-22 06:32:19 -0500
committer  David S. Miller <davem@davemloft.net>  2011-03-16 21:19:03 -0400
commit     fe41493f749797f516deb84ba07747fb7d8c04c1 (patch)
tree       7b4998c2095c34d5bf4128801c836d5ad532d7b5 /arch/sparc
parent     4832b992553ef6c714c604809d9d3f0f4d137b7e (diff)
sparc64: rename virt_irq => irq - I
The generic irq support uses the term 'irq' for the
allocated irq number.
Fix it so sparc64 uses the same term for an irq as the
generic irq support does.
This is less confusing for a reader new to the code.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
 arch/sparc/kernel/entry.h  |   4
 arch/sparc/kernel/irq_64.c | 153
 2 files changed, 77 insertions, 80 deletions
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index c011b932bb17..d1f1361c4167 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
 struct ino_bucket {
 /*0x00*/unsigned long __irq_chain_pa;
 
-	/* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
+	/* Interrupt number assigned to this INO. */
+/*0x08*/unsigned int __irq;
 /*0x0c*/unsigned int __pad;
 };
 
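The /*0x00*/, /*0x08*/ and /*0x0c*/ comments record the byte offsets that the physical-address accessors in irq_64.c below hard-code via offsetof(). As a minimal sketch, not part of the patch, the layout can be pinned at compile time, assuming an LP64 target (8-byte unsigned long) as on sparc64:

/* Sketch only: verify the documented ino_bucket offsets at compile time.
 * Assumes LP64 (sizeof(unsigned long) == 8), as on sparc64. */
#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */

struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
/*0x08*/unsigned int __irq;
/*0x0c*/unsigned int __pad;
};

static_assert(offsetof(struct ino_bucket, __irq_chain_pa) == 0x00, "offset");
static_assert(offsetof(struct ino_bucket, __irq) == 0x08, "offset");
static_assert(offsetof(struct ino_bucket, __pad) == 0x0c, "offset");

int main(void) { return 0; }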
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f356e4cd7420..eb16e3b8a2dd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
			     "i" (ASI_PHYS_USE_EC));
 }
 
-static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+static unsigned int bucket_get_irq(unsigned long bucket_pa)
 {
	unsigned int ret;
 
@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
-					     __virt_irq)),
+					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
 
	return ret;
 }
 
-static void bucket_set_virt_irq(unsigned long bucket_pa,
-				unsigned int virt_irq)
+static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
 {
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
-			     : "r" (virt_irq),
+			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
-					     __virt_irq)),
+					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
 }
 
@@ -114,50 +113,49 @@ static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
-} virt_irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(virt_irq_alloc_lock);
+} irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(irq_alloc_lock);
 
-unsigned char virt_irq_alloc(unsigned int dev_handle,
-			     unsigned int dev_ino)
+unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
 {
	unsigned long flags;
	unsigned char ent;
 
	BUILD_BUG_ON(NR_IRQS >= 256);
 
-	spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+	spin_lock_irqsave(&irq_alloc_lock, flags);
 
	for (ent = 1; ent < NR_IRQS; ent++) {
-		if (!virt_irq_table[ent].in_use)
+		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
-		virt_irq_table[ent].dev_handle = dev_handle;
-		virt_irq_table[ent].dev_ino = dev_ino;
-		virt_irq_table[ent].in_use = 1;
+		irq_table[ent].dev_handle = dev_handle;
+		irq_table[ent].dev_ino = dev_ino;
+		irq_table[ent].in_use = 1;
	}
 
-	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+	spin_unlock_irqrestore(&irq_alloc_lock, flags);
 
	return ent;
 }
 
 #ifdef CONFIG_PCI_MSI
-void virt_irq_free(unsigned int virt_irq)
+void irq_free(unsigned int irq)
 {
	unsigned long flags;
 
-	if (virt_irq >= NR_IRQS)
+	if (irq >= NR_IRQS)
		return;
 
-	spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+	spin_lock_irqsave(&irq_alloc_lock, flags);
 
-	virt_irq_table[virt_irq].in_use = 0;
+	irq_table[irq].in_use = 0;
 
-	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+	spin_unlock_irqrestore(&irq_alloc_lock, flags);
 }
 #endif
 
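The scan above is the entire allocator: entry 0 is reserved as the "no irq" sentinel, and BUILD_BUG_ON pins NR_IRQS below 256 so the allocated entry fits the unsigned char return. A hypothetical userspace model of the same pattern, with the kernel spinlock and printk dropped for brevity:

/* Sketch: table-based irq allocator modeled on irq_alloc()/irq_free();
 * locking omitted, so this model is single-threaded only. */
#include <stdio.h>

#define NR_IRQS 255	/* must stay below 256 for the unsigned char return */

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];

static unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned int ent;

	for (ent = 1; ent < NR_IRQS; ent++)	/* entry 0 = "no irq" */
		if (!irq_table[ent].in_use)
			break;
	if (ent >= NR_IRQS)
		return 0;			/* out of irqs */
	irq_table[ent].dev_handle = dev_handle;
	irq_table[ent].dev_ino = dev_ino;
	irq_table[ent].in_use = 1;
	return (unsigned char)ent;
}

static void irq_free(unsigned int irq)
{
	if (irq == 0 || irq >= NR_IRQS)
		return;
	irq_table[irq].in_use = 0;
}

int main(void)
{
	unsigned char a = irq_alloc(0, 0x21);
	unsigned char b = irq_alloc(0, 0x22);

	printf("allocated irq %u and irq %u\n", a, b);
	irq_free(a);
	printf("freed slot reused as irq %u\n", irq_alloc(0, 0x23));
	return 0;
}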
@@ -253,25 +251,25 @@ struct irq_handler_data {
 };
 
 #ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
+static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
 {
	cpumask_t mask;
	int cpuid;
 
	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
-		cpuid = map_to_cpu(virt_irq);
+		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;
 
		cpus_and(tmp, cpu_online_map, mask);
-		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
+		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
	}
 
	return cpuid;
 }
 #else
-#define irq_choose_cpu(virt_irq, affinity) \
+#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
 #endif
 
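The policy here: if the affinity mask is exactly the online map there is no real restriction, so fall back to the default per-irq mapping; otherwise pick the first online CPU in the mask, or the default mapping if the intersection is empty. A sketch with plain bitmask cpumasks and a stubbed map_to_cpu(), both simplifications rather than the kernel types:

/* Sketch: the irq_choose_cpu() policy using bitmask cpumasks. */
#include <stdio.h>

static unsigned long cpu_online_map = 0x0f;	/* example: CPUs 0-3 online */

static int map_to_cpu(unsigned int irq)
{
	return irq % 4;				/* stub default mapping */
}

static int irq_choose_cpu(unsigned int irq, unsigned long affinity)
{
	unsigned long tmp;

	if (affinity == cpu_online_map)		/* no real restriction */
		return map_to_cpu(irq);
	tmp = affinity & cpu_online_map;	/* cpus_and() */
	return tmp ? __builtin_ctzl(tmp)	/* first_cpu(); GCC builtin */
		   : map_to_cpu(irq);		/* empty mask: fall back */
}

int main(void)
{
	printf("irq 5, mask 0x08 -> cpu %d\n", irq_choose_cpu(5, 0x08));
	printf("irq 5, mask 0x0f -> cpu %d\n", irq_choose_cpu(5, 0x0f));
	printf("irq 5, mask 0x30 -> cpu %d\n", irq_choose_cpu(5, 0x30));
	return 0;
}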
@@ -357,7 +355,7 @@ static void sun4u_irq_eoi(struct irq_data *data)
 
 static void sun4v_irq_enable(struct irq_data *data)
 {
-	unsigned int ino = virt_irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;
 
@@ -378,7 +376,7 @@ static void sun4v_irq_enable(struct irq_data *data)
 static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
 {
-	unsigned int ino = virt_irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;
 
@@ -392,7 +390,7 @@ static int sun4v_set_affinity(struct irq_data *data,
 
 static void sun4v_irq_disable(struct irq_data *data)
 {
-	unsigned int ino = virt_irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;
 
	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -403,7 +401,7 @@ static void sun4v_irq_disable(struct irq_data *data)
 
 static void sun4v_irq_eoi(struct irq_data *data)
 {
-	unsigned int ino = virt_irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_table[data->irq].dev_ino;
	struct irq_desc *desc = irq_desc + data->irq;
	int err;
 
@@ -423,8 +421,8 @@ static void sun4v_virq_enable(struct irq_data *data)
 
	cpuid = irq_choose_cpu(data->irq, data->affinity);
 
-	dev_handle = virt_irq_table[data->irq].dev_handle;
-	dev_ino = virt_irq_table[data->irq].dev_ino;
+	dev_handle = irq_table[data->irq].dev_handle;
+	dev_ino = irq_table[data->irq].dev_ino;
 
	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
@@ -453,8 +451,8 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
 
	cpuid = irq_choose_cpu(data->irq, mask);
 
-	dev_handle = virt_irq_table[data->irq].dev_handle;
-	dev_ino = virt_irq_table[data->irq].dev_ino;
+	dev_handle = irq_table[data->irq].dev_handle;
+	dev_ino = irq_table[data->irq].dev_ino;
 
	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
@@ -470,8 +468,8 @@ static void sun4v_virq_disable(struct irq_data *data)
	unsigned long dev_handle, dev_ino;
	int err;
 
-	dev_handle = virt_irq_table[data->irq].dev_handle;
-	dev_ino = virt_irq_table[data->irq].dev_ino;
+	dev_handle = irq_table[data->irq].dev_handle;
+	dev_ino = irq_table[data->irq].dev_ino;
 
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
@@ -490,8 +488,8 @@ static void sun4v_virq_eoi(struct irq_data *data)
	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;
 
-	dev_handle = virt_irq_table[data->irq].dev_handle;
-	dev_ino = virt_irq_table[data->irq].dev_ino;
+	dev_handle = irq_table[data->irq].dev_handle;
+	dev_ino = irq_table[data->irq].dev_ino;
 
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
@@ -525,23 +523,22 @@ static struct irq_chip sun4v_virq = {
	.irq_set_affinity	= sun4v_virt_set_affinity,
 };
 
-static void pre_flow_handler(unsigned int virt_irq,
-			     struct irq_desc *desc)
+static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_handler_data *handler_data = get_irq_data(virt_irq);
-	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+	struct irq_handler_data *handler_data = get_irq_data(irq);
+	unsigned int ino = irq_table[irq].dev_ino;
 
	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
 
-	handle_fasteoi_irq(virt_irq, desc);
+	handle_fasteoi_irq(irq, desc);
 }
 
-void irq_install_pre_handler(int virt_irq,
+void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
 {
-	struct irq_handler_data *handler_data = get_irq_data(virt_irq);
-	struct irq_desc *desc = irq_desc + virt_irq;
+	struct irq_handler_data *handler_data = get_irq_data(irq);
+	struct irq_desc *desc = irq_desc + irq;
 
	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
@@ -554,24 +551,24 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 {
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
-	unsigned int virt_irq;
+	unsigned int irq;
	int ino;
 
	BUG_ON(tlb_type == hypervisor);
 
	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
-	virt_irq = bucket_get_virt_irq(__pa(bucket));
-	if (!virt_irq) {
-		virt_irq = virt_irq_alloc(0, ino);
-		bucket_set_virt_irq(__pa(bucket), virt_irq);
-		set_irq_chip_and_handler_name(virt_irq,
+	irq = bucket_get_irq(__pa(bucket));
+	if (!irq) {
+		irq = irq_alloc(0, ino);
+		bucket_set_irq(__pa(bucket), irq);
+		set_irq_chip_and_handler_name(irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}
 
-	handler_data = get_irq_data(virt_irq);
+	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;
 
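build_irq() caches the allocated irq number in the per-INO bucket: the first lookup allocates an irq and registers the flow handler, later lookups return the cached number, and handler data is attached only once. A hypothetical stand-alone model of that allocate-on-first-use pattern:

/* Sketch: allocate-on-first-use lookup, modeled after build_irq(). */
#include <stdio.h>

#define NR_INO 16

static unsigned int bucket_irq[NR_INO];	/* 0 means "not yet allocated" */
static unsigned int next_irq = 1;	/* stand-in for irq_alloc() */

static unsigned int build_irq(unsigned int ino)
{
	if (!bucket_irq[ino]) {
		bucket_irq[ino] = next_irq++;	/* first use: allocate */
		/* the real code also registers chip + flow handler here */
	}
	return bucket_irq[ino];			/* later uses: cached */
}

int main(void)
{
	printf("ino 3 -> irq %u\n", build_irq(3));
	printf("ino 5 -> irq %u\n", build_irq(5));
	printf("ino 3 -> irq %u (cached)\n", build_irq(3));
	return 0;
}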
@@ -580,13 +577,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
-	set_irq_data(virt_irq, handler_data);
+	set_irq_data(irq, handler_data);
 
	handler_data->imap = imap;
	handler_data->iclr = iclr;
 
 out:
-	return virt_irq;
+	return irq;
 }
 
 static unsigned int sun4v_build_common(unsigned long sysino,
@@ -594,21 +591,21 @@ static unsigned int sun4v_build_common(unsigned long sysino,
 {
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
-	unsigned int virt_irq;
+	unsigned int irq;
 
	BUG_ON(tlb_type != hypervisor);
 
	bucket = &ivector_table[sysino];
-	virt_irq = bucket_get_virt_irq(__pa(bucket));
-	if (!virt_irq) {
-		virt_irq = virt_irq_alloc(0, sysino);
-		bucket_set_virt_irq(__pa(bucket), virt_irq);
-		set_irq_chip_and_handler_name(virt_irq, chip,
+	irq = bucket_get_irq(__pa(bucket));
+	if (!irq) {
+		irq = irq_alloc(0, sysino);
+		bucket_set_irq(__pa(bucket), irq);
+		set_irq_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}
 
-	handler_data = get_irq_data(virt_irq);
+	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;
 
@@ -617,7 +614,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
-	set_irq_data(virt_irq, handler_data);
+	set_irq_data(irq, handler_data);
 
	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
@@ -627,7 +624,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
	handler_data->iclr = ~0UL;
 
 out:
-	return virt_irq;
+	return irq;
 }
 
 unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -643,7 +640,7 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
-	unsigned int virt_irq;
+	unsigned int irq;
 
	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
@@ -660,10 +657,10 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
			       ((unsigned long) bucket +
				sizeof(struct ino_bucket)));
 
-	virt_irq = virt_irq_alloc(devhandle, devino);
-	bucket_set_virt_irq(__pa(bucket), virt_irq);
+	irq = irq_alloc(devhandle, devino);
+	bucket_set_irq(__pa(bucket), irq);
 
-	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
+	set_irq_chip_and_handler_name(irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");
 
@@ -675,10 +672,10 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
-	desc = irq_desc + virt_irq;
+	desc = irq_desc + irq;
	desc->status |= IRQ_NOAUTOEN;
 
-	set_irq_data(virt_irq, handler_data);
+	set_irq_data(irq, handler_data);
 
	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
@@ -695,18 +692,18 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
		prom_halt();
	}
 
-	return virt_irq;
+	return irq;
 }
 
-void ack_bad_irq(unsigned int virt_irq)
+void ack_bad_irq(unsigned int irq)
 {
-	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+	unsigned int ino = irq_table[irq].dev_ino;
 
	if (!ino)
		ino = 0xdeadbeef;
 
-	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
-	       ino, virt_irq);
+	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+	       ino, irq);
 }
 
 void *hardirq_stack[NR_CPUS];
@@ -739,16 +736,16 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
-		unsigned int virt_irq;
+		unsigned int irq;
 
		next_pa = bucket_get_chain_pa(bucket_pa);
-		virt_irq = bucket_get_virt_irq(bucket_pa);
+		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);
 
-		desc = irq_desc + virt_irq;
+		desc = irq_desc + irq;
 
		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);
-			desc->handle_irq(virt_irq, desc);
+			desc->handle_irq(irq, desc);
 
		bucket_pa = next_pa;
	}
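handler_irq() drains a per-CPU chain of ino_buckets handed over by the trap entry code: read the next pointer, look up the irq cached in the bucket, unlink the bucket, and dispatch through the flow handler unless the irq is disabled. A hypothetical model with ordinary pointers standing in for the lduwa/stxa physical-address accessors:

/* Sketch: the handler_irq() chain walk with plain pointers in place of
 * the physical-address asm accessors. */
#include <stdio.h>
#include <stddef.h>

struct ino_bucket {
	struct ino_bucket *chain;	/* stands in for __irq_chain_pa */
	unsigned int irq;		/* stands in for __irq */
};

static void handle_irq(unsigned int irq)
{
	printf("dispatching irq %u\n", irq);	/* desc->handle_irq() */
}

static void handler_irq(struct ino_bucket *bucket)
{
	while (bucket) {
		struct ino_bucket *next = bucket->chain;
		unsigned int irq = bucket->irq;	/* bucket_get_irq() */

		bucket->chain = NULL;		/* bucket_clear_chain_pa() */
		handle_irq(irq);
		bucket = next;
	}
}

int main(void)
{
	struct ino_bucket b2 = { NULL, 7 };
	struct ino_bucket b1 = { &b2, 5 };

	handler_irq(&b1);
	return 0;
}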