path: root/arch/sparc/kernel/irq_64.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-11 20:36:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-11 20:36:34 -0400
commit	052db7ec86dff26f734031c3ef5c2c03a94af0af (patch)
tree	de4ca607d1ede889a3a804e87f25bd38304f9016 /arch/sparc/kernel/irq_64.c
parent	fd9879b9bb3258ebc27a4cc6d2d29f528f71901f (diff)
parent	bdcf81b658ebc4c2640c3c2c55c8b31c601b6996 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller:

 1) Move to 4-level page tables on sparc64 and support up to 53-bits
    of physical addressing.  Kernel static image BSS size reduced by
    several megabytes.

 2) M6/M7 cpu support, from Allan Pais.

 3) Move to sparse IRQs, handle hypervisor TLB call errors more
    gracefully, and add T5 perf_event support.  From Bob Picco.

 4) Recognize cdroms and compute geometry from capacity in virtual
    disk driver, also from Allan Pais.

 5) Fix memset() return value on sparc32, from Andreas Larsson.

 6) Respect gfp flags in dma_alloc_coherent on sparc32, from Daniel
    Hellstrom.

 7) Fix handling of compound pages in virtual disk driver, from Dwight
    Engen.

 8) Fix lockdep warnings in LDC layer by moving IRQ requesting to
    ldc_alloc() from ldc_bind().

 9) Increase boot string length to 1024 bytes, from Dave Kleikamp.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc: (31 commits)
  sparc64: Fix lockdep warnings on reboot on Ultra-5
  sparc64: Increase size of boot string to 1024 bytes
  sparc64: Kill unnecessary tables and increase MAX_BANKS.
  sparc64: sparse irq
  sparc64: Adjust vmalloc region size based upon available virtual address bits.
  sparc64: Increase MAX_PHYS_ADDRESS_BITS to 53.
  sparc64: Use kernel page tables for vmemmap.
  sparc64: Fix physical memory management regressions with large max_phys_bits.
  sparc64: Adjust KTSB assembler to support larger physical addresses.
  sparc64: Define VA hole at run time, rather than at compile time.
  sparc64: Switch to 4-level page tables.
  sparc64: Fix reversed start/end in flush_tlb_kernel_range()
  sparc64: Add vio_set_intr() to enable/disable Rx interrupts
  vio: fix reuse of vio_dring slot
  sunvdc: limit each sg segment to a page
  sunvdc: compute vdisk geometry from capacity
  sunvdc: add cdrom and v1.1 protocol support
  sparc: VIO protocol version 1.6
  sparc64: Fix hibernation code refrence to PAGE_OFFSET.
  sparc64: Move request_irq() from ldc_bind() to ldc_alloc()
  ...
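[Editor's note] Item 3's sparse IRQ conversion is what drives most of the irq_64.c diff below: the fixed irq_table[NR_IRQS] array and its spinlock are replaced by on-demand descriptor allocation plus per-IRQ handler data. The following is a minimal sketch of that allocation pattern, condensed from the new irq_alloc()/irq_free() pair visible in the diff; the example_ names are illustrative, not kernel functions.

	#include <linux/irq.h>
	#include <linux/slab.h>
	#include <linux/topology.h>

	/* Sketch only; see irq_alloc()/irq_free() in the diff for the real code. */
	static unsigned int example_irq_alloc(void)
	{
		/* Ask the genirq core for one free descriptor (-1 = any number),
		 * allocated on the local NUMA node.
		 */
		int irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);

		return irq <= 0 ? 0 : irq;	/* 0 doubles as the "no IRQ" value */
	}

	static void example_irq_free(unsigned int irq)
	{
		kfree(irq_get_handler_data(irq));	/* per-IRQ driver state */
		irq_set_handler_data(irq, NULL);
		irq_free_descs(irq, 1);			/* return the descriptor */
	}

Because descriptors are created only for interrupts that actually exist, NR_IRQS no longer bounds the number of usable vectors, which is what lets the diff drop the BUILD_BUG_ON(NR_IRQS >= 256) limit.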
Diffstat (limited to 'arch/sparc/kernel/irq_64.c')
-rw-r--r--	arch/sparc/kernel/irq_64.c	507	
1 files changed, 338 insertions(+), 169 deletions(-)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 666193f4e8bb..4033c23bdfa6 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -47,8 +47,6 @@
 #include "cpumap.h"
 #include "kstack.h"
 
-#define NUM_IVECS	(IMAP_INR + 1)
-
 struct ino_bucket *ivector_table;
 unsigned long ivector_table_pa;
 
@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
 
 #define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
 
-static struct {
-	unsigned int dev_handle;
-	unsigned int dev_ino;
-	unsigned int in_use;
-} irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(irq_alloc_lock);
+static unsigned long hvirq_major __initdata;
+static int __init early_hvirq_major(char *p)
+{
+	int rc = kstrtoul(p, 10, &hvirq_major);
+
+	return rc;
+}
+early_param("hvirq", early_hvirq_major);
+
+static int hv_irq_version;
+
+/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
+ * based interfaces, but:
+ *
+ * 1) Several OSs, Solaris and Linux included, use them even when only
+ *    negotiating version 1.0 (or failing to negotiate at all).  So the
+ *    hypervisor has a workaround that provides the VIRQ interfaces even
+ *    when only version 1.0 of the API is in use.
+ *
+ * 2) Second, and more importantly, with major version 2.0 these VIRQ
+ *    interfaces were only actually hooked up for LDC interrupts, even
+ *    though the Hypervisor specification clearly stated:
+ *
+ *	The new interrupt API functions will be available to a guest
+ *	when it negotiates version 2.0 in the interrupt API group 0x2. When
+ *	a guest negotiates version 2.0, all interrupt sources will only
+ *	support using the cookie interface, and any attempt to use the
+ *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
+ *	ENOTSUPPORTED error being returned.
+ *
+ *    with an emphasis on "all interrupt sources".
+ *
+ * To correct this, major version 3.0 was created, which does actually
+ * support VIRQs for all interrupt sources (not just LDC devices).  So
+ * if we want to move completely over to the cookie based VIRQs we must
+ * negotiate major version 3.0 or later of HV_GRP_INTR.
+ */
+static bool sun4v_cookie_only_virqs(void)
+{
+	if (hv_irq_version >= 3)
+		return true;
+	return false;
+}
 
-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+static void __init irq_init_hv(void)
 {
-	unsigned long flags;
-	unsigned char ent;
+	unsigned long hv_error, major, minor = 0;
+
+	if (tlb_type != hypervisor)
+		return;
 
-	BUILD_BUG_ON(NR_IRQS >= 256);
+	if (hvirq_major)
+		major = hvirq_major;
+	else
+		major = 3;
 
-	spin_lock_irqsave(&irq_alloc_lock, flags);
+	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
+	if (!hv_error)
+		hv_irq_version = major;
+	else
+		hv_irq_version = 1;
 
-	for (ent = 1; ent < NR_IRQS; ent++) {
-		if (!irq_table[ent].in_use)
+	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
+		hv_irq_version,
+		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
+}
+
+/* This function is for the timer interrupt. */
+int __init arch_probe_nr_irqs(void)
+{
+	return 1;
+}
+
+#define DEFAULT_NUM_IVECS	(0xfffU)
+static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
+#define NUM_IVECS (nr_ivec)
+
+static unsigned int __init size_nr_ivec(void)
+{
+	if (tlb_type == hypervisor) {
+		switch (sun4v_chip_type) {
+		/* Athena's devhandle|devino is large. */
+		case SUN4V_CHIP_SPARC64X:
+			nr_ivec = 0xffff;
 			break;
+		}
 	}
-	if (ent >= NR_IRQS) {
-		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
-		ent = 0;
-	} else {
-		irq_table[ent].dev_handle = dev_handle;
-		irq_table[ent].dev_ino = dev_ino;
-		irq_table[ent].in_use = 1;
-	}
+	return nr_ivec;
+}
+
+struct irq_handler_data {
+	union {
+		struct {
+			unsigned int dev_handle;
+			unsigned int dev_ino;
+		};
+		unsigned long sysino;
+	};
+	struct ino_bucket bucket;
+	unsigned long	iclr;
+	unsigned long	imap;
+};
+
+static inline unsigned int irq_data_to_handle(struct irq_data *data)
+{
+	struct irq_handler_data *ihd = data->handler_data;
+
+	return ihd->dev_handle;
+}
+
+static inline unsigned int irq_data_to_ino(struct irq_data *data)
+{
+	struct irq_handler_data *ihd = data->handler_data;
 
-	spin_unlock_irqrestore(&irq_alloc_lock, flags);
+	return ihd->dev_ino;
+}
+
+static inline unsigned long irq_data_to_sysino(struct irq_data *data)
+{
+	struct irq_handler_data *ihd = data->handler_data;
 
-	return ent;
+	return ihd->sysino;
 }
 
-#ifdef CONFIG_PCI_MSI
 void irq_free(unsigned int irq)
 {
-	unsigned long flags;
+	void *data = irq_get_handler_data(irq);
 
-	if (irq >= NR_IRQS)
-		return;
+	kfree(data);
+	irq_set_handler_data(irq, NULL);
+	irq_free_descs(irq, 1);
+}
 
-	spin_lock_irqsave(&irq_alloc_lock, flags);
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+{
+	int irq;
 
-	irq_table[irq].in_use = 0;
+	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
+	if (irq <= 0)
+		goto out;
 
-	spin_unlock_irqrestore(&irq_alloc_lock, flags);
+	return irq;
+out:
+	return 0;
+}
+
+static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
+{
+	unsigned long hv_err, cookie;
+	struct ino_bucket *bucket;
+	unsigned int irq = 0U;
+
+	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
+	if (hv_err) {
+		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
+		goto out;
+	}
+
+	if (cookie & ((1UL << 63UL))) {
+		cookie = ~cookie;
+		bucket = (struct ino_bucket *) __va(cookie);
+		irq = bucket->__irq;
+	}
+out:
+	return irq;
+}
+
+static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
+{
+	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+	struct ino_bucket *bucket;
+	unsigned int irq;
+
+	bucket = &ivector_table[sysino];
+	irq = bucket_get_irq(__pa(bucket));
+
+	return irq;
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+	pr_crit("BAD IRQ ack %d\n", irq);
+}
+
+void irq_install_pre_handler(int irq,
+			     void (*func)(unsigned int, void *, void *),
+			     void *arg1, void *arg2)
+{
+	pr_warn("IRQ pre handler NOT supported.\n");
 }
-#endif
 
 /*
  * /proc/interrupts printing:
@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
 	return tid;
 }
 
-struct irq_handler_data {
-	unsigned long	iclr;
-	unsigned long	imap;
-
-	void		(*pre_handler)(unsigned int, void *, void *);
-	void		*arg1;
-	void		*arg2;
-};
-
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
 {
@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
 
 static void sun4v_irq_enable(struct irq_data *data)
 {
-	unsigned int ino = irq_table[data->irq].dev_ino;
 	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
 	err = sun4v_intr_settarget(ino, cpuid);
@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
 static int sun4v_set_affinity(struct irq_data *data,
 			      const struct cpumask *mask, bool force)
 {
-	unsigned int ino = irq_table[data->irq].dev_ino;
 	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
+	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
 	err = sun4v_intr_settarget(ino, cpuid);
@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
 
 static void sun4v_irq_disable(struct irq_data *data)
 {
-	unsigned int ino = irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
 	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
 
 static void sun4v_irq_eoi(struct irq_data *data)
 {
-	unsigned int ino = irq_table[data->irq].dev_ino;
+	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
 	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
 
 static void sun4v_virq_enable(struct irq_data *data)
 {
-	unsigned long cpuid, dev_handle, dev_ino;
+	unsigned long dev_handle = irq_data_to_handle(data);
+	unsigned long dev_ino = irq_data_to_ino(data);
+	unsigned long cpuid;
 	int err;
 
 	cpuid = irq_choose_cpu(data->irq, data->affinity);
 
-	dev_handle = irq_table[data->irq].dev_handle;
-	dev_ino = irq_table[data->irq].dev_ino;
-
 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
 	if (err != HV_EOK)
 		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
 static int sun4v_virt_set_affinity(struct irq_data *data,
 				   const struct cpumask *mask, bool force)
 {
-	unsigned long cpuid, dev_handle, dev_ino;
+	unsigned long dev_handle = irq_data_to_handle(data);
+	unsigned long dev_ino = irq_data_to_ino(data);
+	unsigned long cpuid;
 	int err;
 
 	cpuid = irq_choose_cpu(data->irq, mask);
 
-	dev_handle = irq_table[data->irq].dev_handle;
-	dev_ino = irq_table[data->irq].dev_ino;
-
 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
 	if (err != HV_EOK)
 		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
 
 static void sun4v_virq_disable(struct irq_data *data)
 {
-	unsigned long dev_handle, dev_ino;
+	unsigned long dev_handle = irq_data_to_handle(data);
+	unsigned long dev_ino = irq_data_to_ino(data);
 	int err;
 
-	dev_handle = irq_table[data->irq].dev_handle;
-	dev_ino = irq_table[data->irq].dev_ino;
 
 	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
 				    HV_INTR_DISABLED);
@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
 
 static void sun4v_virq_eoi(struct irq_data *data)
 {
-	unsigned long dev_handle, dev_ino;
+	unsigned long dev_handle = irq_data_to_handle(data);
+	unsigned long dev_ino = irq_data_to_ino(data);
 	int err;
 
-	dev_handle = irq_table[data->irq].dev_handle;
-	dev_ino = irq_table[data->irq].dev_ino;
-
 	err = sun4v_vintr_set_state(dev_handle, dev_ino,
 				    HV_INTR_STATE_IDLE);
 	if (err != HV_EOK)
@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
 	.flags		= IRQCHIP_EOI_IF_HANDLED,
 };
 
-static void pre_flow_handler(struct irq_data *d)
-{
-	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
-	unsigned int ino = irq_table[d->irq].dev_ino;
-
-	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
-}
-
-void irq_install_pre_handler(int irq,
-			     void (*func)(unsigned int, void *, void *),
-			     void *arg1, void *arg2)
-{
-	struct irq_handler_data *handler_data = irq_get_handler_data(irq);
-
-	handler_data->pre_handler = func;
-	handler_data->arg1 = arg1;
-	handler_data->arg2 = arg2;
-
-	__irq_set_preflow_handler(irq, pre_flow_handler);
-}
-
 unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 {
-	struct ino_bucket *bucket;
 	struct irq_handler_data *handler_data;
+	struct ino_bucket *bucket;
 	unsigned int irq;
 	int ino;
 
@@ -537,119 +641,166 @@ out:
 	return irq;
 }
 
-static unsigned int sun4v_build_common(unsigned long sysino,
-				       struct irq_chip *chip)
+static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
+			void (*handler_data_init)(struct irq_handler_data *data,
+				u32 devhandle, unsigned int devino),
+			struct irq_chip *chip)
 {
-	struct ino_bucket *bucket;
-	struct irq_handler_data *handler_data;
+	struct irq_handler_data *data;
 	unsigned int irq;
 
-	BUG_ON(tlb_type != hypervisor);
+	irq = irq_alloc(devhandle, devino);
+	if (!irq)
+		goto out;
 
-	bucket = &ivector_table[sysino];
-	irq = bucket_get_irq(__pa(bucket));
-	if (!irq) {
-		irq = irq_alloc(0, sysino);
-		bucket_set_irq(__pa(bucket), irq);
-		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
-					      "IVEC");
+	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+	if (unlikely(!data)) {
+		pr_err("IRQ handler data allocation failed.\n");
+		irq_free(irq);
+		irq = 0;
+		goto out;
 	}
 
-	handler_data = irq_get_handler_data(irq);
-	if (unlikely(handler_data))
-		goto out;
+	irq_set_handler_data(irq, data);
+	handler_data_init(data, devhandle, devino);
+	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
+	data->imap = ~0UL;
+	data->iclr = ~0UL;
+out:
+	return irq;
+}
 
-	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
-	if (unlikely(!handler_data)) {
-		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
-		prom_halt();
-	}
-	irq_set_handler_data(irq, handler_data);
+static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
+				   unsigned int devino)
+{
+	struct irq_handler_data *ihd = irq_get_handler_data(irq);
+	unsigned long hv_error, cookie;
 
-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
-	 * is done by hypervisor calls on sun4v platforms, not by direct
-	 * register accesses.
+	/* handler_irq needs to find the irq. cookie is seen signed in
+	 * sun4v_dev_mondo and treated as a non ivector_table delivery.
 	 */
-	handler_data->imap = ~0UL;
-	handler_data->iclr = ~0UL;
+	ihd->bucket.__irq = irq;
+	cookie = ~__pa(&ihd->bucket);
 
-out:
-	return irq;
+	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+	if (hv_error)
+		pr_err("HV vintr set cookie failed = %ld\n", hv_error);
+
+	return hv_error;
 }
 
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+static void cookie_handler_data(struct irq_handler_data *data,
+				u32 devhandle, unsigned int devino)
 {
-	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+	data->dev_handle = devhandle;
+	data->dev_ino = devino;
+}
 
-	return sun4v_build_common(sysino, &sun4v_irq);
+static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
+				     struct irq_chip *chip)
+{
+	unsigned long hv_error;
+	unsigned int irq;
+
+	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
+
+	hv_error = cookie_assign(irq, devhandle, devino);
+	if (hv_error) {
+		irq_free(irq);
+		irq = 0;
+	}
+
+	return irq;
 }
 
-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
 {
-	struct irq_handler_data *handler_data;
-	unsigned long hv_err, cookie;
-	struct ino_bucket *bucket;
 	unsigned int irq;
 
-	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
-	if (unlikely(!bucket))
-		return 0;
+	irq = cookie_exists(devhandle, devino);
+	if (irq)
+		goto out;
 
-	/* The only reference we store to the IRQ bucket is
-	 * by physical address which kmemleak can't see, tell
-	 * it that this object explicitly is not a leak and
-	 * should be scanned.
-	 */
-	kmemleak_not_leak(bucket);
+	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
 
-	__flush_dcache_range((unsigned long) bucket,
-			     ((unsigned long) bucket +
-			      sizeof(struct ino_bucket)));
+out:
+	return irq;
+}
 
-	irq = irq_alloc(devhandle, devino);
+static void sysino_set_bucket(unsigned int irq)
+{
+	struct irq_handler_data *ihd = irq_get_handler_data(irq);
+	struct ino_bucket *bucket;
+	unsigned long sysino;
+
+	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
+	BUG_ON(sysino >= nr_ivec);
+	bucket = &ivector_table[sysino];
 	bucket_set_irq(__pa(bucket), irq);
+}
 
-	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
-				      "IVEC");
+static void sysino_handler_data(struct irq_handler_data *data,
+				u32 devhandle, unsigned int devino)
+{
+	unsigned long sysino;
 
-	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
-	if (unlikely(!handler_data))
-		return 0;
+	sysino = sun4v_devino_to_sysino(devhandle, devino);
+	data->sysino = sysino;
+}
 
-	/* In order to make the LDC channel startup sequence easier,
-	 * especially wrt. locking, we do not let request_irq() enable
-	 * the interrupt.
-	 */
-	irq_set_status_flags(irq, IRQ_NOAUTOEN);
-	irq_set_handler_data(irq, handler_data);
+static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
+				     struct irq_chip *chip)
+{
+	unsigned int irq;
 
-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
-	 * is done by hypervisor calls on sun4v platforms, not by direct
-	 * register accesses.
-	 */
-	handler_data->imap = ~0UL;
-	handler_data->iclr = ~0UL;
+	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
+	if (!irq)
+		goto out;
 
-	cookie = ~__pa(bucket);
-	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
-	if (hv_err) {
-		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
-			    "err=%lu\n", devhandle, devino, hv_err);
-		prom_halt();
-	}
+	sysino_set_bucket(irq);
+out:
+	return irq;
+}
 
+static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
+{
+	int irq;
+
+	irq = sysino_exists(devhandle, devino);
+	if (irq)
+		goto out;
+
+	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
+out:
 	return irq;
 }
 
-void ack_bad_irq(unsigned int irq)
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
 {
-	unsigned int ino = irq_table[irq].dev_ino;
+	unsigned int irq;
 
-	if (!ino)
-		ino = 0xdeadbeef;
+	if (sun4v_cookie_only_virqs())
+		irq = sun4v_build_cookie(devhandle, devino);
+	else
+		irq = sun4v_build_sysino(devhandle, devino);
 
-	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
-	       ino, irq);
+	return irq;
+}
+
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+{
+	int irq;
+
+	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+	if (!irq)
+		goto out;
+
+	/* This is borrowed from the original function.
+	 */
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+out:
+	return irq;
 }
 
 void *hardirq_stack[NR_CPUS];
@@ -720,9 +871,12 @@ void fixup_irqs(void)
 
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		struct irq_desc *desc = irq_to_desc(irq);
-		struct irq_data *data = irq_desc_get_irq_data(desc);
+		struct irq_data *data;
 		unsigned long flags;
 
+		if (!desc)
+			continue;
+		data = irq_desc_get_irq_data(desc);
 		raw_spin_lock_irqsave(&desc->lock, flags);
 		if (desc->action && !irqd_is_per_cpu(data)) {
 			if (data->chip->irq_set_affinity)
@@ -922,16 +1076,22 @@ static struct irqaction timer_irq_action = {
 	.name = "timer",
 };
 
-/* Only invoked on boot processor. */
-void __init init_IRQ(void)
+static void __init irq_ivector_init(void)
 {
-	unsigned long size;
+	unsigned long size, order;
+	unsigned int ivecs;
 
-	map_prom_timers();
-	kill_prom_timer();
+	/* If we are doing cookie only VIRQs then we do not need the ivector
+	 * table to process interrupts.
+	 */
+	if (sun4v_cookie_only_virqs())
+		return;
 
-	size = sizeof(struct ino_bucket) * NUM_IVECS;
-	ivector_table = kzalloc(size, GFP_KERNEL);
+	ivecs = size_nr_ivec();
+	size = sizeof(struct ino_bucket) * ivecs;
+	order = get_order(size);
+	ivector_table = (struct ino_bucket *)
+		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!ivector_table) {
 		prom_printf("Fatal error, cannot allocate ivector_table\n");
 		prom_halt();
@@ -940,6 +1100,15 @@ void __init init_IRQ(void)
 			     ((unsigned long) ivector_table) + size);
 
 	ivector_table_pa = __pa(ivector_table);
+}
+
+/* Only invoked on boot processor. */
+void __init init_IRQ(void)
+{
+	irq_init_hv();
+	irq_ivector_init();
+	map_prom_timers();
+	kill_prom_timer();
 
 	if (tlb_type == hypervisor)
 		sun4v_init_mondo_queues();
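
[Editor's note] The cookie scheme negotiated by irq_init_hv() above encodes each VIRQ's ino_bucket physical address as the bitwise NOT of that address, so a set bit 63 distinguishes a cookie from a plain sysino. Below is a hedged sketch of the decode step that cookie_exists() in the diff performs; example_cookie_to_irq is illustrative, not a kernel function.

	/* Sketch of the VIRQ cookie decode used above: the value handed to
	 * the hypervisor is ~__pa(bucket), so bit 63 marks it as a cookie
	 * rather than a sysino index into ivector_table.
	 */
	static unsigned int example_cookie_to_irq(unsigned long cookie)
	{
		struct ino_bucket *bucket;

		if (!(cookie & (1UL << 63)))
			return 0;	/* not a cookie-style value */

		/* Undo the NOT, then map the physical address back to a
		 * kernel virtual address.
		 */
		bucket = (struct ino_bucket *) __va(~cookie);
		return bucket->__irq;	/* irq recorded by cookie_assign() */
	}

Embedding the bucket inside struct irq_handler_data (rather than kzalloc'ing it separately, as the removed sun4v_build_virq() did) is what lets irq_free() release everything with a single kfree().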