Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/Makefile      |    1
-rw-r--r--  arch/sparc64/kernel/auxio.c       |    4
-rw-r--r--  arch/sparc64/kernel/entry.S       |   14
-rw-r--r--  arch/sparc64/kernel/irq.c         |  610
-rw-r--r--  arch/sparc64/kernel/of_device.c   |    5
-rw-r--r--  arch/sparc64/kernel/pci.c         |   60
-rw-r--r--  arch/sparc64/kernel/pci_fire.c    |  279
-rw-r--r--  arch/sparc64/kernel/pci_impl.h    |   32
-rw-r--r--  arch/sparc64/kernel/pci_msi.c     |  433
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c  |    6
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c  |    3
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c   |  405
-rw-r--r--  arch/sparc64/kernel/power.c       |    4
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S  |   22
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c   |   15
-rw-r--r--  arch/sparc64/kernel/time.c        |    4
-rw-r--r--  arch/sparc64/kernel/traps.c       |    4
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S |  211
18 files changed, 1302 insertions, 810 deletions
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 40d2f3aae91..112c46e6657 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_PCI)     += ebus.o isa.o pci_common.o \
                        pci_psycho.o pci_sabre.o pci_schizo.o \
                        pci_sun4v.o pci_sun4v_asm.o pci_fire.o
+obj-$(CONFIG_PCI_MSI) += pci_msi.o
 obj-$(CONFIG_SMP)     += smp.o trampoline.o hvtramp.o
 obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 7b379761e9f..c55f0293eac 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -148,9 +148,11 @@ static int __devinit auxio_probe(struct of_device *dev, const struct of_device_i
 }
 
 static struct of_platform_driver auxio_driver = {
-        .name           = "auxio",
         .match_table    = auxio_match,
         .probe          = auxio_probe,
+        .driver         = {
+                .name = "auxio",
+        },
 };
 
 static int __init auxio_init(void)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 8059531bf0a..c9b0d7af64a 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -429,16 +429,16 @@ do_ivec:
         stxa    %g0, [%g0] ASI_INTR_RECEIVE
         membar  #Sync
 
-        sethi   %hi(ivector_table), %g2
-        sllx    %g3, 3, %g3
-        or      %g2, %lo(ivector_table), %g2
+        sethi   %hi(ivector_table_pa), %g2
+        ldx     [%g2 + %lo(ivector_table_pa)], %g2
+        sllx    %g3, 4, %g3
         add     %g2, %g3, %g3
 
-        TRAP_LOAD_IRQ_WORK(%g6, %g1)
+        TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
 
-        lduw    [%g6], %g5      /* g5 = irq_work(cpu) */
-        stw     %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
-        stw     %g3, [%g6]      /* irq_work(cpu) = bucket */
+        ldx     [%g6], %g5
+        stxa    %g5, [%g3] ASI_PHYS_USE_EC
+        stx     %g3, [%g6]
         wr      %g0, 1 << PIL_DEVICE_IRQ, %set_softint
         retry
 do_ivec_xcall:
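
The rewritten do_ivec stub above indexes ivector_table by physical
address: buckets are now 16 bytes (hence the shift by 4), and both the
bucket's chain field and the per-cpu worklist head are touched only
with ASI_PHYS_USE_EC bypass accesses, so the trap handler can never
take a TLB miss on this data. In C terms the fast path is roughly the
sketch below; phys_store() is a hypothetical stand-in for the bypass
store, not a real kernel API:

        /* Sketch of do_ivec's list push (illustrative only). */
        static void do_ivec_sketch(unsigned long ino, struct trap_per_cpu *tb)
        {
                /* struct ino_bucket is 16 bytes, hence "sllx %g3, 4". */
                unsigned long bucket_pa = ivector_table_pa + (ino << 4);

                /* bucket->__irq_chain_pa = current list head ... */
                phys_store(bucket_pa, tb->irq_worklist_pa);     /* hypothetical */
                /* ... and the bucket becomes the new head. */
                tb->irq_worklist_pa = bucket_pa;
        }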
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 23956096b3b..f3922e5a89f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,7 +21,6 @@
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
 #include <linux/irq.h>
-#include <linux/msi.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -43,6 +42,7 @@
 #include <asm/auxio.h>
 #include <asm/head.h>
 #include <asm/hypervisor.h>
+#include <asm/cacheflush.h>
 
 /* UPA nodes send interrupt packet to UltraSparc with first data reg
  * value low 5 (7 on Starfire) bits holding the IRQ identifier being
@@ -52,86 +52,128 @@
  * To make processing these packets efficient and race free we use
  * an array of irq buckets below.  The interrupt vector handler in
  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- * The IVEC handler does not need to act atomically, the PIL dispatch
- * code uses CAS to get an atomic snapshot of the list and clear it
- * at the same time.
  *
  * If you make changes to ino_bucket, please update hand coded assembler
  * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
 struct ino_bucket {
-        /* Next handler in per-CPU IRQ worklist.  We know that
-         * bucket pointers have the high 32-bits clear, so to
-         * save space we only store the bits we need.
-         */
-/*0x00*/unsigned int irq_chain;
+/*0x00*/unsigned long __irq_chain_pa;
 
         /* Virtual interrupt number assigned to this INO.  */
-/*0x04*/unsigned int virt_irq;
+/*0x08*/unsigned int __virt_irq;
+/*0x0c*/unsigned int __pad;
 };
 
 #define NUM_IVECS       (IMAP_INR + 1)
-struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
-
-#define __irq_ino(irq) \
-        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
-#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
-#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
-
-/* This has to be in the main kernel image, it cannot be
- * turned into per-cpu data.  The reason is that the main
- * kernel image is locked into the TLB and this structure
- * is accessed from the vectored interrupt trap handler.  If
- * access to this structure takes a TLB miss it could cause
- * the 5-level sparc v9 trap stack to overflow.
- */
-#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
+struct ino_bucket *ivector_table;
+unsigned long ivector_table_pa;
+
+/* On several sun4u processors, it is illegal to mix bypass and
+ * non-bypass accesses.  Therefore we access all INO buckets
+ * using bypass accesses only.
+ */
+static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
+{
+        unsigned long ret;
+
+        __asm__ __volatile__("ldxa [%1] %2, %0"
+                             : "=&r" (ret)
+                             : "r" (bucket_pa +
+                                    offsetof(struct ino_bucket,
+                                             __irq_chain_pa)),
+                               "i" (ASI_PHYS_USE_EC));
+
+        return ret;
+}
+
+static void bucket_clear_chain_pa(unsigned long bucket_pa)
+{
+        __asm__ __volatile__("stxa %%g0, [%0] %1"
+                             : /* no outputs */
+                             : "r" (bucket_pa +
+                                    offsetof(struct ino_bucket,
+                                             __irq_chain_pa)),
+                               "i" (ASI_PHYS_USE_EC));
+}
+
+static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+{
+        unsigned int ret;
+
+        __asm__ __volatile__("lduwa [%1] %2, %0"
+                             : "=&r" (ret)
+                             : "r" (bucket_pa +
+                                    offsetof(struct ino_bucket,
+                                             __virt_irq)),
+                               "i" (ASI_PHYS_USE_EC));
+
+        return ret;
+}
+
+static void bucket_set_virt_irq(unsigned long bucket_pa,
+                                unsigned int virt_irq)
+{
+        __asm__ __volatile__("stwa %0, [%1] %2"
+                             : /* no outputs */
+                             : "r" (virt_irq),
+                               "r" (bucket_pa +
+                                    offsetof(struct ino_bucket,
+                                             __virt_irq)),
+                               "i" (ASI_PHYS_USE_EC));
+}
+
+#define irq_work_pa(__cpu)      &(trap_block[(__cpu)].irq_worklist_pa)
 
 static struct {
-        unsigned int irq;
         unsigned int dev_handle;
         unsigned int dev_ino;
-} virt_to_real_irq_table[NR_IRQS];
+        unsigned int in_use;
+} virt_irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(virt_irq_alloc_lock);
 
-static unsigned char virt_irq_alloc(unsigned int real_irq)
+unsigned char virt_irq_alloc(unsigned int dev_handle,
+                             unsigned int dev_ino)
 {
+        unsigned long flags;
         unsigned char ent;
 
         BUILD_BUG_ON(NR_IRQS >= 256);
 
+        spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+
         for (ent = 1; ent < NR_IRQS; ent++) {
-                if (!virt_to_real_irq_table[ent].irq)
+                if (!virt_irq_table[ent].in_use)
                         break;
         }
         if (ent >= NR_IRQS) {
                 printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
-                return 0;
+                ent = 0;
+        } else {
+                virt_irq_table[ent].dev_handle = dev_handle;
+                virt_irq_table[ent].dev_ino = dev_ino;
+                virt_irq_table[ent].in_use = 1;
         }
 
-        virt_to_real_irq_table[ent].irq = real_irq;
+        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
 
         return ent;
 }
 
 #ifdef CONFIG_PCI_MSI
-static void virt_irq_free(unsigned int virt_irq)
+void virt_irq_free(unsigned int virt_irq)
 {
-        unsigned int real_irq;
+        unsigned long flags;
 
         if (virt_irq >= NR_IRQS)
                 return;
 
-        real_irq = virt_to_real_irq_table[virt_irq].irq;
-        virt_to_real_irq_table[virt_irq].irq = 0;
+        spin_lock_irqsave(&virt_irq_alloc_lock, flags);
 
-        __bucket(real_irq)->virt_irq = 0;
-}
-#endif
+        virt_irq_table[virt_irq].in_use = 0;
 
-static unsigned int virt_to_real_irq(unsigned char virt_irq)
-{
-        return virt_to_real_irq_table[virt_irq].irq;
+        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
 }
+#endif
 
 /*
  * /proc/interrupts printing:
@@ -217,38 +259,8 @@ struct irq_handler_data {
         void (*pre_handler)(unsigned int, void *, void *);
         void *pre_handler_arg1;
         void *pre_handler_arg2;
-
-        u32 msi;
 };
 
-void sparc64_set_msi(unsigned int virt_irq, u32 msi)
-{
-        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
-        if (data)
-                data->msi = msi;
-}
-
-u32 sparc64_get_msi(unsigned int virt_irq)
-{
-        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
-        if (data)
-                return data->msi;
-        return 0xffffffff;
-}
-
-static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
-{
-        unsigned int real_irq = virt_to_real_irq(virt_irq);
-        struct ino_bucket *bucket = NULL;
-
-        if (likely(real_irq))
-                bucket = __bucket(real_irq);
-
-        return bucket;
-}
-
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
@@ -348,201 +360,152 @@ static void sun4u_irq_end(unsigned int virt_irq)
 
 static void sun4v_irq_enable(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-        unsigned int ino = bucket - &ivector_table[0];
-
-        if (likely(bucket)) {
-                unsigned long cpuid;
-                int err;
+        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+        unsigned long cpuid = irq_choose_cpu(virt_irq);
+        int err;
 
-                cpuid = irq_choose_cpu(virt_irq);
-
-                err = sun4v_intr_settarget(ino, cpuid);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
-                               "err(%d)\n", ino, cpuid, err);
-                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
-                               "err(%d)\n", ino, err);
-                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
-                               ino, err);
-        }
+        err = sun4v_intr_settarget(ino, cpuid);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+                       "err(%d)\n", ino, cpuid, err);
+        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_setstate(%x): "
+                       "err(%d)\n", ino, err);
+        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
+                       ino, err);
 }
 
 static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-        unsigned int ino = bucket - &ivector_table[0];
-
-        if (likely(bucket)) {
-                unsigned long cpuid;
-                int err;
-
-                cpuid = irq_choose_cpu(virt_irq);
+        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+        unsigned long cpuid = irq_choose_cpu(virt_irq);
+        int err;
 
-                err = sun4v_intr_settarget(ino, cpuid);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
-                               "err(%d)\n", ino, cpuid, err);
-        }
+        err = sun4v_intr_settarget(ino, cpuid);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+                       "err(%d)\n", ino, cpuid, err);
 }
 
 static void sun4v_irq_disable(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-        unsigned int ino = bucket - &ivector_table[0];
-
-        if (likely(bucket)) {
-                int err;
-
-                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_setenabled(%x): "
-                               "err(%d)\n", ino, err);
-        }
-}
-
-#ifdef CONFIG_PCI_MSI
-static void sun4v_msi_enable(unsigned int virt_irq)
-{
-        sun4v_irq_enable(virt_irq);
-        unmask_msi_irq(virt_irq);
-}
+        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+        int err;
 
-static void sun4v_msi_disable(unsigned int virt_irq)
-{
-        mask_msi_irq(virt_irq);
-        sun4v_irq_disable(virt_irq);
+        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
+                       "err(%d)\n", ino, err);
 }
-#endif
 
 static void sun4v_irq_end(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-        unsigned int ino = bucket - &ivector_table[0];
+        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
         struct irq_desc *desc = irq_desc + virt_irq;
+        int err;
 
         if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                 return;
 
-        if (likely(bucket)) {
-                int err;
-
-                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
-                               "err(%d)\n", ino, err);
-        }
+        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_intr_setstate(%x): "
+                       "err(%d)\n", ino, err);
 }
 
 static void sun4v_virq_enable(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
-        if (likely(bucket)) {
-                unsigned long cpuid, dev_handle, dev_ino;
-                int err;
-
-                cpuid = irq_choose_cpu(virt_irq);
-
-                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
-                               "err(%d)\n",
-                               dev_handle, dev_ino, cpuid, err);
-                err = sun4v_vintr_set_state(dev_handle, dev_ino,
-                                            HV_INTR_STATE_IDLE);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-                               "HV_INTR_STATE_IDLE): err(%d)\n",
-                               dev_handle, dev_ino, err);
-                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
-                                            HV_INTR_ENABLED);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-                               "HV_INTR_ENABLED): err(%d)\n",
-                               dev_handle, dev_ino, err);
-        }
+        unsigned long cpuid, dev_handle, dev_ino;
+        int err;
+
+        cpuid = irq_choose_cpu(virt_irq);
+
+        dev_handle = virt_irq_table[virt_irq].dev_handle;
+        dev_ino = virt_irq_table[virt_irq].dev_ino;
+
+        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+                       "err(%d)\n",
+                       dev_handle, dev_ino, cpuid, err);
+        err = sun4v_vintr_set_state(dev_handle, dev_ino,
+                                    HV_INTR_STATE_IDLE);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+                       "HV_INTR_STATE_IDLE): err(%d)\n",
+                       dev_handle, dev_ino, err);
+        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+                                    HV_INTR_ENABLED);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+                       "HV_INTR_ENABLED): err(%d)\n",
+                       dev_handle, dev_ino, err);
 }
 
 static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
-        if (likely(bucket)) {
-                unsigned long cpuid, dev_handle, dev_ino;
-                int err;
-
-                cpuid = irq_choose_cpu(virt_irq);
-
-                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
-                               "err(%d)\n",
-                               dev_handle, dev_ino, cpuid, err);
-        }
+        unsigned long cpuid, dev_handle, dev_ino;
+        int err;
+
+        cpuid = irq_choose_cpu(virt_irq);
+
+        dev_handle = virt_irq_table[virt_irq].dev_handle;
+        dev_ino = virt_irq_table[virt_irq].dev_ino;
+
+        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+                       "err(%d)\n",
+                       dev_handle, dev_ino, cpuid, err);
 }
 
 static void sun4v_virq_disable(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
-        if (likely(bucket)) {
-                unsigned long dev_handle, dev_ino;
-                int err;
-
-                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
-                                            HV_INTR_DISABLED);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-                               "HV_INTR_DISABLED): err(%d)\n",
-                               dev_handle, dev_ino, err);
-        }
+        unsigned long dev_handle, dev_ino;
+        int err;
+
+        dev_handle = virt_irq_table[virt_irq].dev_handle;
+        dev_ino = virt_irq_table[virt_irq].dev_ino;
+
+        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+                                    HV_INTR_DISABLED);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+                       "HV_INTR_DISABLED): err(%d)\n",
+                       dev_handle, dev_ino, err);
 }
 
 static void sun4v_virq_end(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
         struct irq_desc *desc = irq_desc + virt_irq;
+        unsigned long dev_handle, dev_ino;
+        int err;
 
         if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                 return;
 
-        if (likely(bucket)) {
-                unsigned long dev_handle, dev_ino;
-                int err;
-
-                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-                err = sun4v_vintr_set_state(dev_handle, dev_ino,
-                                            HV_INTR_STATE_IDLE);
-                if (err != HV_EOK)
-                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-                               "HV_INTR_STATE_IDLE): err(%d)\n",
-                               dev_handle, dev_ino, err);
-        }
+        dev_handle = virt_irq_table[virt_irq].dev_handle;
+        dev_ino = virt_irq_table[virt_irq].dev_ino;
+
+        err = sun4v_vintr_set_state(dev_handle, dev_ino,
+                                    HV_INTR_STATE_IDLE);
+        if (err != HV_EOK)
+                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+                       "HV_INTR_STATE_IDLE): err(%d)\n",
+                       dev_handle, dev_ino, err);
 }
 
 static void run_pre_handler(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
         struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+        unsigned int ino;
 
+        ino = virt_irq_table[virt_irq].dev_ino;
         if (likely(data->pre_handler)) {
-                data->pre_handler(__irq_ino(__irq(bucket)),
+                data->pre_handler(ino,
                                   data->pre_handler_arg1,
                                   data->pre_handler_arg2);
         }
@@ -573,28 +536,6 @@ static struct irq_chip sun4v_irq = {
         .set_affinity   = sun4v_set_affinity,
 };
 
-static struct irq_chip sun4v_irq_ack = {
-        .typename       = "sun4v+ack",
-        .enable         = sun4v_irq_enable,
-        .disable        = sun4v_irq_disable,
-        .ack            = run_pre_handler,
-        .end            = sun4v_irq_end,
-        .set_affinity   = sun4v_set_affinity,
-};
-
-#ifdef CONFIG_PCI_MSI
-static struct irq_chip sun4v_msi = {
-        .typename       = "sun4v+msi",
-        .mask           = mask_msi_irq,
-        .unmask         = unmask_msi_irq,
-        .enable         = sun4v_msi_enable,
-        .disable        = sun4v_msi_disable,
-        .ack            = run_pre_handler,
-        .end            = sun4v_irq_end,
-        .set_affinity   = sun4v_set_affinity,
-};
-#endif
-
 static struct irq_chip sun4v_virq = {
         .typename       = "vsun4v",
         .enable         = sun4v_virq_enable,
@@ -603,59 +544,48 @@ static struct irq_chip sun4v_virq = {
         .set_affinity   = sun4v_virt_set_affinity,
 };
 
-static struct irq_chip sun4v_virq_ack = {
-        .typename       = "vsun4v+ack",
-        .enable         = sun4v_virq_enable,
-        .disable        = sun4v_virq_disable,
-        .ack            = run_pre_handler,
-        .end            = sun4v_virq_end,
-        .set_affinity   = sun4v_virt_set_affinity,
-};
-
 void irq_install_pre_handler(int virt_irq,
                              void (*func)(unsigned int, void *, void *),
                              void *arg1, void *arg2)
 {
         struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-        struct irq_chip *chip;
+        struct irq_chip *chip = get_irq_chip(virt_irq);
+
+        if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
+                printk(KERN_ERR "IRQ: Trying to install pre-handler on "
+                       "sun4v irq %u\n", virt_irq);
+                return;
+        }
 
         data->pre_handler = func;
         data->pre_handler_arg1 = arg1;
         data->pre_handler_arg2 = arg2;
 
-        chip = get_irq_chip(virt_irq);
-        if (chip == &sun4u_irq_ack ||
-            chip == &sun4v_irq_ack ||
-            chip == &sun4v_virq_ack
-#ifdef CONFIG_PCI_MSI
-            || chip == &sun4v_msi
-#endif
-            )
+        if (chip == &sun4u_irq_ack)
                 return;
 
-        chip = (chip == &sun4u_irq ?
-                &sun4u_irq_ack :
-                (chip == &sun4v_irq ?
-                 &sun4v_irq_ack : &sun4v_virq_ack));
-        set_irq_chip(virt_irq, chip);
+        set_irq_chip(virt_irq, &sun4u_irq_ack);
 }
 
 unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 {
         struct ino_bucket *bucket;
         struct irq_handler_data *data;
+        unsigned int virt_irq;
         int ino;
 
         BUG_ON(tlb_type == hypervisor);
 
         ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
         bucket = &ivector_table[ino];
-        if (!bucket->virt_irq) {
-                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-                set_irq_chip(bucket->virt_irq, &sun4u_irq);
+        virt_irq = bucket_get_virt_irq(__pa(bucket));
+        if (!virt_irq) {
+                virt_irq = virt_irq_alloc(0, ino);
+                bucket_set_virt_irq(__pa(bucket), virt_irq);
+                set_irq_chip(virt_irq, &sun4u_irq);
         }
 
-        data = get_irq_chip_data(bucket->virt_irq);
+        data = get_irq_chip_data(virt_irq);
         if (unlikely(data))
                 goto out;
 
@@ -664,13 +594,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
                 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                 prom_halt();
         }
-        set_irq_chip_data(bucket->virt_irq, data);
+        set_irq_chip_data(virt_irq, data);
 
         data->imap  = imap;
         data->iclr  = iclr;
 
 out:
-        return bucket->virt_irq;
+        return virt_irq;
 }
 
 static unsigned int sun4v_build_common(unsigned long sysino,
@@ -678,16 +608,19 @@ static unsigned int sun4v_build_common(unsigned long sysino,
 {
         struct ino_bucket *bucket;
         struct irq_handler_data *data;
+        unsigned int virt_irq;
 
         BUG_ON(tlb_type != hypervisor);
 
         bucket = &ivector_table[sysino];
-        if (!bucket->virt_irq) {
-                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-                set_irq_chip(bucket->virt_irq, chip);
+        virt_irq = bucket_get_virt_irq(__pa(bucket));
+        if (!virt_irq) {
+                virt_irq = virt_irq_alloc(0, sysino);
+                bucket_set_virt_irq(__pa(bucket), virt_irq);
+                set_irq_chip(virt_irq, chip);
         }
 
-        data = get_irq_chip_data(bucket->virt_irq);
+        data = get_irq_chip_data(virt_irq);
         if (unlikely(data))
                 goto out;
 
@@ -696,7 +629,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
                 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                 prom_halt();
         }
-        set_irq_chip_data(bucket->virt_irq, data);
+        set_irq_chip_data(virt_irq, data);
 
         /* Catch accidental accesses to these things.  IMAP/ICLR handling
          * is done by hypervisor calls on sun4v platforms, not by direct
@@ -706,7 +639,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
         data->iclr = ~0UL;
 
 out:
-        return bucket->virt_irq;
+        return virt_irq;
 }
 
 unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -718,86 +651,52 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
 
 unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 {
-        unsigned long sysino, hv_err;
-        unsigned int virq;
-
-        BUG_ON(devhandle & devino);
-
-        sysino = devhandle | devino;
-        BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
-
-        hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
-        if (hv_err) {
-                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
-                            "err=%lu\n", devhandle, devino, hv_err);
-                prom_halt();
-        }
-
-        virq = sun4v_build_common(sysino, &sun4v_virq);
-
-        virt_to_real_irq_table[virq].dev_handle = devhandle;
-        virt_to_real_irq_table[virq].dev_ino = devino;
-
-        return virq;
-}
-
-#ifdef CONFIG_PCI_MSI
-unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
-                             unsigned int msi_start, unsigned int msi_end)
-{
-        struct ino_bucket *bucket;
         struct irq_handler_data *data;
-        unsigned long sysino;
-        unsigned int devino;
-
-        BUG_ON(tlb_type != hypervisor);
-
-        /* Find a free devino in the given range.  */
-        for (devino = msi_start; devino < msi_end; devino++) {
-                sysino = sun4v_devino_to_sysino(devhandle, devino);
-                bucket = &ivector_table[sysino];
-                if (!bucket->virt_irq)
-                        break;
-        }
-        if (devino >= msi_end)
-                return -ENOSPC;
+        struct ino_bucket *bucket;
+        unsigned long hv_err, cookie;
+        unsigned int virt_irq;
 
-        sysino = sun4v_devino_to_sysino(devhandle, devino);
-        bucket = &ivector_table[sysino];
-        bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-        *virt_irq_p = bucket->virt_irq;
-        set_irq_chip(bucket->virt_irq, &sun4v_msi);
+        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
+        if (unlikely(!bucket))
+                return 0;
+        __flush_dcache_range((unsigned long) bucket,
+                             ((unsigned long) bucket +
+                              sizeof(struct ino_bucket)));
 
-        data = get_irq_chip_data(bucket->virt_irq);
-        if (unlikely(data))
-                return devino;
+        virt_irq = virt_irq_alloc(devhandle, devino);
+        bucket_set_virt_irq(__pa(bucket), virt_irq);
+        set_irq_chip(virt_irq, &sun4v_virq);
 
         data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
-        if (unlikely(!data)) {
-                virt_irq_free(*virt_irq_p);
-                return -ENOMEM;
-        }
-        set_irq_chip_data(bucket->virt_irq, data);
+        if (unlikely(!data))
+                return 0;
+
+        set_irq_chip_data(virt_irq, data);
 
+        /* Catch accidental accesses to these things.  IMAP/ICLR handling
+         * is done by hypervisor calls on sun4v platforms, not by direct
+         * register accesses.
+         */
         data->imap = ~0UL;
         data->iclr = ~0UL;
 
-        return devino;
-}
+        cookie = ~__pa(bucket);
+        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+        if (hv_err) {
+                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+                            "err=%lu\n", devhandle, devino, hv_err);
+                prom_halt();
+        }
 
-void sun4v_destroy_msi(unsigned int virt_irq)
-{
-        virt_irq_free(virt_irq);
+        return virt_irq;
 }
-#endif
 
 void ack_bad_irq(unsigned int virt_irq)
 {
-        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-        unsigned int ino = 0xdeadbeef;
+        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
 
-        if (bucket)
-                ino = bucket - &ivector_table[0];
+        if (!ino)
+                ino = 0xdeadbeef;
 
         printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
                ino, virt_irq);
@@ -805,7 +704,7 @@ void ack_bad_irq(unsigned int virt_irq)
 
 void handler_irq(int irq, struct pt_regs *regs)
 {
-        struct ino_bucket *bucket;
+        unsigned long pstate, bucket_pa;
         struct pt_regs *old_regs;
 
         clear_softint(1 << irq);
@@ -813,15 +712,28 @@ void handler_irq(int irq, struct pt_regs *regs)
         old_regs = set_irq_regs(regs);
         irq_enter();
 
-        /* Sliiiick... */
-        bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
-        while (bucket) {
-                struct ino_bucket *next = __bucket(bucket->irq_chain);
+        /* Grab an atomic snapshot of the pending IVECs.  */
+        __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
+                             "wrpr      %0, %3, %%pstate\n\t"
+                             "ldx       [%2], %1\n\t"
+                             "stx       %%g0, [%2]\n\t"
+                             "wrpr      %0, 0x0, %%pstate\n\t"
+                             : "=&r" (pstate), "=&r" (bucket_pa)
+                             : "r" (irq_work_pa(smp_processor_id())),
+                               "i" (PSTATE_IE)
+                             : "memory");
+
+        while (bucket_pa) {
+                unsigned long next_pa;
+                unsigned int virt_irq;
 
-                bucket->irq_chain = 0;
-                __do_IRQ(bucket->virt_irq);
+                next_pa = bucket_get_chain_pa(bucket_pa);
+                virt_irq = bucket_get_virt_irq(bucket_pa);
+                bucket_clear_chain_pa(bucket_pa);
 
-                bucket = next;
+                __do_IRQ(virt_irq);
+
+                bucket_pa = next_pa;
         }
 
         irq_exit();
@@ -921,7 +833,7 @@ void init_irqwork_curcpu(void)
 {
         int cpu = hard_smp_processor_id();
 
-        trap_block[cpu].irq_worklist = 0;
+        trap_block[cpu].irq_worklist_pa = 0UL;
 }
 
 /* Please be very careful with register_one_mondo() and
@@ -1035,9 +947,21 @@ static struct irqaction timer_irq_action = {
 /* Only invoked on boot processor. */
 void __init init_IRQ(void)
 {
+        unsigned long size;
+
         map_prom_timers();
         kill_prom_timer();
-        memset(&ivector_table[0], 0, sizeof(ivector_table));
+
+        size = sizeof(struct ino_bucket) * NUM_IVECS;
+        ivector_table = alloc_bootmem_low(size);
+        if (!ivector_table) {
+                prom_printf("Fatal error, cannot allocate ivector_table\n");
+                prom_halt();
+        }
+        __flush_dcache_range((unsigned long) ivector_table,
+                             ((unsigned long) ivector_table) + size);
+
+        ivector_table_pa = __pa(ivector_table);
 
         if (tlb_type == hypervisor)
                 sun4v_init_mondo_queues();
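
The new handler_irq() above can be read as the following sketch: the
inline assembler snapshots and clears the per-cpu worklist head in one
critical section with PSTATE_IE cleared, and the chain of buckets is
then walked through the bypass accessors. worklist_swap() here is a
hypothetical stand-in for that rdpr/wrpr + ldx/stx sequence, not a
real kernel API:

        /* Sketch of the dispatch loop (illustrative only). */
        unsigned long bucket_pa = worklist_swap(smp_processor_id(), 0UL);

        while (bucket_pa) {
                unsigned long next_pa = bucket_get_chain_pa(bucket_pa);
                unsigned int virt_irq = bucket_get_virt_irq(bucket_pa);

                bucket_clear_chain_pa(bucket_pa);
                __do_IRQ(virt_irq);
                bucket_pa = next_pa;
        }

Clearing each bucket's chain field before calling __do_IRQ() lets the
trap handler re-queue the same INO while its handler is still running.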
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 4cc77485f53..42d779866fb 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -872,7 +872,10 @@ __setup("of_debug=", of_debug);
 int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
 {
         /* initialize common driver fields */
-        drv->driver.name = drv->name;
+        if (!drv->driver.name)
+                drv->driver.name = drv->name;
+        if (!drv->driver.owner)
+                drv->driver.owner = drv->owner;
         drv->driver.bus = bus;
 
         /* register with core */
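
With this fallback in place a driver may fill in the embedded struct
device_driver directly, as the auxio conversion earlier in this patch
does; of_register_driver() only copies the legacy fields when the new
ones are unset. A minimal sketch (the names are illustrative, not part
of the patch):

        static struct of_platform_driver example_driver = {
                .match_table    = example_match,        /* hypothetical */
                .probe          = example_probe,        /* hypothetical */
                .driver         = {
                        .name   = "example",
                },
        };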
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index e8dac81d8a0..9b808640a19 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -29,8 +29,6 @@
 
 #include "pci_impl.h"
 
-unsigned long pci_memspace_mask = 0xffffffffUL;
-
 #ifndef CONFIG_PCI
 /* A "nop" PCI implementation. */
 asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
@@ -1066,8 +1064,8 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
         return 0;
 }
 
-/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
- * to the 32-bit pci bus offset for DEV requested by the user.
+/* Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
  *
  * Basically, the user finds the base address for his device which he wishes
  * to mmap.  They read the 32-bit value from the config space base register,
@@ -1076,21 +1074,35 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
  *
  * Returns negative error code on failure, zero on success.
  */
-static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+static int __pci_mmap_make_offset(struct pci_dev *pdev,
+                                  struct vm_area_struct *vma,
                                   enum pci_mmap_state mmap_state)
 {
-        unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
-        unsigned long user32 = user_offset & pci_memspace_mask;
-        unsigned long largest_base, this_base, addr32;
-        int i;
+        unsigned long user_paddr, user_size;
+        int i, err;
 
-        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
-                return __pci_mmap_make_offset_bus(dev, vma, mmap_state);
+        /* First compute the physical address in vma->vm_pgoff,
+         * making sure the user offset is within range in the
+         * appropriate PCI space.
+         */
+        err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
+        if (err)
+                return err;
+
+        /* If this is a mapping on a host bridge, any address
+         * is OK.
+         */
+        if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+                return err;
+
+        /* Otherwise make sure it's in the range for one of the
+         * device's resources.
+         */
+        user_paddr = vma->vm_pgoff << PAGE_SHIFT;
+        user_size = vma->vm_end - vma->vm_start;
 
-        /* Figure out which base address this is for. */
-        largest_base = 0UL;
         for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-                struct resource *rp = &dev->resource[i];
+                struct resource *rp = &pdev->resource[i];
 
                 /* Active? */
                 if (!rp->flags)
@@ -1108,26 +1120,14 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
                         continue;
                 }
 
-                this_base = rp->start;
-
-                addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
-
-                if (mmap_state == pci_mmap_io)
-                        addr32 &= 0xffffff;
-
-                if (addr32 <= user32 && this_base > largest_base)
-                        largest_base = this_base;
+                if ((rp->start <= user_paddr) &&
+                    (user_paddr + user_size) <= (rp->end + 1UL))
+                        break;
         }
 
-        if (largest_base == 0UL)
+        if (i > PCI_ROM_RESOURCE)
                 return -EINVAL;
 
-        /* Now construct the final physical address. */
-        if (mmap_state == pci_mmap_io)
-                vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
-        else
-                vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
-
         return 0;
 }
 
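
The rewritten check no longer reconstructs a physical address from a
masked 32-bit offset; __pci_mmap_make_offset_bus() has already placed a
physical page offset in vm_pgoff, and the loop merely verifies that the
requested window fits inside one of the device's resources. The test
reduces to an interval-containment predicate, sketched here as a
standalone helper (the name is illustrative, not part of the patch):

        static int paddr_in_resource(const struct resource *rp,
                                     unsigned long paddr, unsigned long size)
        {
                /* [paddr, paddr + size) must lie within [start, end]. */
                return (rp->start <= paddr) &&
                       ((paddr + size) <= (rp->end + 1UL));
        }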
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 14d67fe21ab..fef3b37487b 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -6,9 +6,12 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/irq.h>
 
 #include <asm/oplib.h>
 #include <asm/prom.h>
+#include <asm/irq.h>
 
 #include "pci_impl.h"
 
@@ -84,6 +87,266 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
         return 0;
 }
 
+#ifdef CONFIG_PCI_MSI
+struct pci_msiq_entry {
+        u64     word0;
+#define MSIQ_WORD0_RESV                 0x8000000000000000UL
+#define MSIQ_WORD0_FMT_TYPE             0x7f00000000000000UL
+#define MSIQ_WORD0_FMT_TYPE_SHIFT       56
+#define MSIQ_WORD0_LEN                  0x00ffc00000000000UL
+#define MSIQ_WORD0_LEN_SHIFT            46
+#define MSIQ_WORD0_ADDR0                0x00003fff00000000UL
+#define MSIQ_WORD0_ADDR0_SHIFT          32
+#define MSIQ_WORD0_RID                  0x00000000ffff0000UL
+#define MSIQ_WORD0_RID_SHIFT            16
+#define MSIQ_WORD0_DATA0                0x000000000000ffffUL
+#define MSIQ_WORD0_DATA0_SHIFT          0
+
+#define MSIQ_TYPE_MSG                   0x6
+#define MSIQ_TYPE_MSI32                 0xb
+#define MSIQ_TYPE_MSI64                 0xf
+
+        u64     word1;
+#define MSIQ_WORD1_ADDR1                0xffffffffffff0000UL
+#define MSIQ_WORD1_ADDR1_SHIFT          16
+#define MSIQ_WORD1_DATA1                0x000000000000ffffUL
+#define MSIQ_WORD1_DATA1_SHIFT          0
+
+        u64     resv[6];
+};
+
+/* All MSI registers are offset from pbm->pbm_regs */
+#define EVENT_QUEUE_BASE_ADDR_REG       0x010000UL
+#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
+
+#define EVENT_QUEUE_CONTROL_SET(EQ)     (0x011000UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_CONTROL_SET_OFLOW  0x0200000000000000UL
+#define  EVENT_QUEUE_CONTROL_SET_EN     0x0000100000000000UL
+
+#define EVENT_QUEUE_CONTROL_CLEAR(EQ)   (0x011200UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_CONTROL_CLEAR_OF   0x0200000000000000UL
+#define  EVENT_QUEUE_CONTROL_CLEAR_E2I  0x0000800000000000UL
+#define  EVENT_QUEUE_CONTROL_CLEAR_DIS  0x0000100000000000UL
+
+#define EVENT_QUEUE_STATE(EQ)           (0x011400UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_STATE_MASK         0x0000000000000007UL
+#define  EVENT_QUEUE_STATE_IDLE         0x0000000000000001UL
+#define  EVENT_QUEUE_STATE_ACTIVE       0x0000000000000002UL
+#define  EVENT_QUEUE_STATE_ERROR        0x0000000000000004UL
+
+#define EVENT_QUEUE_TAIL(EQ)            (0x011600UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_TAIL_OFLOW         0x0200000000000000UL
+#define  EVENT_QUEUE_TAIL_VAL           0x000000000000007fUL
+
+#define EVENT_QUEUE_HEAD(EQ)            (0x011800UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_HEAD_VAL           0x000000000000007fUL
+
+#define MSI_MAP(MSI)                    (0x020000UL + (MSI) * 0x8UL)
+#define  MSI_MAP_VALID                  0x8000000000000000UL
+#define  MSI_MAP_EQWR_N                 0x4000000000000000UL
+#define  MSI_MAP_EQNUM                  0x000000000000003fUL
+
+#define MSI_CLEAR(MSI)                  (0x028000UL + (MSI) * 0x8UL)
+#define  MSI_CLEAR_EQWR_N               0x4000000000000000UL
+
+#define IMONDO_DATA0                    0x02C000UL
+#define  IMONDO_DATA0_DATA              0xffffffffffffffc0UL
+
+#define IMONDO_DATA1                    0x02C008UL
+#define  IMONDO_DATA1_DATA              0xffffffffffffffffUL
+
+#define MSI_32BIT_ADDR                  0x034000UL
+#define  MSI_32BIT_ADDR_VAL             0x00000000ffff0000UL
+
+#define MSI_64BIT_ADDR                  0x034008UL
+#define  MSI_64BIT_ADDR_VAL             0xffffffffffff0000UL
+
+static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+                             unsigned long *head)
+{
+        *head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+        return 0;
+}
+
+static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
+                                unsigned long *head, unsigned long *msi)
+{
+        unsigned long type_fmt, type, msi_num;
+        struct pci_msiq_entry *base, *ep;
+
+        base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
+        ep = &base[*head];
+
+        if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
+                return 0;
+
+        type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
+                    MSIQ_WORD0_FMT_TYPE_SHIFT);
+        type = (type_fmt >> 3);
+        if (unlikely(type != MSIQ_TYPE_MSI32 &&
+                     type != MSIQ_TYPE_MSI64))
+                return -EINVAL;
+
+        *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
+                          MSIQ_WORD0_DATA0_SHIFT);
+
+        fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
+                   MSI_CLEAR_EQWR_N);
+
+        /* Clear the entry.  */
+        ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
+
+        /* Go to next entry in ring.  */
+        (*head)++;
+        if (*head >= pbm->msiq_ent_count)
+                *head = 0;
+
+        return 1;
+}
+
+static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+                             unsigned long head)
+{
+        fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
+        return 0;
+}
+
+static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+                              unsigned long msi, int is_msi64)
+{
+        u64 val;
+
+        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+        val &= ~(MSI_MAP_EQNUM);
+        val |= msiqid;
+        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+        fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
+                   MSI_CLEAR_EQWR_N);
+
+        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+        val |= MSI_MAP_VALID;
+        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+        return 0;
+}
+
+static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
+{
+        unsigned long msiqid;
+        u64 val;
+
+        val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+        msiqid = (val & MSI_MAP_EQNUM);
+
+        val &= ~MSI_MAP_VALID;
+
+        fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+        return 0;
+}
+
+static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
+{
+        unsigned long pages, order, i;
+
+        order = get_order(512 * 1024);
+        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
+        if (pages == 0UL) {
+                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
+                       order);
+                return -ENOMEM;
+        }
+        memset((char *)pages, 0, PAGE_SIZE << order);
+        pbm->msi_queues = (void *) pages;
+
+        fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
+                   (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
+                    __pa(pbm->msi_queues)));
+
+        fire_write(pbm->pbm_regs + IMONDO_DATA0,
+                   pbm->portid << 6);
+        fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);
+
+        fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
+                   pbm->msi32_start);
+        fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
+                   pbm->msi64_start);
+
+        for (i = 0; i < pbm->msiq_num; i++) {
+                fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
+                fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
+        }
+
+        return 0;
+}
+
+static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
+{
+        unsigned long pages, order;
+
+        order = get_order(512 * 1024);
+        pages = (unsigned long) pbm->msi_queues;
+
+        free_pages(pages, order);
+
+        pbm->msi_queues = NULL;
+}
+
+static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
+                                   unsigned long msiqid,
+                                   unsigned long devino)
+{
+        unsigned long cregs = (unsigned long) pbm->pbm_regs;
+        unsigned long imap_reg, iclr_reg, int_ctrlr;
+        unsigned int virt_irq;
+        int fixup;
+        u64 val;
+
+        imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
+        iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
+
+        /* XXX iterate amongst the 4 IRQ controllers XXX */
+        int_ctrlr = (1UL << 6);
+
+        val = fire_read(imap_reg);
+        val |= (1UL << 63) | int_ctrlr;
+        fire_write(imap_reg, val);
+
+        fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
+
+        virt_irq = build_irq(fixup, iclr_reg, imap_reg);
+        if (!virt_irq)
+                return -ENOMEM;
+
+        fire_write(pbm->pbm_regs +
+                   EVENT_QUEUE_CONTROL_SET(msiqid),
+                   EVENT_QUEUE_CONTROL_SET_EN);
+
+        return virt_irq;
+}
+
+static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
+        .get_head       = pci_fire_get_head,
+        .dequeue_msi    = pci_fire_dequeue_msi,
+        .set_head       = pci_fire_set_head,
+        .msi_setup      = pci_fire_msi_setup,
+        .msi_teardown   = pci_fire_msi_teardown,
+        .msiq_alloc     = pci_fire_msiq_alloc,
+        .msiq_free      = pci_fire_msiq_free,
+        .msiq_build_irq = pci_fire_msiq_build_irq,
+};
+
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+        sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
+}
+#else /* CONFIG_PCI_MSI */
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+}
+#endif /* !(CONFIG_PCI_MSI) */
+
 /* Based at pbm->controller_regs */
 #define FIRE_PARITY_CONTROL     0x470010UL
 #define  FIRE_PARITY_ENAB       0x8000000000000000UL
@@ -176,6 +439,7 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
 {
         const struct linux_prom64_registers *regs;
         struct pci_pbm_info *pbm;
+        int err;
 
         if ((portid & 1) == 0)
                 pbm = &p->pbm_A;
@@ -208,7 +472,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
 
         pci_fire_hw_init(pbm);
 
-        return pci_fire_pbm_iommu_init(pbm);
+        err = pci_fire_pbm_iommu_init(pbm);
+        if (err)
+                return err;
+
+        pci_fire_msi_init(pbm);
+
+        return 0;
 }
 
 static inline int portid_compare(u32 x, u32 y)
@@ -249,13 +519,6 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
 
         p->pbm_B.iommu = iommu;
 
-        /* XXX MSI support XXX */
-
-        /* Like PSYCHO and SCHIZO we have a 2GB aligned area
-         * for memory space.
-         */
-        pci_memspace_mask = 0x7fffffffUL;
-
         if (pci_fire_pbm_init(p, dp, portid))
                 goto fatal_memory_error;
 
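
The Fire event queue is a ring of 64-byte entries whose HEAD and TAIL
registers hold 7-bit indices. One consumption pass, sketched with the
static helpers added above (error handling omitted; the real loop
lives in the common layer in pci_msi.c below):

        /* Sketch only: drain one Fire MSI event queue. */
        static void fire_drain_queue_sketch(struct pci_pbm_info *pbm,
                                            unsigned long msiqid)
        {
                unsigned long head, msi;

                pci_fire_get_head(pbm, msiqid, &head);
                /* Each successful dequeue acks the MSI via MSI_CLEAR
                 * and advances head, wrapping at msiq_ent_count.
                 */
                while (pci_fire_dequeue_msi(pbm, msiqid, &head, &msi) > 0)
                        /* dispatch of "msi" happens in the common layer */;
                pci_fire_set_head(pbm, msiqid, head);
        }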
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index f660c2b685e..4a50da13ce4 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -29,6 +29,33 @@
 #define PCI_STC_FLUSHFLAG_SET(STC) \
         (*((STC)->strbuf_flushflag) != 0UL)
 
+#ifdef CONFIG_PCI_MSI
+struct pci_pbm_info;
+struct sparc64_msiq_ops {
+        int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+                        unsigned long *head);
+        int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
+                           unsigned long *head, unsigned long *msi);
+        int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+                        unsigned long head);
+        int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
+                         unsigned long msi, int is_msi64);
+        int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
+        int (*msiq_alloc)(struct pci_pbm_info *pbm);
+        void (*msiq_free)(struct pci_pbm_info *pbm);
+        int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
+                              unsigned long devino);
+};
+
+extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+                                 const struct sparc64_msiq_ops *ops);
+
+struct sparc64_msiq_cookie {
+        struct pci_pbm_info *pbm;
+        unsigned long msiqid;
+};
+#endif
+
 struct pci_controller_info;
 
 struct pci_pbm_info {
@@ -90,6 +117,8 @@ struct pci_pbm_info {
         u32 msiq_ent_count;
         u32 msiq_first;
         u32 msiq_first_devino;
+        u32 msiq_rotor;
+        struct sparc64_msiq_cookie *msiq_irq_cookies;
         u32 msi_num;
         u32 msi_first;
         u32 msi_data_mask;
@@ -100,9 +129,11 @@ struct pci_pbm_info {
         u32 msi64_len;
         void *msi_queues;
         unsigned long *msi_bitmap;
+        unsigned int *msi_irq_table;
         int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
                              struct msi_desc *entry);
         void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+        const struct sparc64_msiq_ops *msi_ops;
 #endif /* !(CONFIG_PCI_MSI) */
 
         /* This PBM's streaming buffer. */
@@ -126,7 +157,6 @@ struct pci_controller_info {
 };
 
 extern struct pci_pbm_info *pci_pbm_root;
-extern unsigned long pci_memspace_mask;
 
 extern int pci_num_pbms;
 
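
The ops vector is what decouples the common MSI layer in pci_msi.c
from the hardware backends: pci_fire.c fills it with register
accesses, and pci_sun4v.c (later in this patch) with hypervisor calls.
A new backend would wire itself up the same way (a sketch; all names
here are hypothetical):

        static const struct sparc64_msiq_ops example_msiq_ops = {
                .get_head       = example_get_head,
                .dequeue_msi    = example_dequeue_msi,
                .set_head       = example_set_head,
                .msi_setup      = example_msi_setup,
                .msi_teardown   = example_msi_teardown,
                .msiq_alloc     = example_msiq_alloc,
                .msiq_free      = example_msiq_free,
                .msiq_build_irq = example_msiq_build_irq,
        };

        static void example_msi_init(struct pci_pbm_info *pbm)
        {
                sparc64_pbm_msi_init(pbm, &example_msiq_ops);
        }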
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c
new file mode 100644
index 00000000000..31a165fd3e4
--- /dev/null
+++ b/arch/sparc64/kernel/pci_msi.c
@@ -0,0 +1,433 @@
1/* pci_msi.c: Sparc64 MSI support common layer.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5#include <linux/kernel.h>
6#include <linux/interrupt.h>
7#include <linux/irq.h>
8
9#include "pci_impl.h"
10
11static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
12{
13 struct sparc64_msiq_cookie *msiq_cookie = cookie;
14 struct pci_pbm_info *pbm = msiq_cookie->pbm;
15 unsigned long msiqid = msiq_cookie->msiqid;
16 const struct sparc64_msiq_ops *ops;
17 unsigned long orig_head, head;
18 int err;
19
20 ops = pbm->msi_ops;
21
22 err = ops->get_head(pbm, msiqid, &head);
23 if (unlikely(err < 0))
24 goto err_get_head;
25
26 orig_head = head;
27 for (;;) {
28 unsigned long msi;
29
30 err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
31 if (likely(err > 0))
32 __do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]);
33
34 if (unlikely(err < 0))
35 goto err_dequeue;
36
37 if (err == 0)
38 break;
39 }
40 if (likely(head != orig_head)) {
41 err = ops->set_head(pbm, msiqid, head);
42 if (unlikely(err < 0))
43 goto err_set_head;
44 }
45 return IRQ_HANDLED;
46
47err_get_head:
48 printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
49 msiqid, err);
50 goto err_out;
51
52err_dequeue:
53 printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
54 "gives error %d\n",
55 head, msiqid, err);
56 goto err_out;
57
58err_set_head:
59 printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
60 "gives error %d\n",
61 head, msiqid, err);
62 goto err_out;
63
64err_out:
65 return IRQ_NONE;
66}
67
68static u32 pick_msiq(struct pci_pbm_info *pbm)
69{
70 static DEFINE_SPINLOCK(rotor_lock);
71 unsigned long flags;
72 u32 ret, rotor;
73
74 spin_lock_irqsave(&rotor_lock, flags);
75
76 rotor = pbm->msiq_rotor;
77 ret = pbm->msiq_first + rotor;
78
79 if (++rotor >= pbm->msiq_num)
80 rotor = 0;
81 pbm->msiq_rotor = rotor;
82
83 spin_unlock_irqrestore(&rotor_lock, flags);
84
85 return ret;
86}
87
88
89static int alloc_msi(struct pci_pbm_info *pbm)
90{
91 int i;
92
93 for (i = 0; i < pbm->msi_num; i++) {
94 if (!test_and_set_bit(i, pbm->msi_bitmap))
95 return i + pbm->msi_first;
96 }
97
98 return -ENOENT;
99}
100
101static void free_msi(struct pci_pbm_info *pbm, int msi_num)
102{
103 msi_num -= pbm->msi_first;
104 clear_bit(msi_num, pbm->msi_bitmap);
105}
106
107static struct irq_chip msi_irq = {
108 .typename = "PCI-MSI",
109 .mask = mask_msi_irq,
110 .unmask = unmask_msi_irq,
111 .enable = unmask_msi_irq,
112 .disable = mask_msi_irq,
113 /* XXX affinity XXX */
114};
115
116int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
117 struct pci_dev *pdev,
118 struct msi_desc *entry)
119{
120 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
121 const struct sparc64_msiq_ops *ops = pbm->msi_ops;
122 struct msi_msg msg;
123 int msi, err;
124 u32 msiqid;
125
126 *virt_irq_p = virt_irq_alloc(0, 0);
127 err = -ENOMEM;
128 if (!*virt_irq_p)
129 goto out_err;
130
131 set_irq_chip(*virt_irq_p, &msi_irq);
132
133 err = alloc_msi(pbm);
134 if (unlikely(err < 0))
135 goto out_virt_irq_free;
136
137 msi = err;
138
139 msiqid = pick_msiq(pbm);
140
141 err = ops->msi_setup(pbm, msiqid, msi,
142 (entry->msi_attrib.is_64 ? 1 : 0));
143 if (err)
144 goto out_msi_free;
145
146 pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
147
148 if (entry->msi_attrib.is_64) {
149 msg.address_hi = pbm->msi64_start >> 32;
150 msg.address_lo = pbm->msi64_start & 0xffffffff;
151 } else {
152 msg.address_hi = 0;
153 msg.address_lo = pbm->msi32_start;
154 }
155 msg.data = msi;
156
157 set_irq_msi(*virt_irq_p, entry);
158 write_msi_msg(*virt_irq_p, &msg);
159
160 return 0;
161
162out_msi_free:
163 free_msi(pbm, msi);
164
165out_virt_irq_free:
166 set_irq_chip(*virt_irq_p, NULL);
167 virt_irq_free(*virt_irq_p);
168 *virt_irq_p = 0;
169
170out_err:
171 return err;
172}
173
174void sparc64_teardown_msi_irq(unsigned int virt_irq,
175 struct pci_dev *pdev)
176{
177 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
178 const struct sparc64_msiq_ops *ops = pbm->msi_ops;
179 unsigned int msi_num;
180 int i, err;
181
182 for (i = 0; i < pbm->msi_num; i++) {
183 if (pbm->msi_irq_table[i] == virt_irq)
184 break;
185 }
186 if (i >= pbm->msi_num) {
187 printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
188 pbm->name, virt_irq);
189 return;
190 }
191
192 msi_num = pbm->msi_first + i;
193 pbm->msi_irq_table[i] = ~0U;
194
195 err = ops->msi_teardown(pbm, msi_num);
196 if (err) {
197 printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
198 "irq %u, gives error %d\n",
199 pbm->name, msi_num, virt_irq, err);
200 return;
201 }
202
203 free_msi(pbm, msi_num);
204
205 set_irq_chip(virt_irq, NULL);
206 virt_irq_free(virt_irq);
207}
208
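Teardown keeps no reverse map, so it linearly scans msi_irq_table[] for the slot holding virt_irq; the slot index recovers the hardware MSI number (msi_first + i), and the slot is poisoned with ~0U so a stale IRQ cannot match again. The lookup pattern on its own (table contents invented):

#include <stdio.h>

static int irq_to_msi_slot(const unsigned int *table, int num,
			   unsigned int virt_irq)
{
	int i;

	for (i = 0; i < num; i++) {
		if (table[i] == virt_irq)
			return i;	/* hardware MSI = msi_first + i */
	}
	return -1;			/* no MSI bound to this irq */
}

int main(void)
{
	unsigned int table[4] = { 11, ~0U, 13, 14 };	/* slot 1 poisoned */

	printf("%d %d\n", irq_to_msi_slot(table, 4, 13),
	       irq_to_msi_slot(table, 4, 12));		/* 2 -1 */
	return 0;
}
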
209static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
210{
211 unsigned long size, bits_per_ulong;
212
213 bits_per_ulong = sizeof(unsigned long) * 8;
214 size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
215 size /= 8;
216 BUG_ON(size % sizeof(unsigned long));
217
218 pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
219 if (!pbm->msi_bitmap)
220 return -ENOMEM;
221
222 return 0;
223}
224
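The sizing arithmetic rounds msi_num up to a whole number of unsigned longs before converting bits to bytes, which is why the BUG_ON can never fire: with 64-bit longs and, say, msi_num = 100, size becomes (100 + 63) & ~63 = 128 bits, i.e. 16 bytes, exactly two unsigned longs. Checked directly:

#include <assert.h>

int main(void)
{
	unsigned long msi_num = 100;	/* hypothetical */
	unsigned long bits_per_ulong = sizeof(unsigned long) * 8;
	unsigned long size;

	size = (msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;			/* bits -> bytes */

	assert(size == 16);		/* on LP64: two 8-byte longs */
	assert(size % sizeof(unsigned long) == 0);
	return 0;
}
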
225static void msi_bitmap_free(struct pci_pbm_info *pbm)
226{
227 kfree(pbm->msi_bitmap);
228 pbm->msi_bitmap = NULL;
229}
230
231static int msi_table_alloc(struct pci_pbm_info *pbm)
232{
233 int size, i;
234
235 size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
236 pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
237 if (!pbm->msiq_irq_cookies)
238 return -ENOMEM;
239
240 for (i = 0; i < pbm->msiq_num; i++) {
241 struct sparc64_msiq_cookie *p;
242
243 p = &pbm->msiq_irq_cookies[i];
244 p->pbm = pbm;
245 p->msiqid = pbm->msiq_first + i;
246 }
247
248 size = pbm->msi_num * sizeof(unsigned int);
249 pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
250 if (!pbm->msi_irq_table) {
251 kfree(pbm->msiq_irq_cookies);
252 pbm->msiq_irq_cookies = NULL;
253 return -ENOMEM;
254 }
255
256 return 0;
257}
258
259static void msi_table_free(struct pci_pbm_info *pbm)
260{
261 kfree(pbm->msiq_irq_cookies);
262 pbm->msiq_irq_cookies = NULL;
263
264 kfree(pbm->msi_irq_table);
265 pbm->msi_irq_table = NULL;
266}
267
268static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
269 const struct sparc64_msiq_ops *ops,
270 unsigned long msiqid,
271 unsigned long devino)
272{
273 int irq = ops->msiq_build_irq(pbm, msiqid, devino);
274 int err;
275
276 if (irq < 0)
277 return irq;
278
279 err = request_irq(irq, sparc64_msiq_interrupt, 0,
280 "MSIQ",
281 &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
282 if (err)
283 return err;
284
285 return 0;
286}
287
288static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
289 const struct sparc64_msiq_ops *ops)
290{
291 int i;
292
293 for (i = 0; i < pbm->msiq_num; i++) {
294 unsigned long msiqid = i + pbm->msiq_first;
295 unsigned long devino = i + pbm->msiq_first_devino;
296 int err;
297
298 err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
299 if (err)
300 return err;
301 }
302
303 return 0;
304}
305
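sparc64_bringup_msi_queues() walks the queues with two parallel linear mappings: queue i gets msiqid = msiq_first + i and interrupt source devino = msiq_first_devino + i, and msiq_irq_cookies[i] (carrying pbm and msiqid) becomes the request_irq() cookie so the shared handler can recover its context. The index relationships, with made-up base values:

#include <stdio.h>

int main(void)
{
	unsigned long msiq_first = 8, msiq_first_devino = 24; /* hypothetical */
	int msiq_num = 4, i;

	for (i = 0; i < msiq_num; i++)
		printf("queue %d: msiqid=%lu devino=%lu cookie slot=%d\n",
		       i, msiq_first + i, msiq_first_devino + i, i);
	return 0;
}
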
306void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
307 const struct sparc64_msiq_ops *ops)
308{
309 const u32 *val;
310 int len;
311
312 val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
313 if (!val || len != 4)
314 goto no_msi;
315 pbm->msiq_num = *val;
316 if (pbm->msiq_num) {
317 const struct msiq_prop {
318 u32 first_msiq;
319 u32 num_msiq;
320 u32 first_devino;
321 } *mqp;
322 const struct msi_range_prop {
323 u32 first_msi;
324 u32 num_msi;
325 } *mrng;
326 const struct addr_range_prop {
327 u32 msi32_high;
328 u32 msi32_low;
329 u32 msi32_len;
330 u32 msi64_high;
331 u32 msi64_low;
332 u32 msi64_len;
333 } *arng;
334
335 val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
336 if (!val || len != 4)
337 goto no_msi;
338
339 pbm->msiq_ent_count = *val;
340
341 mqp = of_get_property(pbm->prom_node,
342 "msi-eq-to-devino", &len);
343 if (!mqp)
344 mqp = of_get_property(pbm->prom_node,
345 "msi-eq-devino", &len);
346 if (!mqp || len != sizeof(struct msiq_prop))
347 goto no_msi;
348
349 pbm->msiq_first = mqp->first_msiq;
350 pbm->msiq_first_devino = mqp->first_devino;
351
352 val = of_get_property(pbm->prom_node, "#msi", &len);
353 if (!val || len != 4)
354 goto no_msi;
355 pbm->msi_num = *val;
356
357 mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
358 if (!mrng || len != sizeof(struct msi_range_prop))
359 goto no_msi;
360 pbm->msi_first = mrng->first_msi;
361
362 val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
363 if (!val || len != 4)
364 goto no_msi;
365 pbm->msi_data_mask = *val;
366
367 val = of_get_property(pbm->prom_node, "msix-data-width", &len);
368 if (!val || len != 4)
369 goto no_msi;
370 pbm->msix_data_width = *val;
371
372 arng = of_get_property(pbm->prom_node, "msi-address-ranges",
373 &len);
374 if (!arng || len != sizeof(struct addr_range_prop))
375 goto no_msi;
376 pbm->msi32_start = ((u64)arng->msi32_high << 32) |
377 (u64) arng->msi32_low;
378 pbm->msi64_start = ((u64)arng->msi64_high << 32) |
379 (u64) arng->msi64_low;
380 pbm->msi32_len = arng->msi32_len;
381 pbm->msi64_len = arng->msi64_len;
382
383 if (msi_bitmap_alloc(pbm))
384 goto no_msi;
385
386 if (msi_table_alloc(pbm)) {
387 msi_bitmap_free(pbm);
388 goto no_msi;
389 }
390
391 if (ops->msiq_alloc(pbm)) {
392 msi_table_free(pbm);
393 msi_bitmap_free(pbm);
394 goto no_msi;
395 }
396
397 if (sparc64_bringup_msi_queues(pbm, ops)) {
398 ops->msiq_free(pbm);
399 msi_table_free(pbm);
400 msi_bitmap_free(pbm);
401 goto no_msi;
402 }
403
404 printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
405 "devino[0x%x]\n",
406 pbm->name,
407 pbm->msiq_first, pbm->msiq_num,
408 pbm->msiq_ent_count,
409 pbm->msiq_first_devino);
410 printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
411 "width[%u]\n",
412 pbm->name,
413 pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
414 pbm->msix_data_width);
415 printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
416 "addr64[0x%lx:0x%x]\n",
417 pbm->name,
418 pbm->msi32_start, pbm->msi32_len,
419 pbm->msi64_start, pbm->msi64_len);
420 printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
421 pbm->name,
422 __pa(pbm->msi_queues));
423
424 pbm->msi_ops = ops;
425 pbm->setup_msi_irq = sparc64_setup_msi_irq;
426 pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
427 }
428 return;
429
430no_msi:
431 pbm->msiq_num = 0;
432 printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
433}
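Of the properties parsed above, msi-address-ranges is the only one needing assembly: it is six u32 cells, and each 64-bit window base is rebuilt from a high/low pair exactly as the printouts format it. With invented cell values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical msi-address-ranges cells (six u32s) */
	uint32_t msi32_high = 0x0000007f, msi32_low = 0xf0000000, msi32_len = 0x100000;
	uint32_t msi64_high = 0x000000ff, msi64_low = 0x00000000, msi64_len = 0x100000;

	uint64_t msi32_start = ((uint64_t)msi32_high << 32) | msi32_low;
	uint64_t msi64_start = ((uint64_t)msi64_high << 32) | msi64_low;

	printf("MSI addr32[0x%llx:0x%x] addr64[0x%llx:0x%x]\n",
	       (unsigned long long)msi32_start, msi32_len,
	       (unsigned long long)msi64_start, msi64_len);
	return 0;
}
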
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index b6b4cfea5b5..d27ee5d528a 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1058,12 +1058,6 @@ void psycho_init(struct device_node *dp, char *model_name)
1058 p->pbm_A.config_space = p->pbm_B.config_space = 1058 p->pbm_A.config_space = p->pbm_B.config_space =
1059 (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE); 1059 (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
1060 1060
1061 /*
1062 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
1063 * we need to adjust our MEM space mask.
1064 */
1065 pci_memspace_mask = 0x7fffffffUL;
1066
1067 psycho_controller_hwinit(&p->pbm_A); 1061 psycho_controller_hwinit(&p->pbm_A);
1068 1062
1069 if (psycho_iommu_init(&p->pbm_A)) 1063 if (psycho_iommu_init(&p->pbm_A))
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 3c30bfa1f3a..9546ba9f5de 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1464,9 +1464,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
1464 1464
1465 p->pbm_B.iommu = iommu; 1465 p->pbm_B.iommu = iommu;
1466 1466
1467 /* Like PSYCHO we have a 2GB aligned area for memory space. */
1468 pci_memspace_mask = 0x7fffffffUL;
1469
1470 if (schizo_pbm_init(p, dp, portid, chip_type)) 1467 if (schizo_pbm_init(p, dp, portid, chip_type))
1471 goto fatal_memory_error; 1468 goto fatal_memory_error;
1472 1469
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index da724b13e89..95de1444ee6 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -748,111 +748,102 @@ struct pci_sun4v_msiq_entry {
748 u64 reserved2; 748 u64 reserved2;
749}; 749};
750 750
751/* For now this just runs as a pre-handler for the real interrupt handler. 751static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
752 * So we just walk through the queue and ACK all the entries, update the 752 unsigned long *head)
753 * head pointer, and return.
754 *
755 * In the longer term it would be nice to do something more integrated
756 * wherein we can pass in some of this MSI info to the drivers. This
757 * would be most useful for PCIe fabric error messages, although we could
758 * invoke those directly from the loop here in order to pass the info around.
759 */
760static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
761{ 753{
762 struct pci_pbm_info *pbm = data1; 754 unsigned long err, limit;
763 struct pci_sun4v_msiq_entry *base, *ep;
764 unsigned long msiqid, orig_head, head, type, err;
765
766 msiqid = (unsigned long) data2;
767 755
768 head = 0xdeadbeef; 756 err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
769 err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
770 if (unlikely(err)) 757 if (unlikely(err))
771 goto hv_error_get; 758 return -ENXIO;
772
773 if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
774 goto bad_offset;
775
776 head /= sizeof(struct pci_sun4v_msiq_entry);
777 orig_head = head;
778 base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
779 (pbm->msiq_ent_count *
780 sizeof(struct pci_sun4v_msiq_entry))));
781 ep = &base[head];
782 while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
783 type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
784 if (unlikely(type != MSIQ_TYPE_MSI32 &&
785 type != MSIQ_TYPE_MSI64))
786 goto bad_type;
787
788 pci_sun4v_msi_setstate(pbm->devhandle,
789 ep->msi_data /* msi_num */,
790 HV_MSISTATE_IDLE);
791
792 /* Clear the entry. */
793 ep->version_type &= ~MSIQ_TYPE_MASK;
794
795 /* Go to next entry in ring. */
796 head++;
797 if (head >= pbm->msiq_ent_count)
798 head = 0;
799 ep = &base[head];
800 }
801 759
802 if (likely(head != orig_head)) { 760 limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
803 /* ACK entries by updating head pointer. */ 761 if (unlikely(*head >= limit))
804 head *= sizeof(struct pci_sun4v_msiq_entry); 762 return -EFBIG;
805 err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
806 if (unlikely(err))
807 goto hv_error_set;
808 }
809 return;
810 763
811hv_error_set: 764 return 0;
812 printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err); 765}
813 goto hv_error_cont;
814 766
815hv_error_get: 767static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
816 printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err); 768 unsigned long msiqid, unsigned long *head,
769 unsigned long *msi)
770{
771 struct pci_sun4v_msiq_entry *ep;
772 unsigned long err, type;
817 773
818hv_error_cont: 774 /* Note: void pointer arithmetic, 'head' is a byte offset */
819 printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n", 775 ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
820 pbm->devhandle, msiqid, head); 776 (pbm->msiq_ent_count *
821 return; 777 sizeof(struct pci_sun4v_msiq_entry))) +
778 *head);
822 779
823bad_offset: 780 if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
824 printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n", 781 return 0;
825 head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
826 return;
827 782
828bad_type: 783 type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
829 printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type); 784 if (unlikely(type != MSIQ_TYPE_MSI32 &&
830 return; 785 type != MSIQ_TYPE_MSI64))
786 return -EINVAL;
787
788 *msi = ep->msi_data;
789
790 err = pci_sun4v_msi_setstate(pbm->devhandle,
791 ep->msi_data /* msi_num */,
792 HV_MSISTATE_IDLE);
793 if (unlikely(err))
794 return -ENXIO;
795
796 /* Clear the entry. */
797 ep->version_type &= ~MSIQ_TYPE_MASK;
798
799 (*head) += sizeof(struct pci_sun4v_msiq_entry);
800 if (*head >=
801 (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
802 *head = 0;
803
804 return 1;
831} 805}
832 806
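In the sun4v ops, 'head' stays a byte offset throughout: the entry pointer is the queue base plus a per-queue offset plus *head, and advancing means adding sizeof(entry) and wrapping at msiq_ent_count * sizeof(entry), matching the limit check in pci_sun4v_get_head() above. The wrap arithmetic on its own (entry size and count are hypothetical):

#include <assert.h>

struct entry { unsigned long w[8]; };	/* stand-in for an MSIQ entry */

int main(void)
{
	unsigned long ent_count = 4;	/* hypothetical msiq_ent_count */
	unsigned long limit = ent_count * sizeof(struct entry);
	unsigned long head = 3 * sizeof(struct entry);	/* at the last entry */

	head += sizeof(struct entry);
	if (head >= limit)
		head = 0;		/* wrap to the start of the ring */

	assert(head == 0);
	return 0;
}
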
833static int msi_bitmap_alloc(struct pci_pbm_info *pbm) 807static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
808 unsigned long head)
834{ 809{
835 unsigned long size, bits_per_ulong; 810 unsigned long err;
836 811
837 bits_per_ulong = sizeof(unsigned long) * 8; 812 err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
838 size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); 813 if (unlikely(err))
839 size /= 8; 814 return -EINVAL;
840 BUG_ON(size % sizeof(unsigned long));
841 815
842 pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); 816 return 0;
843 if (!pbm->msi_bitmap) 817}
844 return -ENOMEM;
845 818
819static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
820 unsigned long msi, int is_msi64)
821{
822 if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
823 (is_msi64 ?
824 HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
825 return -ENXIO;
826 if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
827 return -ENXIO;
828 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
829 return -ENXIO;
846 return 0; 830 return 0;
847} 831}
848 832
849static void msi_bitmap_free(struct pci_pbm_info *pbm) 833static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
850{ 834{
851 kfree(pbm->msi_bitmap); 835 unsigned long err, msiqid;
852 pbm->msi_bitmap = NULL; 836
837 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
838 if (err)
839 return -ENXIO;
840
841 pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
842
843 return 0;
853} 844}
854 845
855static int msi_queue_alloc(struct pci_pbm_info *pbm) 846static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
856{ 847{
857 unsigned long q_size, alloc_size, pages, order; 848 unsigned long q_size, alloc_size, pages, order;
858 int i; 849 int i;
@@ -906,232 +897,59 @@ h_error:
906 return -EINVAL; 897 return -EINVAL;
907} 898}
908 899
909 900static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
910static int alloc_msi(struct pci_pbm_info *pbm)
911{ 901{
902 unsigned long q_size, alloc_size, pages, order;
912 int i; 903 int i;
913 904
914 for (i = 0; i < pbm->msi_num; i++) { 905 for (i = 0; i < pbm->msiq_num; i++) {
915 if (!test_and_set_bit(i, pbm->msi_bitmap)) 906 unsigned long msiqid = pbm->msiq_first + i;
916 return i + pbm->msi_first;
917 }
918
919 return -ENOENT;
920}
921
922static void free_msi(struct pci_pbm_info *pbm, int msi_num)
923{
924 msi_num -= pbm->msi_first;
925 clear_bit(msi_num, pbm->msi_bitmap);
926}
927
928static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
929 struct pci_dev *pdev,
930 struct msi_desc *entry)
931{
932 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
933 unsigned long devino, msiqid;
934 struct msi_msg msg;
935 int msi_num, err;
936
937 *virt_irq_p = 0;
938
939 msi_num = alloc_msi(pbm);
940 if (msi_num < 0)
941 return msi_num;
942
943 err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
944 pbm->msiq_first_devino,
945 (pbm->msiq_first_devino +
946 pbm->msiq_num));
947 if (err < 0)
948 goto out_err;
949 devino = err;
950
951 msiqid = ((devino - pbm->msiq_first_devino) +
952 pbm->msiq_first);
953
954 err = -EINVAL;
955 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
956 if (err)
957 goto out_err;
958
959 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
960 goto out_err;
961
962 if (pci_sun4v_msi_setmsiq(pbm->devhandle,
963 msi_num, msiqid,
964 (entry->msi_attrib.is_64 ?
965 HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
966 goto out_err;
967
968 if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
969 goto out_err;
970
971 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
972 goto out_err;
973
974 sparc64_set_msi(*virt_irq_p, msi_num);
975 907
976 if (entry->msi_attrib.is_64) { 908 (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
977 msg.address_hi = pbm->msi64_start >> 32;
978 msg.address_lo = pbm->msi64_start & 0xffffffff;
979 } else {
980 msg.address_hi = 0;
981 msg.address_lo = pbm->msi32_start;
982 } 909 }
983 msg.data = msi_num;
984
985 set_irq_msi(*virt_irq_p, entry);
986 write_msi_msg(*virt_irq_p, &msg);
987 910
988 irq_install_pre_handler(*virt_irq_p, 911 q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
989 pci_sun4v_msi_prehandler, 912 alloc_size = (pbm->msiq_num * q_size);
990 pbm, (void *) msiqid); 913 order = get_order(alloc_size);
991 914
992 return 0; 915 pages = (unsigned long) pbm->msi_queues;
993 916
994out_err: 917 free_pages(pages, order);
995 free_msi(pbm, msi_num);
996 return err;
997 918
919 pbm->msi_queues = NULL;
998} 920}
999 921
1000static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, 922static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
1001 struct pci_dev *pdev) 923 unsigned long msiqid,
924 unsigned long devino)
1002{ 925{
1003 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 926 unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
1004 unsigned long msiqid, err;
1005 unsigned int msi_num;
1006
1007 msi_num = sparc64_get_msi(virt_irq);
1008 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
1009 if (err) {
1010 printk(KERN_ERR "%s: getmsiq gives error %lu\n",
1011 pbm->name, err);
1012 return;
1013 }
1014 927
1015 pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID); 928 if (!virt_irq)
1016 pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID); 929 return -ENOMEM;
1017 930
1018 free_msi(pbm, msi_num); 931 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
932 return -EINVAL;
933 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
934 return -EINVAL;
1019 935
1020 /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ 936 return virt_irq;
1021 * allocation.
1022 */
1023 sun4v_destroy_msi(virt_irq);
1024} 937}
1025 938
939static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
940 .get_head = pci_sun4v_get_head,
941 .dequeue_msi = pci_sun4v_dequeue_msi,
942 .set_head = pci_sun4v_set_head,
943 .msi_setup = pci_sun4v_msi_setup,
944 .msi_teardown = pci_sun4v_msi_teardown,
945 .msiq_alloc = pci_sun4v_msiq_alloc,
946 .msiq_free = pci_sun4v_msiq_free,
947 .msiq_build_irq = pci_sun4v_msiq_build_irq,
948};
949
1026static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) 950static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1027{ 951{
1028 const u32 *val; 952 sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
1029 int len;
1030
1031 val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
1032 if (!val || len != 4)
1033 goto no_msi;
1034 pbm->msiq_num = *val;
1035 if (pbm->msiq_num) {
1036 const struct msiq_prop {
1037 u32 first_msiq;
1038 u32 num_msiq;
1039 u32 first_devino;
1040 } *mqp;
1041 const struct msi_range_prop {
1042 u32 first_msi;
1043 u32 num_msi;
1044 } *mrng;
1045 const struct addr_range_prop {
1046 u32 msi32_high;
1047 u32 msi32_low;
1048 u32 msi32_len;
1049 u32 msi64_high;
1050 u32 msi64_low;
1051 u32 msi64_len;
1052 } *arng;
1053
1054 val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
1055 if (!val || len != 4)
1056 goto no_msi;
1057
1058 pbm->msiq_ent_count = *val;
1059
1060 mqp = of_get_property(pbm->prom_node,
1061 "msi-eq-to-devino", &len);
1062 if (!mqp || len != sizeof(struct msiq_prop))
1063 goto no_msi;
1064
1065 pbm->msiq_first = mqp->first_msiq;
1066 pbm->msiq_first_devino = mqp->first_devino;
1067
1068 val = of_get_property(pbm->prom_node, "#msi", &len);
1069 if (!val || len != 4)
1070 goto no_msi;
1071 pbm->msi_num = *val;
1072
1073 mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
1074 if (!mrng || len != sizeof(struct msi_range_prop))
1075 goto no_msi;
1076 pbm->msi_first = mrng->first_msi;
1077
1078 val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
1079 if (!val || len != 4)
1080 goto no_msi;
1081 pbm->msi_data_mask = *val;
1082
1083 val = of_get_property(pbm->prom_node, "msix-data-width", &len);
1084 if (!val || len != 4)
1085 goto no_msi;
1086 pbm->msix_data_width = *val;
1087
1088 arng = of_get_property(pbm->prom_node, "msi-address-ranges",
1089 &len);
1090 if (!arng || len != sizeof(struct addr_range_prop))
1091 goto no_msi;
1092 pbm->msi32_start = ((u64)arng->msi32_high << 32) |
1093 (u64) arng->msi32_low;
1094 pbm->msi64_start = ((u64)arng->msi64_high << 32) |
1095 (u64) arng->msi64_low;
1096 pbm->msi32_len = arng->msi32_len;
1097 pbm->msi64_len = arng->msi64_len;
1098
1099 if (msi_bitmap_alloc(pbm))
1100 goto no_msi;
1101
1102 if (msi_queue_alloc(pbm)) {
1103 msi_bitmap_free(pbm);
1104 goto no_msi;
1105 }
1106
1107 printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
1108 "devino[0x%x]\n",
1109 pbm->name,
1110 pbm->msiq_first, pbm->msiq_num,
1111 pbm->msiq_ent_count,
1112 pbm->msiq_first_devino);
1113 printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
1114 "width[%u]\n",
1115 pbm->name,
1116 pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
1117 pbm->msix_data_width);
1118 printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
1119 "addr64[0x%lx:0x%x]\n",
1120 pbm->name,
1121 pbm->msi32_start, pbm->msi32_len,
1122 pbm->msi64_start, pbm->msi64_len);
1123 printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
1124 pbm->name,
1125 pbm->msi_queues);
1126 }
1127 pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
1128 pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
1129
1130 return;
1131
1132no_msi:
1133 pbm->msiq_num = 0;
1134 printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
1135} 953}
1136#else /* CONFIG_PCI_MSI */ 954#else /* CONFIG_PCI_MSI */
1137static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) 955static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
@@ -1237,11 +1055,6 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
1237 1055
1238 p->pbm_B.iommu = iommu; 1056 p->pbm_B.iommu = iommu;
1239 1057
1240 /* Like PSYCHO and SCHIZO we have a 2GB aligned area
1241 * for memory space.
1242 */
1243 pci_memspace_mask = 0x7fffffffUL;
1244
1245 pci_sun4v_pbm_init(p, dp, devhandle); 1058 pci_sun4v_pbm_init(p, dp, devhandle);
1246 return; 1059 return;
1247 1060
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 881a09ee4c4..850cdffdd69 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -105,9 +105,11 @@ static struct of_device_id power_match[] = {
105}; 105};
106 106
107static struct of_platform_driver power_driver = { 107static struct of_platform_driver power_driver = {
108 .name = "power",
109 .match_table = power_match, 108 .match_table = power_match,
110 .probe = power_probe, 109 .probe = power_probe,
110 .driver = {
111 .name = "power",
112 },
111}; 113};
112 114
113void __init power_init(void) 115void __init power_init(void)
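power.c (and time.c below) get the same conversion as the other of_platform_driver users in this patch: the driver name moves out of the removed top-level .name field into the embedded generic struct device_driver, which is where the driver core actually reads it. The shape of the converted initializer, with placeholder names (example_match/example_probe are hypothetical):

/* Sketch only; not a real driver. */
static struct of_platform_driver example_driver = {
	.match_table	= example_match,
	.probe		= example_probe,
	.driver		= {
		.name	= "example",
	},
};
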
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 574bc248bca..e2f8e1b4882 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -96,19 +96,21 @@ sun4v_dev_mondo:
96 stxa %g2, [%g4] ASI_QUEUE 96 stxa %g2, [%g4] ASI_QUEUE
97 membar #Sync 97 membar #Sync
98 98
99 /* Get &__irq_work[smp_processor_id()] into %g1. */ 99 TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
100 TRAP_LOAD_IRQ_WORK(%g1, %g4)
101 100
102 /* Get &ivector_table[IVEC] into %g4. */ 101 /* For VIRQs, cookie is encoded as ~bucket_phys_addr */
103 sethi %hi(ivector_table), %g4 102 brlz,pt %g3, 1f
104 sllx %g3, 3, %g3 103 xnor %g3, %g0, %g4
105 or %g4, %lo(ivector_table), %g4 104
105 /* Get __pa(&ivector_table[IVEC]) into %g4. */
106 sethi %hi(ivector_table_pa), %g4
107 ldx [%g4 + %lo(ivector_table_pa)], %g4
108 sllx %g3, 4, %g3
106 add %g4, %g3, %g4 109 add %g4, %g3, %g4
107 110
108 /* Insert ivector_table[] entry into __irq_work[] queue. */ 1111: ldx [%g1], %g2
109 lduw [%g1], %g2 /* g2 = irq_work(cpu) */ 112 stxa %g2, [%g4] ASI_PHYS_USE_EC
110 stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */ 113 stx %g4, [%g1]
111 stw %g4, [%g1] /* irq_work(cpu) = bucket */
112 114
113 /* Signal the interrupt by setting (1 << pil) in %softint. */ 115 /* Signal the interrupt by setting (1 << pil) in %softint. */
114 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint 116 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
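The new dev_mondo path distinguishes two cases by sign: a real INO indexes ivector_table through its physical address (16 bytes per bucket, hence the sllx by 4), while a VIRQ delivers a cookie pre-encoded as the bitwise NOT of the bucket's physical address, so it tests negative under brlz and a single xnor with %g0 (i.e. ~cookie) recovers the address. The encoding round-trip in C (the bucket address is invented):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t bucket_pa = 0x1f00042a0ULL;	/* hypothetical bucket PA */
	int64_t cookie = ~bucket_pa;		/* as stored in the mondo queue */

	assert(cookie < 0);			/* the brlz test above */
	assert(~(uint64_t)cookie == bucket_pa);	/* xnor %g3, %g0 recovers it */
	return 0;
}
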
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index d108eeb0734..0d5c5026494 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -436,7 +436,7 @@ out:
436asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, 436asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
437 unsigned long third, void __user *ptr, long fifth) 437 unsigned long third, void __user *ptr, long fifth)
438{ 438{
439 int err; 439 long err;
440 440
441 /* No need for backward compatibility. We can start fresh... */ 441 /* No need for backward compatibility. We can start fresh... */
442 if (call <= SEMCTL) { 442 if (call <= SEMCTL) {
@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
453 err = sys_semget(first, (int)second, (int)third); 453 err = sys_semget(first, (int)second, (int)third);
454 goto out; 454 goto out;
455 case SEMCTL: { 455 case SEMCTL: {
456 union semun fourth; 456 err = sys_semctl(first, third,
457 err = -EINVAL; 457 (int)second | IPC_64,
458 if (!ptr) 458 (union semun) ptr);
459 goto out;
460 err = -EFAULT;
461 if (get_user(fourth.__pad,
462 (void __user * __user *) ptr))
463 goto out;
464 err = sys_semctl(first, (int)second | IPC_64,
465 (int)third, fourth);
466 goto out; 459 goto out;
467 } 460 }
468 default: 461 default:
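The SEMCTL change is an argument-order fix: sys_semctl() of this era is declared as sys_semctl(int semid, int semnum, int cmd, union semun arg), and the old code passed cmd in the semnum slot while also loading the union through an extra get_user() dereference; the new code reinterprets the sys_ipc pointer argument itself as the by-value union. Schematically (the signature is quoted from ipc/sem.c as an assumption about this kernel version):

/* Argument mapping after the fix:
 *
 *   long sys_semctl(int semid, int semnum, int cmd, union semun arg);
 *
 *   semid  <-  first
 *   semnum <-  third
 *   cmd    <-  (int)second | IPC_64
 *   arg    <-  (union semun) ptr   -- the pointer reinterpreted by value,
 *                                     with no get_user() round-trip
 */
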
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 69cad1b653c..cd8c740cba1 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -764,9 +764,11 @@ static struct of_device_id clock_match[] = {
764}; 764};
765 765
766static struct of_platform_driver clock_driver = { 766static struct of_platform_driver clock_driver = {
767 .name = "clock",
768 .match_table = clock_match, 767 .match_table = clock_match,
769 .probe = clock_probe, 768 .probe = clock_probe,
769 .driver = {
770 .name = "clock",
771 },
770}; 772};
771 773
772static int __init clock_init(void) 774static int __init clock_init(void)
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 6ef42b8e53d..34573a55b6e 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2569,8 +2569,8 @@ void __init trap_init(void)
2569 offsetof(struct trap_per_cpu, tsb_huge)) || 2569 offsetof(struct trap_per_cpu, tsb_huge)) ||
2570 (TRAP_PER_CPU_TSB_HUGE_TEMP != 2570 (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2571 offsetof(struct trap_per_cpu, tsb_huge_temp)) || 2571 offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2572 (TRAP_PER_CPU_IRQ_WORKLIST != 2572 (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2573 offsetof(struct trap_per_cpu, irq_worklist)) || 2573 offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2574 (TRAP_PER_CPU_CPU_MONDO_QMASK != 2574 (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2575 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || 2575 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2576 (TRAP_PER_CPU_DEV_MONDO_QMASK != 2576 (TRAP_PER_CPU_DEV_MONDO_QMASK !=
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index b982fa3dd74..9fcd503bc04 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -10,105 +10,138 @@ ENTRY(_start)
10jiffies = jiffies_64; 10jiffies = jiffies_64;
11SECTIONS 11SECTIONS
12{ 12{
13 swapper_low_pmd_dir = 0x0000000000402000; 13 swapper_low_pmd_dir = 0x0000000000402000;
14 . = 0x4000; 14 . = 0x4000;
15 .text 0x0000000000404000 : 15 .text 0x0000000000404000 : {
16 { 16 _text = .;
17 _text = .; 17 TEXT_TEXT
18 TEXT_TEXT 18 SCHED_TEXT
19 SCHED_TEXT 19 LOCK_TEXT
20 LOCK_TEXT 20 KPROBES_TEXT
21 KPROBES_TEXT 21 *(.gnu.warning)
22 *(.gnu.warning) 22 } = 0
23 } =0 23 _etext = .;
24 _etext = .; 24 PROVIDE (etext = .);
25 PROVIDE (etext = .);
26 25
27 RO_DATA(PAGE_SIZE) 26 RO_DATA(PAGE_SIZE)
27 .data : {
28 DATA_DATA
29 CONSTRUCTORS
30 }
31 .data1 : {
32 *(.data1)
33 }
34 . = ALIGN(64);
35 .data.cacheline_aligned : {
36 *(.data.cacheline_aligned)
37 }
38 . = ALIGN(64);
39 .data.read_mostly : {
40 *(.data.read_mostly)
41 }
42 _edata = .;
43 PROVIDE (edata = .);
44 .fixup : {
45 *(.fixup)
46 }
47 . = ALIGN(16);
48 __ex_table : {
49 __start___ex_table = .;
50 *(__ex_table)
51 __stop___ex_table = .;
52 }
53 NOTES
28 54
29 .data : 55 . = ALIGN(PAGE_SIZE);
30 { 56 .init.text : {
31 DATA_DATA 57 __init_begin = .;
32 CONSTRUCTORS 58 _sinittext = .;
33 } 59 *(.init.text)
34 .data1 : { *(.data1) } 60 _einittext = .;
35 . = ALIGN(64); 61 }
36 .data.cacheline_aligned : { *(.data.cacheline_aligned) } 62 .init.data : {
37 . = ALIGN(64); 63 *(.init.data)
38 .data.read_mostly : { *(.data.read_mostly) } 64 }
39 _edata = .; 65 . = ALIGN(16);
40 PROVIDE (edata = .); 66 .init.setup : {
41 .fixup : { *(.fixup) } 67 __setup_start = .;
68 *(.init.setup)
69 __setup_end = .;
70 }
71 .initcall.init : {
72 __initcall_start = .;
73 INITCALLS
74 __initcall_end = .;
75 }
76 .con_initcall.init : {
77 __con_initcall_start = .;
78 *(.con_initcall.init)
79 __con_initcall_end = .;
80 }
81 SECURITY_INIT
42 82
43 . = ALIGN(16); 83 . = ALIGN(4);
44 __start___ex_table = .; 84 .tsb_ldquad_phys_patch : {
45 __ex_table : { *(__ex_table) } 85 __tsb_ldquad_phys_patch = .;
46 __stop___ex_table = .; 86 *(.tsb_ldquad_phys_patch)
87 __tsb_ldquad_phys_patch_end = .;
88 }
47 89
48 NOTES 90 .tsb_phys_patch : {
91 __tsb_phys_patch = .;
92 *(.tsb_phys_patch)
93 __tsb_phys_patch_end = .;
94 }
49 95
50 . = ALIGN(PAGE_SIZE); 96 .cpuid_patch : {
51 __init_begin = .; 97 __cpuid_patch = .;
52 .init.text : { 98 *(.cpuid_patch)
53 _sinittext = .; 99 __cpuid_patch_end = .;
54 *(.init.text) 100 }
55 _einittext = .; 101
56 } 102 .sun4v_1insn_patch : {
57 .init.data : { *(.init.data) } 103 __sun4v_1insn_patch = .;
58 . = ALIGN(16); 104 *(.sun4v_1insn_patch)
59 __setup_start = .; 105 __sun4v_1insn_patch_end = .;
60 .init.setup : { *(.init.setup) } 106 }
61 __setup_end = .; 107 .sun4v_2insn_patch : {
62 __initcall_start = .; 108 __sun4v_2insn_patch = .;
63 .initcall.init : { 109 *(.sun4v_2insn_patch)
64 INITCALLS 110 __sun4v_2insn_patch_end = .;
65 } 111 }
66 __initcall_end = .;
67 __con_initcall_start = .;
68 .con_initcall.init : { *(.con_initcall.init) }
69 __con_initcall_end = .;
70 SECURITY_INIT
71 . = ALIGN(4);
72 __tsb_ldquad_phys_patch = .;
73 .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
74 __tsb_ldquad_phys_patch_end = .;
75 __tsb_phys_patch = .;
76 .tsb_phys_patch : { *(.tsb_phys_patch) }
77 __tsb_phys_patch_end = .;
78 __cpuid_patch = .;
79 .cpuid_patch : { *(.cpuid_patch) }
80 __cpuid_patch_end = .;
81 __sun4v_1insn_patch = .;
82 .sun4v_1insn_patch : { *(.sun4v_1insn_patch) }
83 __sun4v_1insn_patch_end = .;
84 __sun4v_2insn_patch = .;
85 .sun4v_2insn_patch : { *(.sun4v_2insn_patch) }
86 __sun4v_2insn_patch_end = .;
87 112
88#ifdef CONFIG_BLK_DEV_INITRD 113#ifdef CONFIG_BLK_DEV_INITRD
89 . = ALIGN(PAGE_SIZE); 114 . = ALIGN(PAGE_SIZE);
90 __initramfs_start = .; 115 .init.ramfs : {
91 .init.ramfs : { *(.init.ramfs) } 116 __initramfs_start = .;
92 __initramfs_end = .; 117 *(.init.ramfs)
118 __initramfs_end = .;
119 }
93#endif 120#endif
94 121
95 PERCPU(PAGE_SIZE) 122 PERCPU(PAGE_SIZE)
96 123
97 . = ALIGN(PAGE_SIZE); 124 . = ALIGN(PAGE_SIZE);
98 __init_end = .; 125 __init_end = .;
99 __bss_start = .; 126 __bss_start = .;
100 .sbss : { *(.sbss) *(.scommon) } 127 .sbss : {
101 .bss : 128 *(.sbss)
102 { 129 *(.scommon)
103 *(.dynbss) 130 }
104 *(.bss) 131 .bss : {
105 *(COMMON) 132 *(.dynbss)
106 } 133 *(.bss)
107 _end = . ; 134 *(COMMON)
108 PROVIDE (end = .); 135 }
109 /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) } 136 _end = . ;
137 PROVIDE (end = .);
110 138
111 STABS_DEBUG 139 /DISCARD/ : {
140 *(.exit.text)
141 *(.exit.data)
142 *(.exitcall.exit)
143 }
112 144
113 DWARF_DEBUG 145 STABS_DEBUG
146 DWARF_DEBUG
114} 147}