-rw-r--r--  arch/sparc64/kernel/Makefile    |   1
-rw-r--r--  arch/sparc64/kernel/irq.c       | 230
-rw-r--r--  arch/sparc64/kernel/pci_fire.c  | 380
-rw-r--r--  arch/sparc64/kernel/pci_impl.h  |  31
-rw-r--r--  arch/sparc64/kernel/pci_msi.c   | 433
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 400
-rw-r--r--  include/asm-sparc64/irq.h       |   6
7 files changed, 704 insertions(+), 777 deletions(-)
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 40d2f3aae91e..112c46e66578 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o \
			pci_psycho.o pci_sabre.o pci_schizo.o \
			pci_sun4v.o pci_sun4v_asm.o pci_fire.o
+obj-$(CONFIG_PCI_MSI) += pci_msi.o
 obj-$(CONFIG_SMP)	 += smp.o trampoline.o hvtramp.o
 obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 7f5a4c77e3e4..045ab27d4271 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,7 +21,6 @@
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
 #include <linux/irq.h>
-#include <linux/msi.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -92,39 +91,46 @@ static struct {
 	unsigned int dev_handle;
 	unsigned int dev_ino;
 } virt_to_real_irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(virt_irq_alloc_lock);
 
-static unsigned char virt_irq_alloc(unsigned int real_irq)
+unsigned char virt_irq_alloc(unsigned int real_irq)
 {
+	unsigned long flags;
 	unsigned char ent;
 
 	BUILD_BUG_ON(NR_IRQS >= 256);
 
+	spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+
 	for (ent = 1; ent < NR_IRQS; ent++) {
 		if (!virt_to_real_irq_table[ent].irq)
 			break;
 	}
 	if (ent >= NR_IRQS) {
 		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
-		return 0;
+		ent = 0;
+	} else {
+		virt_to_real_irq_table[ent].irq = real_irq;
 	}
 
-	virt_to_real_irq_table[ent].irq = real_irq;
+	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
 
 	return ent;
 }
 
 #ifdef CONFIG_PCI_MSI
-static void virt_irq_free(unsigned int virt_irq)
+void virt_irq_free(unsigned int virt_irq)
 {
-	unsigned int real_irq;
+	unsigned long flags;
 
 	if (virt_irq >= NR_IRQS)
 		return;
 
-	real_irq = virt_to_real_irq_table[virt_irq].irq;
+	spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+
 	virt_to_real_irq_table[virt_irq].irq = 0;
 
-	__bucket(real_irq)->virt_irq = 0;
+	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
 }
 #endif
 
@@ -217,27 +223,8 @@ struct irq_handler_data {
 	void (*pre_handler)(unsigned int, void *, void *);
 	void *pre_handler_arg1;
 	void *pre_handler_arg2;
-
-	u32 msi;
 };
 
-void sparc64_set_msi(unsigned int virt_irq, u32 msi)
-{
-	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
-	if (data)
-		data->msi = msi;
-}
-
-u32 sparc64_get_msi(unsigned int virt_irq)
-{
-	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
-	if (data)
-		return data->msi;
-	return 0xffffffff;
-}
-
 static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
 {
 	unsigned int real_irq = virt_to_real_irq(virt_irq);
@@ -405,32 +392,6 @@ static void sun4v_irq_disable(unsigned int virt_irq)
 	}
 }
 
-#ifdef CONFIG_PCI_MSI
-static void sun4u_msi_enable(unsigned int virt_irq)
-{
-	sun4u_irq_enable(virt_irq);
-	unmask_msi_irq(virt_irq);
-}
-
-static void sun4u_msi_disable(unsigned int virt_irq)
-{
-	mask_msi_irq(virt_irq);
-	sun4u_irq_disable(virt_irq);
-}
-
-static void sun4v_msi_enable(unsigned int virt_irq)
-{
-	sun4v_irq_enable(virt_irq);
-	unmask_msi_irq(virt_irq);
-}
-
-static void sun4v_msi_disable(unsigned int virt_irq)
-{
-	mask_msi_irq(virt_irq);
-	sun4v_irq_disable(virt_irq);
-}
-#endif
-
 static void sun4v_irq_end(unsigned int virt_irq)
 {
 	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
@@ -585,39 +546,6 @@ static struct irq_chip sun4v_irq = {
 	.set_affinity	= sun4v_set_affinity,
 };
 
-static struct irq_chip sun4v_irq_ack = {
-	.typename	= "sun4v+ack",
-	.enable		= sun4v_irq_enable,
-	.disable	= sun4v_irq_disable,
-	.ack		= run_pre_handler,
-	.end		= sun4v_irq_end,
-	.set_affinity	= sun4v_set_affinity,
-};
-
-#ifdef CONFIG_PCI_MSI
-static struct irq_chip sun4u_msi = {
-	.typename	= "sun4u+msi",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.enable		= sun4u_msi_enable,
-	.disable	= sun4u_msi_disable,
-	.ack		= run_pre_handler,
-	.end		= sun4u_irq_end,
-	.set_affinity	= sun4u_set_affinity,
-};
-
-static struct irq_chip sun4v_msi = {
-	.typename	= "sun4v+msi",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.enable		= sun4v_msi_enable,
-	.disable	= sun4v_msi_disable,
-	.ack		= run_pre_handler,
-	.end		= sun4v_irq_end,
-	.set_affinity	= sun4v_set_affinity,
-};
-#endif
-
 static struct irq_chip sun4v_virq = {
 	.typename	= "vsun4v",
 	.enable		= sun4v_virq_enable,
@@ -626,42 +554,27 @@ static struct irq_chip sun4v_virq = {
 	.set_affinity	= sun4v_virt_set_affinity,
 };
 
-static struct irq_chip sun4v_virq_ack = {
-	.typename	= "vsun4v+ack",
-	.enable		= sun4v_virq_enable,
-	.disable	= sun4v_virq_disable,
-	.ack		= run_pre_handler,
-	.end		= sun4v_virq_end,
-	.set_affinity	= sun4v_virt_set_affinity,
-};
-
 void irq_install_pre_handler(int virt_irq,
 			     void (*func)(unsigned int, void *, void *),
 			     void *arg1, void *arg2)
 {
 	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-	struct irq_chip *chip;
+	struct irq_chip *chip = get_irq_chip(virt_irq);
+
+	if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
+		printk(KERN_ERR "IRQ: Trying to install pre-handler on "
+		       "sun4v irq %u\n", virt_irq);
+		return;
+	}
 
 	data->pre_handler = func;
 	data->pre_handler_arg1 = arg1;
 	data->pre_handler_arg2 = arg2;
 
-	chip = get_irq_chip(virt_irq);
-	if (chip == &sun4u_irq_ack ||
-	    chip == &sun4v_irq_ack ||
-	    chip == &sun4v_virq_ack
-#ifdef CONFIG_PCI_MSI
-	    || chip == &sun4u_msi
-	    || chip == &sun4v_msi
-#endif
-	    )
+	if (chip == &sun4u_irq_ack)
 		return;
 
-	chip = (chip == &sun4u_irq ?
-		&sun4u_irq_ack :
-		(chip == &sun4v_irq ?
-		 &sun4v_irq_ack : &sun4v_virq_ack));
-	set_irq_chip(virt_irq, chip);
+	set_irq_chip(virt_irq, &sun4u_irq_ack);
 }
 
 unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
@@ -765,103 +678,6 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 	return virq;
 }
 
-#ifdef CONFIG_PCI_MSI
-unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
-			     unsigned int msi_start, unsigned int msi_end)
-{
-	struct ino_bucket *bucket;
-	struct irq_handler_data *data;
-	unsigned long sysino;
-	unsigned int devino;
-
-	BUG_ON(tlb_type != hypervisor);
-
-	/* Find a free devino in the given range.  */
-	for (devino = msi_start; devino < msi_end; devino++) {
-		sysino = sun4v_devino_to_sysino(devhandle, devino);
-		bucket = &ivector_table[sysino];
-		if (!bucket->virt_irq)
-			break;
-	}
-	if (devino >= msi_end)
-		return -ENOSPC;
-
-	sysino = sun4v_devino_to_sysino(devhandle, devino);
-	bucket = &ivector_table[sysino];
-	bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-	*virt_irq_p = bucket->virt_irq;
-	set_irq_chip(bucket->virt_irq, &sun4v_msi);
-
-	data = get_irq_chip_data(bucket->virt_irq);
-	if (unlikely(data))
-		return devino;
-
-	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
-	if (unlikely(!data)) {
-		virt_irq_free(*virt_irq_p);
-		return -ENOMEM;
-	}
-	set_irq_chip_data(bucket->virt_irq, data);
-
-	data->imap = ~0UL;
-	data->iclr = ~0UL;
-
-	return devino;
-}
-
-void sun4v_destroy_msi(unsigned int virt_irq)
-{
-	virt_irq_free(virt_irq);
-}
-
-unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
-			     unsigned int msi_start, unsigned int msi_end,
-			     unsigned long imap_base, unsigned long iclr_base)
-{
-	struct ino_bucket *bucket;
-	struct irq_handler_data *data;
-	unsigned long sysino;
-	unsigned int devino;
-
-	/* Find a free devino in the given range.  */
-	for (devino = msi_start; devino < msi_end; devino++) {
-		sysino = (portid << 6) | devino;
-		bucket = &ivector_table[sysino];
-		if (!bucket->virt_irq)
-			break;
-	}
-	if (devino >= msi_end)
-		return -ENOSPC;
-
-	sysino = (portid << 6) | devino;
-	bucket = &ivector_table[sysino];
-	bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-	*virt_irq_p = bucket->virt_irq;
-	set_irq_chip(bucket->virt_irq, &sun4u_msi);
-
-	data = get_irq_chip_data(bucket->virt_irq);
-	if (unlikely(data))
-		return devino;
-
-	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
-	if (unlikely(!data)) {
-		virt_irq_free(*virt_irq_p);
-		return -ENOMEM;
-	}
-	set_irq_chip_data(bucket->virt_irq, data);
-
-	data->imap = (imap_base + (devino * 0x8UL));
-	data->iclr = (iclr_base + (devino * 0x8UL));
-
-	return devino;
-}
-
-void sun4u_destroy_msi(unsigned int virt_irq)
-{
-	virt_irq_free(virt_irq);
-}
-#endif
-
 void ack_bad_irq(unsigned int virt_irq)
 {
 	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
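
With the MSI-specific builders gone from irq.c, virt_irq_alloc()/virt_irq_free() become the only service the MSI code needs from the generic IRQ layer. As a rough illustration of the intended calling pattern (this sketch is not part of the patch; it mirrors what sparc64_setup_msi_irq() in the new pci_msi.c below does):

	/* Hypothetical caller, for illustration only: grab a virtual IRQ
	 * number, bind an irq_chip to it, and release it again on failure.
	 */
	unsigned int virt_irq = virt_irq_alloc(~0 /* no real IRQ yet */);
	if (!virt_irq)
		return -ENOMEM;

	set_irq_chip(virt_irq, &msi_irq);
	/* ... program the controller; on any error undo the allocation: */
	set_irq_chip(virt_irq, NULL);
	virt_irq_free(virt_irq);

The new spinlock makes this alloc/free pair safe to call from the per-controller setup paths without any caller-side locking.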
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 090f26579678..bcf6a5d425ab 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -161,90 +161,92 @@ struct pci_msiq_entry {
 #define  MSI_64BIT_ADDR		 0x034008UL
 #define  MSI_64BIT_ADDR_VAL	 0xffffffffffff0000UL
 
-/* For now this just runs as a pre-handler for the real interrupt handler.
- * So we just walk through the queue and ACK all the entries, update the
- * head pointer, and return.
- *
- * In the longer term it would be nice to do something more integrated
- * wherein we can pass in some of this MSI info to the drivers.  This
- * would be most useful for PCIe fabric error messages, although we could
- * invoke those directly from the loop here in order to pass the info around.
- */
-static void pci_msi_prehandler(unsigned int ino, void *data1, void *data2)
+static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long *head)
+{
+	*head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+	return 0;
+}
+
+static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
+				unsigned long *head, unsigned long *msi)
 {
-	unsigned long msiqid, orig_head, head, type_fmt, type;
-	struct pci_pbm_info *pbm = data1;
+	unsigned long type_fmt, type, msi_num;
 	struct pci_msiq_entry *base, *ep;
 
-	msiqid = (unsigned long) data2;
-
-	head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
+	ep = &base[*head];
 
-	orig_head = head;
-	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
-	ep = &base[head];
-	while ((ep->word0 & MSIQ_WORD0_FMT_TYPE) != 0) {
-		unsigned long msi_num;
-
-		type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
-			    MSIQ_WORD0_FMT_TYPE_SHIFT);
-		type = (type_fmt >>3);
-		if (unlikely(type != MSIQ_TYPE_MSI32 &&
-			     type != MSIQ_TYPE_MSI64))
-			goto bad_type;
-
-		msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
-			   MSIQ_WORD0_DATA0_SHIFT);
-
-		fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
-			   MSI_CLEAR_EQWR_N);
-
-		/* Clear the entry.  */
-		ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
-
-		/* Go to next entry in ring.  */
-		head++;
-		if (head >= pbm->msiq_ent_count)
-			head = 0;
-		ep = &base[head];
-	}
+	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
+		return 0;
 
-	if (likely(head != orig_head)) {
-		/* ACK entries by updating head pointer.  */
-		fire_write(pbm->pbm_regs +
-			   EVENT_QUEUE_HEAD(msiqid),
-			   head);
-	}
-	return;
+	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
+		    MSIQ_WORD0_FMT_TYPE_SHIFT);
+	type = (type_fmt >> 3);
+	if (unlikely(type != MSIQ_TYPE_MSI32 &&
+		     type != MSIQ_TYPE_MSI64))
+		return -EINVAL;
 
-bad_type:
-	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
-	return;
+	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
+			  MSIQ_WORD0_DATA0_SHIFT);
+
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
+		   MSI_CLEAR_EQWR_N);
+
+	/* Clear the entry.  */
+	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
+
+	/* Go to next entry in ring.  */
+	(*head)++;
+	if (*head >= pbm->msiq_ent_count)
+		*head = 0;
+
+	return 1;
 }
 
-static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long head)
 {
-	unsigned long size, bits_per_ulong;
+	fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
+	return 0;
+}
 
-	bits_per_ulong = sizeof(unsigned long) * 8;
-	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
-	size /= 8;
-	BUG_ON(size % sizeof(unsigned long));
+static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long msi, int is_msi64)
+{
+	u64 val;
 
-	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
-	if (!pbm->msi_bitmap)
-		return -ENOMEM;
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val &= ~(MSI_MAP_EQNUM);
+	val |= msiqid;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
+		   MSI_CLEAR_EQWR_N);
+
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val |= MSI_MAP_VALID;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
 
 	return 0;
 }
 
-static void msi_bitmap_free(struct pci_pbm_info *pbm)
+static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 {
-	kfree(pbm->msi_bitmap);
-	pbm->msi_bitmap = NULL;
+	unsigned long msiqid;
+	u64 val;
+
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	msiqid = (val & MSI_MAP_EQNUM);
+
+	val &= ~MSI_MAP_VALID;
+
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	return 0;
 }
 
-static int msi_queue_alloc(struct pci_pbm_info *pbm)
+static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
 {
 	unsigned long pages, order, i;
 
@@ -279,241 +281,65 @@ static int msi_queue_alloc(struct pci_pbm_info *pbm)
 	return 0;
 }
 
-static int alloc_msi(struct pci_pbm_info *pbm)
+static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
 {
-	int i;
+	unsigned long pages, order;
 
-	for (i = 0; i < pbm->msi_num; i++) {
-		if (!test_and_set_bit(i, pbm->msi_bitmap))
-			return i + pbm->msi_first;
-	}
+	order = get_order(512 * 1024);
+	pages = (unsigned long) pbm->msi_queues;
 
-	return -ENOENT;
-}
+	free_pages(pages, order);
 
-static void free_msi(struct pci_pbm_info *pbm, int msi_num)
-{
-	msi_num -= pbm->msi_first;
-	clear_bit(msi_num, pbm->msi_bitmap);
+	pbm->msi_queues = NULL;
 }
 
-static int pci_setup_msi_irq(unsigned int *virt_irq_p,
-			     struct pci_dev *pdev,
-			     struct msi_desc *entry)
+static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
+				   unsigned long msiqid,
+				   unsigned long devino)
 {
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long devino, msiqid, cregs, imap_off;
-	struct msi_msg msg;
-	int msi_num, err;
+	unsigned long cregs = (unsigned long) pbm->pbm_regs;
+	unsigned long imap_reg, iclr_reg, int_ctrlr;
+	unsigned int virt_irq;
+	int fixup;
 	u64 val;
 
-	*virt_irq_p = 0;
-
-	msi_num = alloc_msi(pbm);
-	if (msi_num < 0)
-		return msi_num;
-
-	cregs = (unsigned long) pbm->pbm_regs;
+	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
+	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
 
-	err = sun4u_build_msi(pbm->portid, virt_irq_p,
-			      pbm->msiq_first_devino,
-			      (pbm->msiq_first_devino +
-			       pbm->msiq_num),
-			      cregs + 0x001000UL,
-			      cregs + 0x001400UL);
-	if (err < 0)
-		goto out_err;
-	devino = err;
+	/* XXX iterate amongst the 4 IRQ controllers XXX */
+	int_ctrlr = (1UL << 6);
 
-	imap_off = 0x001000UL + (devino * 0x8UL);
+	val = fire_read(imap_reg);
+	val |= (1UL << 63) | int_ctrlr;
+	fire_write(imap_reg, val);
 
-	val = fire_read(pbm->pbm_regs + imap_off);
-	val |= (1UL << 63) | (1UL << 6);
-	fire_write(pbm->pbm_regs + imap_off, val);
+	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
 
-	msiqid = ((devino - pbm->msiq_first_devino) +
-		  pbm->msiq_first);
+	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
+	if (!virt_irq)
+		return -ENOMEM;
 
 	fire_write(pbm->pbm_regs +
 		   EVENT_QUEUE_CONTROL_SET(msiqid),
 		   EVENT_QUEUE_CONTROL_SET_EN);
 
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-	val &= ~(MSI_MAP_EQNUM);
-	val |= msiqid;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
-		   MSI_CLEAR_EQWR_N);
-
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-	val |= MSI_MAP_VALID;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	sparc64_set_msi(*virt_irq_p, msi_num);
-
-	if (entry->msi_attrib.is_64) {
-		msg.address_hi = pbm->msi64_start >> 32;
-		msg.address_lo = pbm->msi64_start & 0xffffffff;
-	} else {
-		msg.address_hi = 0;
-		msg.address_lo = pbm->msi32_start;
-	}
-	msg.data = msi_num;
-
-	set_irq_msi(*virt_irq_p, entry);
-	write_msi_msg(*virt_irq_p, &msg);
-
-	irq_install_pre_handler(*virt_irq_p,
-				pci_msi_prehandler,
-				pbm, (void *) msiqid);
-
-	return 0;
-
-out_err:
-	free_msi(pbm, msi_num);
-	return err;
+	return virt_irq;
 }
 
-static void pci_teardown_msi_irq(unsigned int virt_irq,
-				 struct pci_dev *pdev)
-{
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long msiqid, msi_num;
-	u64 val;
-
-	msi_num = sparc64_get_msi(virt_irq);
-
-	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
-
-	msiqid = (val & MSI_MAP_EQNUM);
-
-	val &= ~MSI_MAP_VALID;
-	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
-
-	fire_write(pbm->pbm_regs + EVENT_QUEUE_CONTROL_CLEAR(msiqid),
-		   EVENT_QUEUE_CONTROL_CLEAR_DIS);
-
-	free_msi(pbm, msi_num);
-
-	/* The sun4u_destroy_msi() will liberate the devino and thus the MSIQ
-	 * allocation.
-	 */
-	sun4u_destroy_msi(virt_irq);
-}
+static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
+	.get_head	= pci_fire_get_head,
+	.dequeue_msi	= pci_fire_dequeue_msi,
+	.set_head	= pci_fire_set_head,
+	.msi_setup	= pci_fire_msi_setup,
+	.msi_teardown	= pci_fire_msi_teardown,
+	.msiq_alloc	= pci_fire_msiq_alloc,
+	.msiq_free	= pci_fire_msiq_free,
+	.msiq_build_irq	= pci_fire_msiq_build_irq,
+};
 
 static void pci_fire_msi_init(struct pci_pbm_info *pbm)
 {
-	const u32 *val;
-	int len;
-
-	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
-	if (!val || len != 4)
-		goto no_msi;
-	pbm->msiq_num = *val;
-	if (pbm->msiq_num) {
-		const struct msiq_prop {
-			u32 first_msiq;
-			u32 num_msiq;
-			u32 first_devino;
-		} *mqp;
-		const struct msi_range_prop {
-			u32 first_msi;
-			u32 num_msi;
-		} *mrng;
-		const struct addr_range_prop {
-			u32 msi32_high;
-			u32 msi32_low;
-			u32 msi32_len;
-			u32 msi64_high;
-			u32 msi64_low;
-			u32 msi64_len;
-		} *arng;
-
-		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
-		if (!val || len != 4)
-			goto no_msi;
-
-		pbm->msiq_ent_count = *val;
-
-		mqp = of_get_property(pbm->prom_node,
-				      "msi-eq-to-devino", &len);
-		if (!mqp)
-			mqp = of_get_property(pbm->prom_node,
-					      "msi-eq-devino", &len);
-		if (!mqp || len != sizeof(struct msiq_prop))
-			goto no_msi;
-
-		pbm->msiq_first = mqp->first_msiq;
-		pbm->msiq_first_devino = mqp->first_devino;
-
-		val = of_get_property(pbm->prom_node, "#msi", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_num = *val;
-
-		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
-		if (!mrng || len != sizeof(struct msi_range_prop))
-			goto no_msi;
-		pbm->msi_first = mrng->first_msi;
-
-		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_data_mask = *val;
-
-		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msix_data_width = *val;
-
-		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
-				       &len);
-		if (!arng || len != sizeof(struct addr_range_prop))
-			goto no_msi;
-		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
-			(u64) arng->msi32_low;
-		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
-			(u64) arng->msi64_low;
-		pbm->msi32_len = arng->msi32_len;
-		pbm->msi64_len = arng->msi64_len;
-
-		if (msi_bitmap_alloc(pbm))
-			goto no_msi;
-
-		if (msi_queue_alloc(pbm)) {
-			msi_bitmap_free(pbm);
-			goto no_msi;
-		}
-
-		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
-		       "devino[0x%x]\n",
-		       pbm->name,
-		       pbm->msiq_first, pbm->msiq_num,
-		       pbm->msiq_ent_count,
-		       pbm->msiq_first_devino);
-		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
-		       "width[%u]\n",
-		       pbm->name,
-		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
-		       pbm->msix_data_width);
-		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
-		       "addr64[0x%lx:0x%x]\n",
-		       pbm->name,
-		       pbm->msi32_start, pbm->msi32_len,
-		       pbm->msi64_start, pbm->msi64_len);
-		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
-		       pbm->name,
-		       __pa(pbm->msi_queues));
-	}
-	pbm->setup_msi_irq = pci_setup_msi_irq;
-	pbm->teardown_msi_irq = pci_teardown_msi_irq;
-
-	return;
-
-no_msi:
-	pbm->msiq_num = 0;
-	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
 }
 #else /* CONFIG_PCI_MSI */
 static void pci_fire_msi_init(struct pci_pbm_info *pbm)
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index f660c2b685eb..ccbb188f2e61 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -29,6 +29,33 @@
 #define PCI_STC_FLUSHFLAG_SET(STC) \
 	(*((STC)->strbuf_flushflag) != 0UL)
 
+#ifdef CONFIG_PCI_MSI
+struct pci_pbm_info;
+struct sparc64_msiq_ops {
+	int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+			unsigned long *head);
+	int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
+			   unsigned long *head, unsigned long *msi);
+	int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+			unsigned long head);
+	int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
+			 unsigned long msi, int is_msi64);
+	int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
+	int (*msiq_alloc)(struct pci_pbm_info *pbm);
+	void (*msiq_free)(struct pci_pbm_info *pbm);
+	int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long devino);
+};
+
+extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+				 const struct sparc64_msiq_ops *ops);
+
+struct sparc64_msiq_cookie {
+	struct pci_pbm_info *pbm;
+	unsigned long msiqid;
+};
+#endif
+
 struct pci_controller_info;
 
 struct pci_pbm_info {
@@ -90,6 +117,8 @@ struct pci_pbm_info {
 	u32 msiq_ent_count;
 	u32 msiq_first;
 	u32 msiq_first_devino;
+	u32 msiq_rotor;
+	struct sparc64_msiq_cookie *msiq_irq_cookies;
 	u32 msi_num;
 	u32 msi_first;
 	u32 msi_data_mask;
@@ -100,9 +129,11 @@ struct pci_pbm_info {
 	u32 msi64_len;
 	void *msi_queues;
 	unsigned long *msi_bitmap;
+	unsigned int *msi_irq_table;
 	int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
 			     struct msi_desc *entry);
 	void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+	const struct sparc64_msiq_ops *msi_ops;
 #endif /* !(CONFIG_PCI_MSI) */
 
 	/* This PBM's streaming buffer.  */
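
The sparc64_msiq_ops table added above is the entire per-controller surface: MSI number allocation, event-queue interrupt handling, and msi_msg composition all live in the shared pci_msi.c that follows. As a minimal sketch, a hypothetical backend would only fill in the eight hooks and hand the table to sparc64_pbm_msi_init(); the "foo_" names here are illustrative only, and the real implementations are in pci_fire.c and pci_sun4v.c:

	/* Illustrative only; not part of this patch. */
	static const struct sparc64_msiq_ops foo_msiq_ops = {
		.get_head	= foo_get_head,		/* read the MSIQ head pointer */
		.dequeue_msi	= foo_dequeue_msi,	/* pull one MSI, advance *head */
		.set_head	= foo_set_head,		/* ack by writing the head back */
		.msi_setup	= foo_msi_setup,	/* bind an MSI to an event queue */
		.msi_teardown	= foo_msi_teardown,	/* invalidate the MSI mapping */
		.msiq_alloc	= foo_msiq_alloc,	/* allocate queue memory */
		.msiq_free	= foo_msiq_free,	/* release queue memory */
		.msiq_build_irq	= foo_msiq_build_irq,	/* build the IRQ for one queue */
	};

	static void foo_msi_init(struct pci_pbm_info *pbm)
	{
		sparc64_pbm_msi_init(pbm, &foo_msiq_ops);
	}

Everything a hook touches is hardware-specific register or hypervisor state; no common bookkeeping is duplicated in the backends.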
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c
new file mode 100644
index 000000000000..0fa33b1f4274
--- /dev/null
+++ b/arch/sparc64/kernel/pci_msi.c
@@ -0,0 +1,433 @@
+/* pci_msi.c: Sparc64 MSI support common layer.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pci_impl.h"
+
+static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
+{
+	struct sparc64_msiq_cookie *msiq_cookie = cookie;
+	struct pci_pbm_info *pbm = msiq_cookie->pbm;
+	unsigned long msiqid = msiq_cookie->msiqid;
+	const struct sparc64_msiq_ops *ops;
+	unsigned long orig_head, head;
+	int err;
+
+	ops = pbm->msi_ops;
+
+	err = ops->get_head(pbm, msiqid, &head);
+	if (unlikely(err < 0))
+		goto err_get_head;
+
+	orig_head = head;
+	for (;;) {
+		unsigned long msi;
+
+		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
+		if (likely(err > 0))
+			__do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]);
+
+		if (unlikely(err < 0))
+			goto err_dequeue;
+
+		if (err == 0)
+			break;
+	}
+	if (likely(head != orig_head)) {
+		err = ops->set_head(pbm, msiqid, head);
+		if (unlikely(err < 0))
+			goto err_set_head;
+	}
+	return IRQ_HANDLED;
+
+err_get_head:
+	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
+	       msiqid, err);
+	goto err_out;
+
+err_dequeue:
+	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
+	       "gives error %d\n",
+	       head, msiqid, err);
+	goto err_out;
+
+err_set_head:
+	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
+	       "gives error %d\n",
+	       head, msiqid, err);
+	goto err_out;
+
+err_out:
+	return IRQ_NONE;
+}
+
+static u32 pick_msiq(struct pci_pbm_info *pbm)
+{
+	static DEFINE_SPINLOCK(rotor_lock);
+	unsigned long flags;
+	u32 ret, rotor;
+
+	spin_lock_irqsave(&rotor_lock, flags);
+
+	rotor = pbm->msiq_rotor;
+	ret = pbm->msiq_first + rotor;
+
+	if (++rotor >= pbm->msiq_num)
+		rotor = 0;
+	pbm->msiq_rotor = rotor;
+
+	spin_unlock_irqrestore(&rotor_lock, flags);
+
+	return ret;
+}
+
+
+static int alloc_msi(struct pci_pbm_info *pbm)
+{
+	int i;
+
+	for (i = 0; i < pbm->msi_num; i++) {
+		if (!test_and_set_bit(i, pbm->msi_bitmap))
+			return i + pbm->msi_first;
+	}
+
+	return -ENOENT;
+}
+
+static void free_msi(struct pci_pbm_info *pbm, int msi_num)
+{
+	msi_num -= pbm->msi_first;
+	clear_bit(msi_num, pbm->msi_bitmap);
+}
+
+static struct irq_chip msi_irq = {
+	.typename	= "PCI-MSI",
+	.mask		= mask_msi_irq,
+	.unmask		= unmask_msi_irq,
+	.enable		= unmask_msi_irq,
+	.disable	= mask_msi_irq,
+	/* XXX affinity XXX */
+};
+
+int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
+			  struct pci_dev *pdev,
+			  struct msi_desc *entry)
+{
+	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
+	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
+	struct msi_msg msg;
+	int msi, err;
+	u32 msiqid;
+
+	*virt_irq_p = virt_irq_alloc(~0);
+	err = -ENOMEM;
+	if (!*virt_irq_p)
+		goto out_err;
+
+	set_irq_chip(*virt_irq_p, &msi_irq);
+
+	err = alloc_msi(pbm);
+	if (unlikely(err < 0))
+		goto out_virt_irq_free;
+
+	msi = err;
+
+	msiqid = pick_msiq(pbm);
+
+	err = ops->msi_setup(pbm, msiqid, msi,
+			     (entry->msi_attrib.is_64 ? 1 : 0));
+	if (err)
+		goto out_msi_free;
+
+	pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
+
+	if (entry->msi_attrib.is_64) {
+		msg.address_hi = pbm->msi64_start >> 32;
+		msg.address_lo = pbm->msi64_start & 0xffffffff;
+	} else {
+		msg.address_hi = 0;
+		msg.address_lo = pbm->msi32_start;
+	}
+	msg.data = msi;
+
+	set_irq_msi(*virt_irq_p, entry);
+	write_msi_msg(*virt_irq_p, &msg);
+
+	return 0;
+
+out_msi_free:
+	free_msi(pbm, msi);
+
+out_virt_irq_free:
+	set_irq_chip(*virt_irq_p, NULL);
+	virt_irq_free(*virt_irq_p);
+	*virt_irq_p = 0;
+
+out_err:
+	return err;
+}
+
+void sparc64_teardown_msi_irq(unsigned int virt_irq,
+			      struct pci_dev *pdev)
+{
+	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
+	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
+	unsigned int msi_num;
+	int i, err;
+
+	for (i = 0; i < pbm->msi_num; i++) {
+		if (pbm->msi_irq_table[i] == virt_irq)
+			break;
+	}
+	if (i >= pbm->msi_num) {
+		printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
+		       pbm->name, virt_irq);
+		return;
+	}
+
+	msi_num = pbm->msi_first + i;
+	pbm->msi_irq_table[i] = ~0U;
+
+	err = ops->msi_teardown(pbm, msi_num);
+	if (err) {
+		printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
+		       "irq %u, gives error %d\n",
+		       pbm->name, msi_num, virt_irq, err);
+		return;
+	}
+
+	free_msi(pbm, msi_num);
+
+	set_irq_chip(virt_irq, NULL);
+	virt_irq_free(virt_irq);
+}
+
+static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+{
+	unsigned long size, bits_per_ulong;
+
+	bits_per_ulong = sizeof(unsigned long) * 8;
+	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
+	size /= 8;
+	BUG_ON(size % sizeof(unsigned long));
+
+	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
+	if (!pbm->msi_bitmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void msi_bitmap_free(struct pci_pbm_info *pbm)
+{
+	kfree(pbm->msi_bitmap);
+	pbm->msi_bitmap = NULL;
+}
+
+static int msi_table_alloc(struct pci_pbm_info *pbm)
+{
+	int size, i;
+
+	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
+	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
+	if (!pbm->msiq_irq_cookies)
+		return -ENOMEM;
+
+	for (i = 0; i < pbm->msiq_num; i++) {
+		struct sparc64_msiq_cookie *p;
+
+		p = &pbm->msiq_irq_cookies[i];
+		p->pbm = pbm;
+		p->msiqid = pbm->msiq_first + i;
+	}
+
+	size = pbm->msi_num * sizeof(unsigned int);
+	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
+	if (!pbm->msi_irq_table) {
+		kfree(pbm->msiq_irq_cookies);
+		pbm->msiq_irq_cookies = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void msi_table_free(struct pci_pbm_info *pbm)
+{
+	kfree(pbm->msiq_irq_cookies);
+	pbm->msiq_irq_cookies = NULL;
+
+	kfree(pbm->msi_irq_table);
+	pbm->msi_irq_table = NULL;
+}
+
+static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
+				 const struct sparc64_msiq_ops *ops,
+				 unsigned long msiqid,
+				 unsigned long devino)
+{
+	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
+	int err;
+
+	if (irq < 0)
+		return irq;
+
+	err = request_irq(irq, sparc64_msiq_interrupt, 0,
+			  "MSIQ",
+			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
+				      const struct sparc64_msiq_ops *ops)
+{
+	int i;
+
+	for (i = 0; i < pbm->msiq_num; i++) {
+		unsigned long msiqid = i + pbm->msiq_first;
+		unsigned long devino = i + pbm->msiq_first_devino;
+		int err;
+
+		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+			  const struct sparc64_msiq_ops *ops)
+{
+	const u32 *val;
+	int len;
+
+	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
+	if (!val || len != 4)
+		goto no_msi;
+	pbm->msiq_num = *val;
+	if (pbm->msiq_num) {
+		const struct msiq_prop {
+			u32 first_msiq;
+			u32 num_msiq;
+			u32 first_devino;
+		} *mqp;
+		const struct msi_range_prop {
+			u32 first_msi;
+			u32 num_msi;
+		} *mrng;
+		const struct addr_range_prop {
+			u32 msi32_high;
+			u32 msi32_low;
+			u32 msi32_len;
+			u32 msi64_high;
+			u32 msi64_low;
+			u32 msi64_len;
+		} *arng;
+
+		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
+		if (!val || len != 4)
+			goto no_msi;
+
+		pbm->msiq_ent_count = *val;
+
+		mqp = of_get_property(pbm->prom_node,
+				      "msi-eq-to-devino", &len);
+		if (!mqp)
+			mqp = of_get_property(pbm->prom_node,
+					      "msi-eq-devino", &len);
+		if (!mqp || len != sizeof(struct msiq_prop))
+			goto no_msi;
+
+		pbm->msiq_first = mqp->first_msiq;
+		pbm->msiq_first_devino = mqp->first_devino;
+
+		val = of_get_property(pbm->prom_node, "#msi", &len);
+		if (!val || len != 4)
+			goto no_msi;
+		pbm->msi_num = *val;
+
+		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
+		if (!mrng || len != sizeof(struct msi_range_prop))
+			goto no_msi;
+		pbm->msi_first = mrng->first_msi;
+
+		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
+		if (!val || len != 4)
+			goto no_msi;
+		pbm->msi_data_mask = *val;
+
+		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
+		if (!val || len != 4)
+			goto no_msi;
+		pbm->msix_data_width = *val;
+
+		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
+				       &len);
+		if (!arng || len != sizeof(struct addr_range_prop))
+			goto no_msi;
+		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
+			(u64) arng->msi32_low;
+		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
+			(u64) arng->msi64_low;
+		pbm->msi32_len = arng->msi32_len;
+		pbm->msi64_len = arng->msi64_len;
+
+		if (msi_bitmap_alloc(pbm))
+			goto no_msi;
+
+		if (msi_table_alloc(pbm)) {
+			msi_bitmap_free(pbm);
+			goto no_msi;
+		}
+
+		if (ops->msiq_alloc(pbm)) {
+			msi_table_free(pbm);
+			msi_bitmap_free(pbm);
+			goto no_msi;
+		}
+
+		if (sparc64_bringup_msi_queues(pbm, ops)) {
+			ops->msiq_free(pbm);
+			msi_table_free(pbm);
+			msi_bitmap_free(pbm);
+			goto no_msi;
+		}
+
+		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
+		       "devino[0x%x]\n",
+		       pbm->name,
+		       pbm->msiq_first, pbm->msiq_num,
+		       pbm->msiq_ent_count,
+		       pbm->msiq_first_devino);
+		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
+		       "width[%u]\n",
+		       pbm->name,
+		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
+		       pbm->msix_data_width);
+		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
+		       "addr64[0x%lx:0x%x]\n",
+		       pbm->name,
+		       pbm->msi32_start, pbm->msi32_len,
+		       pbm->msi64_start, pbm->msi64_len);
+		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
+		       pbm->name,
+		       __pa(pbm->msi_queues));
+
+		pbm->msi_ops = ops;
+		pbm->setup_msi_irq = sparc64_setup_msi_irq;
+		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
+	}
+	return;
+
+no_msi:
+	pbm->msiq_num = 0;
+	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+}
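
Drivers never call sparc64_setup_msi_irq()/sparc64_teardown_msi_irq() directly; they are reached through the pbm->setup_msi_irq/teardown_msi_irq hooks installed above. The arch_setup_msi_irq()/arch_teardown_msi_irq() glue that dispatches to those hooks lives in arch/sparc64/kernel/pci.c and is not touched by this patch; roughly, and only as a sketch (the exact code there may differ), it looks like:

	/* Sketch of the dispatch path in arch/sparc64/kernel/pci.c,
	 * shown for orientation; not part of this diff.
	 */
	int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
	{
		struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
		unsigned int virt_irq = 0;
		int err = -EINVAL;

		if (pbm->setup_msi_irq)
			err = pbm->setup_msi_irq(&virt_irq, pdev, desc);
		return err;
	}

	void arch_teardown_msi_irq(unsigned int virt_irq)
	{
		struct msi_desc *entry = get_irq_msi(virt_irq);
		struct pci_dev *pdev = entry->dev;
		struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

		if (pbm->teardown_msi_irq)
			pbm->teardown_msi_irq(virt_irq, pdev);
	}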
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index da724b13e89e..97c45b26b780 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -748,111 +748,102 @@ struct pci_sun4v_msiq_entry {
 	u64		reserved2;
 };
 
-/* For now this just runs as a pre-handler for the real interrupt handler.
- * So we just walk through the queue and ACK all the entries, update the
- * head pointer, and return.
- *
- * In the longer term it would be nice to do something more integrated
- * wherein we can pass in some of this MSI info to the drivers.  This
- * would be most useful for PCIe fabric error messages, although we could
- * invoke those directly from the loop here in order to pass the info around.
- */
-static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
+static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long *head)
 {
-	struct pci_pbm_info *pbm = data1;
-	struct pci_sun4v_msiq_entry *base, *ep;
-	unsigned long msiqid, orig_head, head, type, err;
-
-	msiqid = (unsigned long) data2;
+	unsigned long err, limit;
 
-	head = 0xdeadbeef;
-	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
+	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
 	if (unlikely(err))
-		goto hv_error_get;
-
-	if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
-		goto bad_offset;
-
-	head /= sizeof(struct pci_sun4v_msiq_entry);
-	orig_head = head;
-	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
-				   (pbm->msiq_ent_count *
-				    sizeof(struct pci_sun4v_msiq_entry))));
-	ep = &base[head];
-	while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
-		type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
-		if (unlikely(type != MSIQ_TYPE_MSI32 &&
-			     type != MSIQ_TYPE_MSI64))
-			goto bad_type;
-
-		pci_sun4v_msi_setstate(pbm->devhandle,
-				       ep->msi_data /* msi_num */,
-				       HV_MSISTATE_IDLE);
-
-		/* Clear the entry.  */
-		ep->version_type &= ~MSIQ_TYPE_MASK;
-
-		/* Go to next entry in ring.  */
-		head++;
-		if (head >= pbm->msiq_ent_count)
-			head = 0;
-		ep = &base[head];
-	}
+		return -ENXIO;
 
-	if (likely(head != orig_head)) {
-		/* ACK entries by updating head pointer.  */
-		head *= sizeof(struct pci_sun4v_msiq_entry);
-		err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
-		if (unlikely(err))
-			goto hv_error_set;
-	}
-	return;
+	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+	if (unlikely(*head >= limit))
+		return -EFBIG;
 
-hv_error_set:
-	printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
-	goto hv_error_cont;
+	return 0;
+}
 
-hv_error_get:
-	printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);
+static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
+				 unsigned long msiqid, unsigned long *head,
+				 unsigned long *msi)
+{
+	struct pci_sun4v_msiq_entry *ep;
+	unsigned long err, type;
 
-hv_error_cont:
-	printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
-	       pbm->devhandle, msiqid, head);
-	return;
+	/* Note: void pointer arithmetic, 'head' is a byte offset  */
+	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
+				 (pbm->msiq_ent_count *
+				  sizeof(struct pci_sun4v_msiq_entry))) +
+	      *head);
 
-bad_offset:
-	printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
-	       head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
-	return;
+	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
+		return 0;
 
-bad_type:
-	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
-	return;
+	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
+	if (unlikely(type != MSIQ_TYPE_MSI32 &&
+		     type != MSIQ_TYPE_MSI64))
+		return -EINVAL;
+
+	*msi = ep->msi_data;
+
+	err = pci_sun4v_msi_setstate(pbm->devhandle,
+				     ep->msi_data /* msi_num */,
+				     HV_MSISTATE_IDLE);
+	if (unlikely(err))
+		return -ENXIO;
+
+	/* Clear the entry.  */
+	ep->version_type &= ~MSIQ_TYPE_MASK;
+
+	(*head) += sizeof(struct pci_sun4v_msiq_entry);
+	if (*head >=
+	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
+		*head = 0;
+
+	return 1;
 }
 
-static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long head)
 {
-	unsigned long size, bits_per_ulong;
+	unsigned long err;
 
-	bits_per_ulong = sizeof(unsigned long) * 8;
-	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
-	size /= 8;
-	BUG_ON(size % sizeof(unsigned long));
+	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
+	if (unlikely(err))
+		return -EINVAL;
 
-	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
-	if (!pbm->msi_bitmap)
-		return -ENOMEM;
+	return 0;
+}
 
+static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+			       unsigned long msi, int is_msi64)
+{
+	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
+				  (is_msi64 ?
+				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
+		return -ENXIO;
+	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
+		return -ENXIO;
+	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
+		return -ENXIO;
 	return 0;
 }
 
-static void msi_bitmap_free(struct pci_pbm_info *pbm)
+static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
 {
-	kfree(pbm->msi_bitmap);
-	pbm->msi_bitmap = NULL;
+	unsigned long err, msiqid;
+
+	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
+	if (err)
+		return -ENXIO;
+
+	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
+
+	return 0;
 }
 
-static int msi_queue_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
 {
 	unsigned long q_size, alloc_size, pages, order;
 	int i;
@@ -906,232 +897,59 @@ h_error:
 	return -EINVAL;
 }
 
-
-static int alloc_msi(struct pci_pbm_info *pbm)
+static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
 {
+	unsigned long q_size, alloc_size, pages, order;
 	int i;
 
-	for (i = 0; i < pbm->msi_num; i++) {
-		if (!test_and_set_bit(i, pbm->msi_bitmap))
-			return i + pbm->msi_first;
-	}
-
-	return -ENOENT;
-}
-
-static void free_msi(struct pci_pbm_info *pbm, int msi_num)
-{
-	msi_num -= pbm->msi_first;
-	clear_bit(msi_num, pbm->msi_bitmap);
-}
-
-static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
-				   struct pci_dev *pdev,
-				   struct msi_desc *entry)
-{
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long devino, msiqid;
-	struct msi_msg msg;
-	int msi_num, err;
-
-	*virt_irq_p = 0;
-
-	msi_num = alloc_msi(pbm);
-	if (msi_num < 0)
-		return msi_num;
-
-	err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
-			      pbm->msiq_first_devino,
-			      (pbm->msiq_first_devino +
-			       pbm->msiq_num));
-	if (err < 0)
-		goto out_err;
-	devino = err;
-
-	msiqid = ((devino - pbm->msiq_first_devino) +
-		  pbm->msiq_first);
-
-	err = -EINVAL;
-	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-	if (err)
-		goto out_err;
-
-	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
-		goto out_err;
-
-	if (pci_sun4v_msi_setmsiq(pbm->devhandle,
-				  msi_num, msiqid,
-				  (entry->msi_attrib.is_64 ?
-				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
-		goto out_err;
-
-	if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
-		goto out_err;
-
-	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
-		goto out_err;
-
-	sparc64_set_msi(*virt_irq_p, msi_num);
+	for (i = 0; i < pbm->msiq_num; i++) {
+		unsigned long msiqid = pbm->msiq_first + i;
 
-	if (entry->msi_attrib.is_64) {
-		msg.address_hi = pbm->msi64_start >> 32;
-		msg.address_lo = pbm->msi64_start & 0xffffffff;
-	} else {
-		msg.address_hi = 0;
-		msg.address_lo = pbm->msi32_start;
+		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
 	}
-	msg.data = msi_num;
 
-	set_irq_msi(*virt_irq_p, entry);
-	write_msi_msg(*virt_irq_p, &msg);
-
-	irq_install_pre_handler(*virt_irq_p,
-				pci_sun4v_msi_prehandler,
-				pbm, (void *) msiqid);
+	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+	alloc_size = (pbm->msiq_num * q_size);
+	order = get_order(alloc_size);
 
-	return 0;
+	pages = (unsigned long) pbm->msi_queues;
 
-out_err:
-	free_msi(pbm, msi_num);
-	return err;
+	free_pages(pages, order);
 
+	pbm->msi_queues = NULL;
 }
 
-static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
-				       struct pci_dev *pdev)
+static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+				    unsigned long msiqid,
+				    unsigned long devino)
 {
-	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-	unsigned long msiqid, err;
-	unsigned int msi_num;
-
-	msi_num = sparc64_get_msi(virt_irq);
-	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
-	if (err) {
-		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
-		       pbm->name, err);
-		return;
-	}
+	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
 
-	pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
-	pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);
+	if (!virt_irq)
+		return -ENOMEM;
 
-	free_msi(pbm, msi_num);
+	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+		return -EINVAL;
+	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+		return -EINVAL;
 
-	/* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
-	 * allocation.
-	 */
-	sun4v_destroy_msi(virt_irq);
+	return virt_irq;
 }
 
+static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
+	.get_head	= pci_sun4v_get_head,
+	.dequeue_msi	= pci_sun4v_dequeue_msi,
+	.set_head	= pci_sun4v_set_head,
+	.msi_setup	= pci_sun4v_msi_setup,
+	.msi_teardown	= pci_sun4v_msi_teardown,
+	.msiq_alloc	= pci_sun4v_msiq_alloc,
+	.msiq_free	= pci_sun4v_msiq_free,
+	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
+};
+
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 {
-	const u32 *val;
-	int len;
-
-	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
-	if (!val || len != 4)
-		goto no_msi;
-	pbm->msiq_num = *val;
-	if (pbm->msiq_num) {
-		const struct msiq_prop {
-			u32 first_msiq;
-			u32 num_msiq;
-			u32 first_devino;
-		} *mqp;
-		const struct msi_range_prop {
-			u32 first_msi;
-			u32 num_msi;
-		} *mrng;
-		const struct addr_range_prop {
-			u32 msi32_high;
-			u32 msi32_low;
-			u32 msi32_len;
-			u32 msi64_high;
-			u32 msi64_low;
-			u32 msi64_len;
-		} *arng;
-
-		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
-		if (!val || len != 4)
-			goto no_msi;
-
-		pbm->msiq_ent_count = *val;
-
-		mqp = of_get_property(pbm->prom_node,
-				      "msi-eq-to-devino", &len);
-		if (!mqp || len != sizeof(struct msiq_prop))
-			goto no_msi;
-
-		pbm->msiq_first = mqp->first_msiq;
-		pbm->msiq_first_devino = mqp->first_devino;
-
-		val = of_get_property(pbm->prom_node, "#msi", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_num = *val;
-
-		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
-		if (!mrng || len != sizeof(struct msi_range_prop))
-			goto no_msi;
-		pbm->msi_first = mrng->first_msi;
-
-		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msi_data_mask = *val;
-
-		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
-		if (!val || len != 4)
-			goto no_msi;
-		pbm->msix_data_width = *val;
-
-		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
-				       &len);
-		if (!arng || len != sizeof(struct addr_range_prop))
-			goto no_msi;
-		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
-			(u64) arng->msi32_low;
-		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
-			(u64) arng->msi64_low;
-		pbm->msi32_len = arng->msi32_len;
-		pbm->msi64_len = arng->msi64_len;
-
-		if (msi_bitmap_alloc(pbm))
-			goto no_msi;
-
-		if (msi_queue_alloc(pbm)) {
-			msi_bitmap_free(pbm);
-			goto no_msi;
-		}
-
-		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
-		       "devino[0x%x]\n",
-		       pbm->name,
-		       pbm->msiq_first, pbm->msiq_num,
-		       pbm->msiq_ent_count,
-		       pbm->msiq_first_devino);
-		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
-		       "width[%u]\n",
-		       pbm->name,
-		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
-		       pbm->msix_data_width);
-		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
-		       "addr64[0x%lx:0x%x]\n",
-		       pbm->name,
-		       pbm->msi32_start, pbm->msi32_len,
-		       pbm->msi64_start, pbm->msi64_len);
-		printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
-		       pbm->name,
-		       pbm->msi_queues);
-	}
-	pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
-	pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
-
-	return;
-
-no_msi:
-	pbm->msiq_num = 0;
-	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
 }
 #else /* CONFIG_PCI_MSI */
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 4de37254436e..bad3c28cdee8 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -59,8 +59,10 @@ extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
 extern void sun4u_destroy_msi(unsigned int virt_irq);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 
-extern void sparc64_set_msi(unsigned int virt_irq, u32 msi);
-extern u32 sparc64_get_msi(unsigned int virt_irq);
+extern unsigned char virt_irq_alloc(unsigned int real_irq);
+#ifdef CONFIG_PCI_MSI
+extern void virt_irq_free(unsigned int virt_irq);
+#endif
 
 extern void fixup_irqs(void);
 