about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2007-08-31 01:33:25 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-10-14 00:53:09 -0400
commit9bb3c227c47b23280eb50fac0872d96ef3e160a7 (patch)
treec519fa5e544bc1df018eafecb3563c7ce1b3c43b /arch
parentf9c97e5d7cd9ff5e51e16d5db08d7e54fa4cb6bb (diff)
[SPARC64]: Enable MSI on sun4u Fire PCI-E controllers.
The support code is identical to the hypervisor sun4v stuff, just replacing the hypervisor calls with register reads and writes in the Fire controller.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r-- arch/sparc64/kernel/irq.c      71
-rw-r--r-- arch/sparc64/kernel/pci_fire.c 446
2 files changed, 516 insertions, 1 deletion
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 23956096b3bf..7f5a4c77e3e4 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -406,6 +406,18 @@ static void sun4v_irq_disable(unsigned int virt_irq)
406} 406}
407 407
408#ifdef CONFIG_PCI_MSI 408#ifdef CONFIG_PCI_MSI
409static void sun4u_msi_enable(unsigned int virt_irq)
410{
411 sun4u_irq_enable(virt_irq);
412 unmask_msi_irq(virt_irq);
413}
414
415static void sun4u_msi_disable(unsigned int virt_irq)
416{
417 mask_msi_irq(virt_irq);
418 sun4u_irq_disable(virt_irq);
419}
420
409static void sun4v_msi_enable(unsigned int virt_irq) 421static void sun4v_msi_enable(unsigned int virt_irq)
410{ 422{
411 sun4v_irq_enable(virt_irq); 423 sun4v_irq_enable(virt_irq);
@@ -583,6 +595,17 @@ static struct irq_chip sun4v_irq_ack = {
583}; 595};
584 596
585#ifdef CONFIG_PCI_MSI 597#ifdef CONFIG_PCI_MSI
598static struct irq_chip sun4u_msi = {
599 .typename = "sun4u+msi",
600 .mask = mask_msi_irq,
601 .unmask = unmask_msi_irq,
602 .enable = sun4u_msi_enable,
603 .disable = sun4u_msi_disable,
604 .ack = run_pre_handler,
605 .end = sun4u_irq_end,
606 .set_affinity = sun4u_set_affinity,
607};
608
586static struct irq_chip sun4v_msi = { 609static struct irq_chip sun4v_msi = {
587 .typename = "sun4v+msi", 610 .typename = "sun4v+msi",
588 .mask = mask_msi_irq, 611 .mask = mask_msi_irq,
@@ -628,6 +651,7 @@ void irq_install_pre_handler(int virt_irq,
628 chip == &sun4v_irq_ack || 651 chip == &sun4v_irq_ack ||
629 chip == &sun4v_virq_ack 652 chip == &sun4v_virq_ack
630#ifdef CONFIG_PCI_MSI 653#ifdef CONFIG_PCI_MSI
654 || chip == &sun4u_msi
631 || chip == &sun4v_msi 655 || chip == &sun4v_msi
632#endif 656#endif
633 ) 657 )
@@ -789,6 +813,53 @@ void sun4v_destroy_msi(unsigned int virt_irq)
789{ 813{
790 virt_irq_free(virt_irq); 814 virt_irq_free(virt_irq);
791} 815}
816
817unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
818 unsigned int msi_start, unsigned int msi_end,
819 unsigned long imap_base, unsigned long iclr_base)
820{
821 struct ino_bucket *bucket;
822 struct irq_handler_data *data;
823 unsigned long sysino;
824 unsigned int devino;
825
826 /* Find a free devino in the given range. */
827 for (devino = msi_start; devino < msi_end; devino++) {
828 sysino = (portid << 6) | devino;
829 bucket = &ivector_table[sysino];
830 if (!bucket->virt_irq)
831 break;
832 }
833 if (devino >= msi_end)
834 return -ENOSPC;
835
836 sysino = (portid << 6) | devino;
837 bucket = &ivector_table[sysino];
838 bucket->virt_irq = virt_irq_alloc(__irq(bucket));
839 *virt_irq_p = bucket->virt_irq;
840 set_irq_chip(bucket->virt_irq, &sun4u_msi);
841
842 data = get_irq_chip_data(bucket->virt_irq);
843 if (unlikely(data))
844 return devino;
845
846 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
847 if (unlikely(!data)) {
848 virt_irq_free(*virt_irq_p);
849 return -ENOMEM;
850 }
851 set_irq_chip_data(bucket->virt_irq, data);
852
853 data->imap = (imap_base + (devino * 0x8UL));
854 data->iclr = (iclr_base + (devino * 0x8UL));
855
856 return devino;
857}
858
859void sun4u_destroy_msi(unsigned int virt_irq)
860{
861 virt_irq_free(virt_irq);
862}
792#endif 863#endif
793 864
794void ack_bad_irq(unsigned int virt_irq) 865void ack_bad_irq(unsigned int virt_irq)
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 14d67fe21ab2..090f26579678 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -6,9 +6,12 @@
6#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/msi.h>
10#include <linux/irq.h>
9 11
10#include <asm/oplib.h> 12#include <asm/oplib.h>
11#include <asm/prom.h> 13#include <asm/prom.h>
14#include <asm/irq.h>
12 15
13#include "pci_impl.h" 16#include "pci_impl.h"
14 17
@@ -84,6 +87,440 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
84 return 0; 87 return 0;
85} 88}
86 89
90#ifdef CONFIG_PCI_MSI
91struct pci_msiq_entry {
92 u64 word0;
93#define MSIQ_WORD0_RESV 0x8000000000000000UL
94#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
95#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
96#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
97#define MSIQ_WORD0_LEN_SHIFT 46
98#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
99#define MSIQ_WORD0_ADDR0_SHIFT 32
100#define MSIQ_WORD0_RID 0x00000000ffff0000UL
101#define MSIQ_WORD0_RID_SHIFT 16
102#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
103#define MSIQ_WORD0_DATA0_SHIFT 0
104
105#define MSIQ_TYPE_MSG 0x6
106#define MSIQ_TYPE_MSI32 0xb
107#define MSIQ_TYPE_MSI64 0xf
108
109 u64 word1;
110#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
111#define MSIQ_WORD1_ADDR1_SHIFT 16
112#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
113#define MSIQ_WORD1_DATA1_SHIFT 0
114
115 u64 resv[6];
116};
117
118/* All MSI registers are offset from pbm->pbm_regs */
119#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
120#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
121
122#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
123#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
124#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
125
126#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
127#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
128#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
129#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
130
131#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
132#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
133#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
134#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
135#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
136
137#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
138#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
139#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
140
141#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
142#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
143
144#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
145#define MSI_MAP_VALID 0x8000000000000000UL
146#define MSI_MAP_EQWR_N 0x4000000000000000UL
147#define MSI_MAP_EQNUM 0x000000000000003fUL
148
149#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
150#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
151
152#define IMONDO_DATA0 0x02C000UL
153#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
154
155#define IMONDO_DATA1 0x02C008UL
156#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
157
158#define MSI_32BIT_ADDR 0x034000UL
159#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
160
161#define MSI_64BIT_ADDR 0x034008UL
162#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
163
164/* For now this just runs as a pre-handler for the real interrupt handler.
165 * So we just walk through the queue and ACK all the entries, update the
166 * head pointer, and return.
167 *
168 * In the longer term it would be nice to do something more integrated
169 * wherein we can pass in some of this MSI info to the drivers. This
170 * would be most useful for PCIe fabric error messages, although we could
171 * invoke those directly from the loop here in order to pass the info around.
172 */
173static void pci_msi_prehandler(unsigned int ino, void *data1, void *data2)
174{
175 unsigned long msiqid, orig_head, head, type_fmt, type;
176 struct pci_pbm_info *pbm = data1;
177 struct pci_msiq_entry *base, *ep;
178
179 msiqid = (unsigned long) data2;
180
181 head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
182
183 orig_head = head;
184 base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
185 ep = &base[head];
186 while ((ep->word0 & MSIQ_WORD0_FMT_TYPE) != 0) {
187 unsigned long msi_num;
188
189 type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
190 MSIQ_WORD0_FMT_TYPE_SHIFT);
191 type = (type_fmt >>3);
192 if (unlikely(type != MSIQ_TYPE_MSI32 &&
193 type != MSIQ_TYPE_MSI64))
194 goto bad_type;
195
196 msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
197 MSIQ_WORD0_DATA0_SHIFT);
198
199 fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
200 MSI_CLEAR_EQWR_N);
201
202 /* Clear the entry. */
203 ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
204
205 /* Go to next entry in ring. */
206 head++;
207 if (head >= pbm->msiq_ent_count)
208 head = 0;
209 ep = &base[head];
210 }
211
212 if (likely(head != orig_head)) {
213 /* ACK entries by updating head pointer. */
214 fire_write(pbm->pbm_regs +
215 EVENT_QUEUE_HEAD(msiqid),
216 head);
217 }
218 return;
219
220bad_type:
221 printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
222 return;
223}
224
225static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
226{
227 unsigned long size, bits_per_ulong;
228
229 bits_per_ulong = sizeof(unsigned long) * 8;
230 size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
231 size /= 8;
232 BUG_ON(size % sizeof(unsigned long));
233
234 pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
235 if (!pbm->msi_bitmap)
236 return -ENOMEM;
237
238 return 0;
239}
240
241static void msi_bitmap_free(struct pci_pbm_info *pbm)
242{
243 kfree(pbm->msi_bitmap);
244 pbm->msi_bitmap = NULL;
245}
246
247static int msi_queue_alloc(struct pci_pbm_info *pbm)
248{
249 unsigned long pages, order, i;
250
251 order = get_order(512 * 1024);
252 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
253 if (pages == 0UL) {
254 printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
255 order);
256 return -ENOMEM;
257 }
258 memset((char *)pages, 0, PAGE_SIZE << order);
259 pbm->msi_queues = (void *) pages;
260
261 fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
262 (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
263 __pa(pbm->msi_queues)));
264
265 fire_write(pbm->pbm_regs + IMONDO_DATA0,
266 pbm->portid << 6);
267 fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);
268
269 fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
270 pbm->msi32_start);
271 fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
272 pbm->msi64_start);
273
274 for (i = 0; i < pbm->msiq_num; i++) {
275 fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
276 fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
277 }
278
279 return 0;
280}
281
282static int alloc_msi(struct pci_pbm_info *pbm)
283{
284 int i;
285
286 for (i = 0; i < pbm->msi_num; i++) {
287 if (!test_and_set_bit(i, pbm->msi_bitmap))
288 return i + pbm->msi_first;
289 }
290
291 return -ENOENT;
292}
293
294static void free_msi(struct pci_pbm_info *pbm, int msi_num)
295{
296 msi_num -= pbm->msi_first;
297 clear_bit(msi_num, pbm->msi_bitmap);
298}
299
300static int pci_setup_msi_irq(unsigned int *virt_irq_p,
301 struct pci_dev *pdev,
302 struct msi_desc *entry)
303{
304 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
305 unsigned long devino, msiqid, cregs, imap_off;
306 struct msi_msg msg;
307 int msi_num, err;
308 u64 val;
309
310 *virt_irq_p = 0;
311
312 msi_num = alloc_msi(pbm);
313 if (msi_num < 0)
314 return msi_num;
315
316 cregs = (unsigned long) pbm->pbm_regs;
317
318 err = sun4u_build_msi(pbm->portid, virt_irq_p,
319 pbm->msiq_first_devino,
320 (pbm->msiq_first_devino +
321 pbm->msiq_num),
322 cregs + 0x001000UL,
323 cregs + 0x001400UL);
324 if (err < 0)
325 goto out_err;
326 devino = err;
327
328 imap_off = 0x001000UL + (devino * 0x8UL);
329
330 val = fire_read(pbm->pbm_regs + imap_off);
331 val |= (1UL << 63) | (1UL << 6);
332 fire_write(pbm->pbm_regs + imap_off, val);
333
334 msiqid = ((devino - pbm->msiq_first_devino) +
335 pbm->msiq_first);
336
337 fire_write(pbm->pbm_regs +
338 EVENT_QUEUE_CONTROL_SET(msiqid),
339 EVENT_QUEUE_CONTROL_SET_EN);
340
341 val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
342 val &= ~(MSI_MAP_EQNUM);
343 val |= msiqid;
344 fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
345
346 fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
347 MSI_CLEAR_EQWR_N);
348
349 val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
350 val |= MSI_MAP_VALID;
351 fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
352
353 sparc64_set_msi(*virt_irq_p, msi_num);
354
355 if (entry->msi_attrib.is_64) {
356 msg.address_hi = pbm->msi64_start >> 32;
357 msg.address_lo = pbm->msi64_start & 0xffffffff;
358 } else {
359 msg.address_hi = 0;
360 msg.address_lo = pbm->msi32_start;
361 }
362 msg.data = msi_num;
363
364 set_irq_msi(*virt_irq_p, entry);
365 write_msi_msg(*virt_irq_p, &msg);
366
367 irq_install_pre_handler(*virt_irq_p,
368 pci_msi_prehandler,
369 pbm, (void *) msiqid);
370
371 return 0;
372
373out_err:
374 free_msi(pbm, msi_num);
375 return err;
376}
377
378static void pci_teardown_msi_irq(unsigned int virt_irq,
379 struct pci_dev *pdev)
380{
381 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
382 unsigned long msiqid, msi_num;
383 u64 val;
384
385 msi_num = sparc64_get_msi(virt_irq);
386
387 val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
388
389 msiqid = (val & MSI_MAP_EQNUM);
390
391 val &= ~MSI_MAP_VALID;
392 fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);
393
394 fire_write(pbm->pbm_regs + EVENT_QUEUE_CONTROL_CLEAR(msiqid),
395 EVENT_QUEUE_CONTROL_CLEAR_DIS);
396
397 free_msi(pbm, msi_num);
398
399 /* The sun4u_destroy_msi() will liberate the devino and thus the MSIQ
400 * allocation.
401 */
402 sun4u_destroy_msi(virt_irq);
403}
404
405static void pci_fire_msi_init(struct pci_pbm_info *pbm)
406{
407 const u32 *val;
408 int len;
409
410 val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
411 if (!val || len != 4)
412 goto no_msi;
413 pbm->msiq_num = *val;
414 if (pbm->msiq_num) {
415 const struct msiq_prop {
416 u32 first_msiq;
417 u32 num_msiq;
418 u32 first_devino;
419 } *mqp;
420 const struct msi_range_prop {
421 u32 first_msi;
422 u32 num_msi;
423 } *mrng;
424 const struct addr_range_prop {
425 u32 msi32_high;
426 u32 msi32_low;
427 u32 msi32_len;
428 u32 msi64_high;
429 u32 msi64_low;
430 u32 msi64_len;
431 } *arng;
432
433 val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
434 if (!val || len != 4)
435 goto no_msi;
436
437 pbm->msiq_ent_count = *val;
438
439 mqp = of_get_property(pbm->prom_node,
440 "msi-eq-to-devino", &len);
441 if (!mqp)
442 mqp = of_get_property(pbm->prom_node,
443 "msi-eq-devino", &len);
444 if (!mqp || len != sizeof(struct msiq_prop))
445 goto no_msi;
446
447 pbm->msiq_first = mqp->first_msiq;
448 pbm->msiq_first_devino = mqp->first_devino;
449
450 val = of_get_property(pbm->prom_node, "#msi", &len);
451 if (!val || len != 4)
452 goto no_msi;
453 pbm->msi_num = *val;
454
455 mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
456 if (!mrng || len != sizeof(struct msi_range_prop))
457 goto no_msi;
458 pbm->msi_first = mrng->first_msi;
459
460 val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
461 if (!val || len != 4)
462 goto no_msi;
463 pbm->msi_data_mask = *val;
464
465 val = of_get_property(pbm->prom_node, "msix-data-width", &len);
466 if (!val || len != 4)
467 goto no_msi;
468 pbm->msix_data_width = *val;
469
470 arng = of_get_property(pbm->prom_node, "msi-address-ranges",
471 &len);
472 if (!arng || len != sizeof(struct addr_range_prop))
473 goto no_msi;
474 pbm->msi32_start = ((u64)arng->msi32_high << 32) |
475 (u64) arng->msi32_low;
476 pbm->msi64_start = ((u64)arng->msi64_high << 32) |
477 (u64) arng->msi64_low;
478 pbm->msi32_len = arng->msi32_len;
479 pbm->msi64_len = arng->msi64_len;
480
481 if (msi_bitmap_alloc(pbm))
482 goto no_msi;
483
484 if (msi_queue_alloc(pbm)) {
485 msi_bitmap_free(pbm);
486 goto no_msi;
487 }
488
489 printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
490 "devino[0x%x]\n",
491 pbm->name,
492 pbm->msiq_first, pbm->msiq_num,
493 pbm->msiq_ent_count,
494 pbm->msiq_first_devino);
495 printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
496 "width[%u]\n",
497 pbm->name,
498 pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
499 pbm->msix_data_width);
500 printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
501 "addr64[0x%lx:0x%x]\n",
502 pbm->name,
503 pbm->msi32_start, pbm->msi32_len,
504 pbm->msi64_start, pbm->msi64_len);
505 printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
506 pbm->name,
507 __pa(pbm->msi_queues));
508 }
509 pbm->setup_msi_irq = pci_setup_msi_irq;
510 pbm->teardown_msi_irq = pci_teardown_msi_irq;
511
512 return;
513
514no_msi:
515 pbm->msiq_num = 0;
516 printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
517}
518#else /* CONFIG_PCI_MSI */
519static void pci_fire_msi_init(struct pci_pbm_info *pbm)
520{
521}
522#endif /* !(CONFIG_PCI_MSI) */
523
87/* Based at pbm->controller_regs */ 524/* Based at pbm->controller_regs */
88#define FIRE_PARITY_CONTROL 0x470010UL 525#define FIRE_PARITY_CONTROL 0x470010UL
89#define FIRE_PARITY_ENAB 0x8000000000000000UL 526#define FIRE_PARITY_ENAB 0x8000000000000000UL
@@ -176,6 +613,7 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
176{ 613{
177 const struct linux_prom64_registers *regs; 614 const struct linux_prom64_registers *regs;
178 struct pci_pbm_info *pbm; 615 struct pci_pbm_info *pbm;
616 int err;
179 617
180 if ((portid & 1) == 0) 618 if ((portid & 1) == 0)
181 pbm = &p->pbm_A; 619 pbm = &p->pbm_A;
@@ -208,7 +646,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
208 646
209 pci_fire_hw_init(pbm); 647 pci_fire_hw_init(pbm);
210 648
211 return pci_fire_pbm_iommu_init(pbm); 649 err = pci_fire_pbm_iommu_init(pbm);
650 if (err)
651 return err;
652
653 pci_fire_msi_init(pbm);
654
655 return 0;
212} 656}
213 657
214static inline int portid_compare(u32 x, u32 y) 658static inline int portid_compare(u32 x, u32 y)