Diffstat (limited to 'drivers/pci/msi.c')
 drivers/pci/msi.c | 160 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 94 insertions(+), 66 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 362773247fbf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 }
 #endif
 
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
 {
 	u16 control;
 
-	if (pos) {
-		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
-		control &= ~PCI_MSI_FLAGS_ENABLE;
-		if (enable)
-			control |= PCI_MSI_FLAGS_ENABLE;
-		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
-	}
-}
+	BUG_ON(!pos);
 
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
-	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+	control &= ~PCI_MSI_FLAGS_ENABLE;
+	if (enable)
+		control |= PCI_MSI_FLAGS_ENABLE;
+	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
 }
 
 static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,21 +126,24 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
  * mask all MSI interrupts by clearing the MSI enable bit does not work
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
  */
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
 	if (!desc->msi_attrib.maskbit)
-		return;
+		return 0;
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
 	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	desc->masked = __msi_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -155,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  * file. This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-		PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+		PCI_MSIX_ENTRY_VECTOR_CTRL;
 	mask_bits &= ~1;
 	mask_bits |= flag;
 	writel(mask_bits, desc->mask_base + offset);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	desc->masked = __msix_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -196,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		void __iomem *base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -233,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		writel(msg->address_lo,
-			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(msg->address_hi,
-			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -303,7 +305,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 	pos = entry->msi_attrib.pos;
 
 	pci_intx_for_msi(dev, 0);
-	msi_set_enable(dev, 0);
+	msi_set_enable(dev, pos, 0);
 	write_msi_msg(dev->irq, &entry->msg);
 
 	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +323,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
 	if (!dev->msix_enabled)
 		return;
+	BUG_ON(list_empty(&dev->msi_list));
+	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+	pos = entry->msi_attrib.pos;
+	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
-	msix_set_enable(dev, 0);
+	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
 
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		write_msi_msg(entry->irq, &entry->msg);
 		msix_mask_irq(entry, entry->masked);
 	}
 
-	BUG_ON(list_empty(&dev->msi_list));
-	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
-	pos = entry->msi_attrib.pos;
-	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
 	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	control |= PCI_MSIX_FLAGS_ENABLE;
 	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
 }
 
@@ -365,9 +367,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	u16 control;
 	unsigned mask;
 
-	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */
-
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	msi_set_enable(dev, pos, 0);	/* Disable MSI during set up */
+
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
 	/* MSI Entry Initialization */
 	entry = alloc_msi_entry(dev);
@@ -381,7 +383,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos = pos;
 
-	entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+	entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
 	/* All MSIs are unmasked by default, Mask them all */
 	if (entry->msi_attrib.maskbit)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -393,13 +395,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* Configure MSI capability structure */
 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
+		msi_mask_irq(entry, mask, ~mask);
 		msi_free_irqs(dev);
 		return ret;
 	}
 
 	/* Set MSI enabled bits	*/
 	pci_intx_for_msi(dev, 0);
-	msi_set_enable(dev, 1);
+	msi_set_enable(dev, pos, 1);
 	dev->msi_enabled = 1;
 
 	dev->irq = entry->irq;
@@ -427,11 +430,14 @@ static int msix_capability_init(struct pci_dev *dev,
 	u8 bir;
 	void __iomem *base;
 
-	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+	/* Ensure MSI-X is disabled while it is set up */
+	control &= ~PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
 	/* Request & Map MSI-X table region */
-	pci_read_config_word(dev, msi_control_reg(pos), &control);
 	nr_entries = multi_msix_capable(control);
 
 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,11 +448,16 @@ static int msix_capability_init(struct pci_dev *dev,
 	if (base == NULL)
 		return -ENOMEM;
 
-	/* MSI-X Table Initialization */
 	for (i = 0; i < nvec; i++) {
 		entry = alloc_msi_entry(dev);
-		if (!entry)
-			break;
+		if (!entry) {
+			if (!i)
+				iounmap(base);
+			else
+				msi_free_irqs(dev);
+			/* No enough memory. Don't try again */
+			return -ENOMEM;
+		}
 
 		j = entries[i].entry;
 		entry->msi_attrib.is_msix = 1;
@@ -455,7 +466,6 @@ static int msix_capability_init(struct pci_dev *dev,
 		entry->msi_attrib.default_irq = dev->irq;
 		entry->msi_attrib.pos = pos;
 		entry->mask_base = base;
-		msix_mask_irq(entry, 1);
 
 		list_add_tail(&entry->list, &dev->msi_list);
 	}
@@ -480,22 +490,31 @@ static int msix_capability_init(struct pci_dev *dev,
 		return ret;
 	}
 
+	/*
+	 * Some devices require MSI-X to be enabled before we can touch the
+	 * MSI-X registers. We need to mask all the vectors to prevent
+	 * interrupts coming in before they're fully set up.
+	 */
+	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
 	i = 0;
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		entries[i].vector = entry->irq;
 		set_irq_msi(entry->irq, entry);
+		j = entries[i].entry;
+		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+					PCI_MSIX_ENTRY_VECTOR_CTRL);
+		msix_mask_irq(entry, 1);
 		i++;
 	}
-	/* Set MSI-X enabled bits */
+
+	/* Set MSI-X enabled bits and unmask the function */
 	pci_intx_for_msi(dev, 0);
-	msix_set_enable(dev, 1);
 	dev->msix_enabled = 1;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int vector = entry->msi_attrib.entry_nr;
-		entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
-			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-	}
+	control &= ~PCI_MSIX_FLAGS_MASKALL;
+	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
 
 	return 0;
 }
@@ -596,19 +615,24 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	struct msi_desc *desc;
 	u32 mask;
 	u16 ctrl;
+	unsigned pos;
 
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
-	msi_set_enable(dev, 0);
+	BUG_ON(list_empty(&dev->msi_list));
+	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+	pos = desc->msi_attrib.pos;
+
+	msi_set_enable(dev, pos, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msi_enabled = 0;
 
-	BUG_ON(list_empty(&dev->msi_list));
-	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
-	pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
+	/* Return the device with MSI unmasked as initial states */
+	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
 	mask = msi_capable_mask(ctrl);
-	msi_mask_irq(desc, mask, ~mask);
+	/* Keep cached state to be restored */
+	__msi_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -648,10 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
-				* PCI_MSIX_ENTRY_SIZE
-				+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
 			if (list_is_last(&entry->list, &dev->msi_list))
 				iounmap(entry->mask_base);
 		}
@@ -691,8 +711,8 @@ int pci_msix_table_size(struct pci_dev *dev)
  * indicates the successful configuration of MSI-X capability structure
  * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
  * Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
  **/
 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 {
@@ -708,7 +728,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 
 	nr_entries = pci_msix_table_size(dev);
 	if (nvec > nr_entries)
-		return -EINVAL;
+		return nr_entries;
 
 	/* Check for any invalid entries */
 	for (i = 0; i < nvec; i++) {
@@ -739,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
 
 void pci_msix_shutdown(struct pci_dev* dev)
 {
+	struct msi_desc *entry;
+
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	/* Return the device with MSI-X masked as initial states */
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		/* Keep cached states to be restored */
+		__msix_mask_irq(entry, 1);
+	}
+
 	msix_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
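
With the pci_enable_msix() hunks above, a return value greater than zero now reports how many MSI-X vectors are actually available (instead of failing with -EINVAL when the request is too large), so a caller can shrink its request and retry. A minimal, hypothetical driver-side sketch of that retry loop follows; it assumes the caller has already filled in entries[0..nvec-1].entry, and the foo_* helper name is illustrative, not part of this patch:

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical helper, not from this patch: retry pci_enable_msix()
 * with the vector count it reports until the request fits. */
static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* all nvec vectors allocated */
		if (rc < 0)
			return rc;	/* hard failure, give up */
		nvec = rc;		/* only rc vectors available; retry */
	}

	return -ENOSPC;
}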