Diffstat (limited to 'drivers/pci/msi.c')
 -rw-r--r--  drivers/pci/msi.c  283
 1 file changed, 148 insertions(+), 135 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032b..f9cf3173b23d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@
 #include <linux/proc_fs.h>
 #include <linux/msi.h>
 #include <linux/smp.h>
-
-#include <asm/errno.h>
-#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/io.h>
 
 #include "pci.h"
 #include "msi.h"
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 	write_msi_msg_desc(desc, msg);
 }
 
-static int msi_free_irqs(struct pci_dev* dev);
+static void free_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *entry, *tmp;
+
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		int i, nvec;
+		if (!entry->irq)
+			continue;
+		nvec = 1 << entry->msi_attrib.multiple;
+		for (i = 0; i < nvec; i++)
+			BUG_ON(irq_has_action(entry->irq + i));
+	}
+
+	arch_teardown_msi_irqs(dev);
+
+	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+		if (entry->msi_attrib.is_msix) {
+			if (list_is_last(&entry->list, &dev->msi_list))
+				iounmap(entry->mask_base);
+		}
+		list_del(&entry->list);
+		kfree(entry);
+	}
+}
 
 static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
 {
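free_msi_irqs() insists that no handler is still attached to any vector it is about to tear down; with multi-message MSI a single msi_desc covers 1 << msi_attrib.multiple consecutive IRQ numbers starting at entry->irq. A minimal sketch of the driver-side ordering this assumes (the foo_* names are hypothetical, and the vector count is whatever the driver was granted, e.g. by pci_enable_msi_block()):

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical per-device context, for illustration only. */
struct foo_dev {
	struct pci_dev *pdev;
	int nvec;			/* number of MSI vectors granted */
};

static void foo_teardown_msi(struct foo_dev *foo)
{
	int i;

	/*
	 * Free every vector of the block first; otherwise free_msi_irqs()
	 * would trip BUG_ON(irq_has_action(entry->irq + i)).
	 */
	for (i = 0; i < foo->nvec; i++)
		free_irq(foo->pdev->irq + i, foo);

	pci_disable_msi(foo->pdev);
}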
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 	if (!dev->msix_enabled)
 		return;
 	BUG_ON(list_empty(&dev->msi_list));
-	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
 	pos = entry->msi_attrib.pos;
 	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
 
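The switch to list_first_entry() is behaviour-neutral; in <linux/list.h> it is simply a readability wrapper:

/* include/linux/list.h */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)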
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	u16 control;
 	unsigned mask;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
 	msi_set_enable(dev, pos, 0);	/* Disable MSI during set up */
 
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	if (!entry)
 		return -ENOMEM;
 
 	entry->msi_attrib.is_msix = 0;
 	entry->msi_attrib.is_64 = is_64bit_address(control);
 	entry->msi_attrib.entry_nr = 0;
 	entry->msi_attrib.maskbit = is_mask_bit_support(control);
 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos = pos;
 
 	entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
 	/* All MSIs are unmasked by default, Mask them all */
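is_64bit_address() and is_mask_bit_support() come from drivers/pci/msi.h and test capability bits in the MSI Message Control word. A rough sketch of what they amount to, using the PCI_MSI_FLAGS_* constants from <linux/pci_regs.h> (the helper names and exact spelling below are illustrative, not the in-tree macros):

#include <linux/types.h>
#include <linux/pci_regs.h>

/* Illustrative only; the real helpers are macros in drivers/pci/msi.h. */
static inline int foo_msi_is_64bit(u16 control)
{
	return !!(control & PCI_MSI_FLAGS_64BIT);	/* 64-bit address capable */
}

static inline int foo_msi_has_maskbit(u16 control)
{
	return !!(control & PCI_MSI_FLAGS_MASKBIT);	/* per-vector mask bits present */
}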
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
 		msi_mask_irq(entry, mask, ~mask);
-		msi_free_irqs(dev);
+		free_msi_irqs(dev);
 		return ret;
 	}
 
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	return 0;
 }
 
-/**
- * msix_capability_init - configure device's MSI-X capability
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of struct msix_entry entries
- * @nvec: number of @entries
- *
- * Setup the MSI-X capability structure of device function with a
- * single MSI-X irq. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated irqs or non-zero for otherwise.
- **/
-static int msix_capability_init(struct pci_dev *dev,
-				struct msix_entry *entries, int nvec)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
+				     unsigned nr_entries)
 {
-	struct msi_desc *entry;
-	int pos, i, j, nr_entries, ret;
 	unsigned long phys_addr;
 	u32 table_offset;
-	u16 control;
 	u8 bir;
-	void __iomem *base;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
-
-	/* Ensure MSI-X is disabled while it is set up */
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
-
-	/* Request & Map MSI-X table region */
-	nr_entries = multi_msix_capable(control);
-
-	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
+	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
 	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
 	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
-	phys_addr = pci_resource_start (dev, bir) + table_offset;
-	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
-	if (base == NULL)
-		return -ENOMEM;
+	phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
+			      void __iomem *base, struct msix_entry *entries,
+			      int nvec)
+{
+	struct msi_desc *entry;
+	int i;
 
 	for (i = 0; i < nvec; i++) {
 		entry = alloc_msi_entry(dev);
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev,
 			if (!i)
 				iounmap(base);
 			else
-				msi_free_irqs(dev);
+				free_msi_irqs(dev);
 			/* No enough memory. Don't try again */
 			return -ENOMEM;
 		}
 
-		j = entries[i].entry;
-		entry->msi_attrib.is_msix = 1;
-		entry->msi_attrib.is_64 = 1;
-		entry->msi_attrib.entry_nr = j;
-		entry->msi_attrib.default_irq = dev->irq;
-		entry->msi_attrib.pos = pos;
-		entry->mask_base = base;
+		entry->msi_attrib.is_msix = 1;
+		entry->msi_attrib.is_64 = 1;
+		entry->msi_attrib.entry_nr = entries[i].entry;
+		entry->msi_attrib.default_irq = dev->irq;
+		entry->msi_attrib.pos = pos;
+		entry->mask_base = base;
 
 		list_add_tail(&entry->list, &dev->msi_list);
 	}
 
-	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
-	if (ret < 0) {
-		/* If we had some success report the number of irqs
-		 * we succeeded in setting up. */
-		int avail = 0;
-		list_for_each_entry(entry, &dev->msi_list, list) {
-			if (entry->irq != 0) {
-				avail++;
-			}
-		}
+	return 0;
+}
 
-		if (avail != 0)
-			ret = avail;
+static void msix_program_entries(struct pci_dev *dev,
+				 struct msix_entry *entries)
+{
+	struct msi_desc *entry;
+	int i = 0;
+
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
+						PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+		entries[i].vector = entry->irq;
+		set_irq_msi(entry->irq, entry);
+		entry->masked = readl(entry->mask_base + offset);
+		msix_mask_irq(entry, 1);
+		i++;
 	}
+}
 
-	if (ret) {
-		msi_free_irqs(dev);
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ *
+ * Setup the MSI-X capability structure of device function with a
+ * single MSI-X irq. A return of zero indicates the successful setup of
+ * requested MSI-X entries with allocated irqs or non-zero for otherwise.
+ **/
+static int msix_capability_init(struct pci_dev *dev,
+				struct msix_entry *entries, int nvec)
+{
+	int pos, ret;
+	u16 control;
+	void __iomem *base;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+	/* Ensure MSI-X is disabled while it is set up */
+	control &= ~PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
+	/* Request & Map MSI-X table region */
+	base = msix_map_region(dev, pos, multi_msix_capable(control));
+	if (!base)
+		return -ENOMEM;
+
+	ret = msix_setup_entries(dev, pos, base, entries, nvec);
+	if (ret)
 		return ret;
-	}
+
+	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+	if (ret)
+		goto error;
 
 	/*
 	 * Some devices require MSI-X to be enabled before we can touch the
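From a driver's point of view, the function above is reached through pci_enable_msix(): the caller fills in which MSI-X table slots it wants and, on success, reads back the Linux IRQ numbers that msix_program_entries() stored in .vector. A hedged sketch (FOO_NR_VECTORS and foo_setup_msix are illustrative names):

#include <linux/pci.h>

#define FOO_NR_VECTORS	4	/* illustrative vector count */

static int foo_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
	int i, err;

	for (i = 0; i < FOO_NR_VECTORS; i++)
		entries[i].entry = i;		/* requested MSI-X table slot */

	err = pci_enable_msix(pdev, entries, FOO_NR_VECTORS);
	if (err)
		return err;	/* < 0: error, > 0: only that many vectors available */

	/* entries[i].vector now holds the Linux IRQ for table slot i. */
	return 0;
}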
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
 	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
 
-	i = 0;
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		entries[i].vector = entry->irq;
-		set_irq_msi(entry->irq, entry);
-		j = entries[i].entry;
-		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
-					PCI_MSIX_ENTRY_VECTOR_CTRL);
-		msix_mask_irq(entry, 1);
-		i++;
-	}
+	msix_program_entries(dev, entries);
 
 	/* Set MSI-X enabled bits and unmask the function */
 	pci_intx_for_msi(dev, 0);
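msix_program_entries() replaces the open-coded loop on the left without changing behaviour: each MSI-X table entry is PCI_MSIX_ENTRY_SIZE (16) bytes and ends in a Vector Control dword whose bit 0 is the per-vector mask. A sketch of the masking that msix_mask_irq() performs on that dword (an approximation, not the in-tree implementation):

#include <linux/io.h>
#include <linux/types.h>

/* Approximate per-vector masking; bit 0 of Vector Control is the Mask bit. */
static void foo_msix_set_mask(void __iomem *vec_ctrl, int mask)
{
	u32 ctrl = readl(vec_ctrl);

	if (mask)
		ctrl |= 1;
	else
		ctrl &= ~1;
	writel(ctrl, vec_ctrl);
}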
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev,
 	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
 
 	return 0;
+
+error:
+	if (ret < 0) {
+		/*
+		 * If we had some success, report the number of irqs
+		 * we succeeded in setting up.
+		 */
+		struct msi_desc *entry;
+		int avail = 0;
+
+		list_for_each_entry(entry, &dev->msi_list, list) {
+			if (entry->irq != 0)
+				avail++;
+		}
+		if (avail != 0)
+			ret = avail;
+	}
+
+	free_msi_irqs(dev);
+
+	return ret;
 }
 
 /**
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev,
  * to determine if MSI/-X are supported for the device. If MSI/-X is
  * supported return 0, else return an error code.
  **/
-static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
+static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
 {
 	struct pci_bus *bus;
 	int ret;
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
 	if (nvec < 1)
 		return -ERANGE;
 
-	/* Any bridge which does NOT route MSI transactions from it's
-	 * secondary bus to it's primary bus must set NO_MSI flag on
+	/*
+	 * Any bridge which does NOT route MSI transactions from its
+	 * secondary bus to its primary bus must set NO_MSI flag on
 	 * the secondary pci_bus.
 	 * We expect only arch-specific PCI host bus controller driver
 	 * or quirks for specific PCI bridges to be setting NO_MSI.
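The NO_MSI flag referenced in this comment (PCI_BUS_FLAGS_NO_MSI, checked a few lines below) is normally set by a bridge quirk. A hedged sketch of what such a quirk looks like; the vendor/device IDs here are placeholders, not a real erratum:

#include <linux/pci.h>

/* Illustrative quirk only; 0x1234/0x5678 are placeholder IDs. */
static void quirk_bridge_no_msi(struct pci_dev *dev)
{
	if (dev->subordinate)
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_bridge_no_msi);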
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	dev->irq = desc->msi_attrib.default_irq;
 }
 
-void pci_disable_msi(struct pci_dev* dev)
+void pci_disable_msi(struct pci_dev *dev)
 {
-	struct msi_desc *entry;
-
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
 	pci_msi_shutdown(dev);
-
-	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
-	if (entry->msi_attrib.is_msix)
-		return;
-
-	msi_free_irqs(dev);
+	free_msi_irqs(dev);
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
-static int msi_free_irqs(struct pci_dev* dev)
-{
-	struct msi_desc *entry, *tmp;
-
-	list_for_each_entry(entry, &dev->msi_list, list) {
-		int i, nvec;
-		if (!entry->irq)
-			continue;
-		nvec = 1 << entry->msi_attrib.multiple;
-		for (i = 0; i < nvec; i++)
-			BUG_ON(irq_has_action(entry->irq + i));
-	}
-
-	arch_teardown_msi_irqs(dev);
-
-	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
-		if (entry->msi_attrib.is_msix) {
-			if (list_is_last(&entry->list, &dev->msi_list))
-				iounmap(entry->mask_base);
-		}
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	return 0;
-}
-
 /**
  * pci_msix_table_size - return the number of device's MSI-X table entries
  * @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev)
  * of irqs or MSI-X vectors available. Driver should use the returned value to
  * re-send its request.
  **/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 {
 	int status, nr_entries;
 	int i, j;
 
 	if (!entries)
 		return -EINVAL;
 
 	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
 	if (status)
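The "returned value" contract described in this kernel-doc is what the new error label in msix_capability_init() implements: a positive return is the number of vectors that could still be set up. A sketch of the retry loop a driver would build on top of it (foo_enable_msix_range is an illustrative name, not an existing helper):

#include <linux/pci.h>
#include <linux/errno.h>

static int foo_enable_msix_range(struct pci_dev *pdev,
				 struct msix_entry *entries, int nvec)
{
	int err;

	while (nvec >= 1) {
		err = pci_enable_msix(pdev, entries, nvec);
		if (err == 0)
			return nvec;	/* all requested vectors granted */
		if (err < 0)
			return err;	/* hard failure */
		nvec = err;		/* positive: retry with what is available */
	}
	return -ENOSPC;
}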
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 	WARN_ON(!!dev->msix_enabled);
 
 	/* Check whether driver already requested for MSI irq */
 	if (dev->msi_enabled) {
 		dev_info(&dev->dev, "can't enable MSI-X "
 			"(MSI IRQ already assigned)\n");
 		return -EINVAL;
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
 }
 EXPORT_SYMBOL(pci_enable_msix);
 
-static void msix_free_all_irqs(struct pci_dev *dev)
-{
-	msi_free_irqs(dev);
-}
-
-void pci_msix_shutdown(struct pci_dev* dev)
+void pci_msix_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev)
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
 }
-void pci_disable_msix(struct pci_dev* dev)
+
+void pci_disable_msix(struct pci_dev *dev)
 {
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
 	pci_msix_shutdown(dev);
-
-	msix_free_all_irqs(dev);
+	free_msi_irqs(dev);
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix);
  * allocated for this device function, are reclaimed to unused state,
  * which may be used later on.
  **/
-void msi_remove_pci_irq_vectors(struct pci_dev* dev)
+void msi_remove_pci_irq_vectors(struct pci_dev *dev)
 {
 	if (!pci_msi_enable || !dev)
 		return;
-
-	if (dev->msi_enabled)
-		msi_free_irqs(dev);
 
-	if (dev->msix_enabled)
-		msix_free_all_irqs(dev);
+	if (dev->msi_enabled || dev->msix_enabled)
+		free_msi_irqs(dev);
 }
 
 void pci_no_msi(void)