Diffstat (limited to 'drivers/pci/msi.c')
 drivers/pci/msi.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0042c365b29b..ba44fdfda66b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -298,7 +298,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
 
-	if (dev->current_state != PCI_D0) {
+	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
 		void __iomem *base = pci_msix_desc_addr(entry);
@@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 	if (affd) {
 		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
-			pr_err("Unable to allocate affinity masks, ignoring\n");
+			dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
+				nvec);
 	}
 
 	/* MSI Entry Initialization */
@@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 	if (affd) {
 		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
-			pr_err("Unable to allocate affinity masks, ignoring\n");
+			dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
+				nvec);
 	}
 
 	for (i = 0, curmsk = masks; i < nvec; i++) {
@@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_msi_vec_count);
 
-void pci_msi_shutdown(struct pci_dev *dev)
+static void pci_msi_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *desc;
 	u32 mask;
@@ -973,13 +975,18 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 	return msix_capability_init(dev, entries, nvec, affd);
 }
 
-void pci_msix_shutdown(struct pci_dev *dev)
+static void pci_msix_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	if (pci_dev_is_disconnected(dev)) {
+		dev->msix_enabled = 0;
+		return;
+	}
+
 	/* Return the device with MSI-X masked as initial states */
 	for_each_pci_msi_entry(entry, dev) {
 		/* Keep cached states to be restored */