author     Jiang Liu <jiang.liu@linux.intel.com>      2015-07-09 04:00:41 -0400
committer  Thomas Gleixner <tglx@linutronix.de>       2015-07-22 12:37:43 -0400
commit     5004e98a91e8ad600f5b00872e9ddad810258f08 (patch)
tree       f2b75a9f502ccc74746ef080e315dcb67da20256 /drivers/pci/msi.c
parent     39118e31e1daae43048f5deaa3e0894d2732a7d6 (diff)
PCI: Use for_each_pci_msi_entry() to access MSI device list
Use the accessor for_each_pci_msi_entry() to access the MSI device list, so that msi_list can easily be moved from struct pci_dev into struct device later.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Reviewed-by: Yijing Wang <wangyijing@huawei.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: xen-devel@lists.xenproject.org
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Stuart Yoder <stuart.yoder@freescale.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Link: http://lkml.kernel.org/r/1436428847-8886-7-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
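For context, a rough sketch of what the accessors used below amount to at this point in the series. The authoritative definitions live in include/linux/msi.h and were introduced by an earlier patch; the exact macro bodies shown here are an assumption for illustration only, written to match how the call sites in this diff use them:

	/*
	 * Illustrative sketch, not the verbatim kernel definitions.
	 * While msi_list is still a member of struct pci_dev,
	 * dev_to_msi_list() reaches it through the embedded struct device,
	 * so only this one macro needs to change once msi_list moves into
	 * struct device itself.
	 */
	#define dev_to_msi_list(dev)	(&to_pci_dev((dev))->msi_list)

	#define for_each_pci_msi_entry(desc, pdev)			\
		list_for_each_entry((desc), dev_to_msi_list(&(pdev)->dev), list)

	#define first_msi_entry(pdev)					\
		list_first_entry(dev_to_msi_list(&(pdev)->dev), struct msi_desc, list)

Because every call site now goes through these accessors instead of dereferencing dev->msi_list directly, relocating the list head later only requires updating the accessor definitions, not the drivers/pci/msi.c call sites converted below.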
Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r--  drivers/pci/msi.c  39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ab4174243962..540613e5560a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -131,7 +131,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		ret = arch_setup_msi_irq(dev, entry);
 		if (ret < 0)
 			return ret;
@@ -151,7 +151,7 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
 	int i;
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				arch_teardown_msi_irq(entry->irq + i);
@@ -168,7 +168,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 
 	entry = NULL;
 	if (dev->msix_enabled) {
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (irq == entry->irq)
 				break;
 		}
@@ -282,7 +282,7 @@ void default_restore_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		default_restore_msi_irq(dev, entry->irq);
 }
 
@@ -363,21 +363,22 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
+	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
 	struct msi_desc *entry, *tmp;
 	struct attribute **msi_attrs;
 	struct device_attribute *dev_attr;
 	int i, count = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				BUG_ON(irq_has_action(entry->irq + i));
 
 	pci_msi_teardown_msi_irqs(dev);
 
-	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+	list_for_each_entry_safe(entry, tmp, msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			if (list_is_last(&entry->list, &dev->msi_list))
+			if (list_is_last(&entry->list, msi_list))
 				iounmap(entry->mask_base);
 		}
 
@@ -448,7 +449,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
 	if (!dev->msix_enabled)
 		return;
-	BUG_ON(list_empty(&dev->msi_list));
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
@@ -456,7 +457,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		msix_mask_irq(entry, entry->masked);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
@@ -501,7 +502,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	int count = 0;
 
 	/* Determine how many msi entries we have */
-	list_for_each_entry(entry, &pdev->msi_list, list)
+	for_each_pci_msi_entry(entry, pdev)
 		++num_msi;
 	if (!num_msi)
 		return 0;
@@ -510,7 +511,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
 	if (!msi_attrs)
 		return -ENOMEM;
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
 		if (!msi_dev_attr)
 			goto error_attrs;
@@ -599,7 +600,7 @@ static int msi_verify_entries(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (!dev->no_64bit_msi || !entry->msg.address_hi)
 			continue;
 		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -636,7 +637,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
-	list_add_tail(&entry->list, &dev->msi_list);
+	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -713,7 +714,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->mask_base = base;
 		entry->nvec_used = 1;
 
-		list_add_tail(&entry->list, &dev->msi_list);
+		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 	}
 
 	return 0;
@@ -725,7 +726,7 @@ static void msix_program_entries(struct pci_dev *dev,
 	struct msi_desc *entry;
 	int i = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
 
@@ -806,7 +807,7 @@ out_avail:
 		struct msi_desc *entry;
 		int avail = 0;
 
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (entry->irq != 0)
 				avail++;
 		}
@@ -895,8 +896,8 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
-	BUG_ON(list_empty(&dev->msi_list));
-	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+	desc = first_msi_entry(dev);
 
 	pci_msi_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
@@ -1001,7 +1002,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 		return;
 
 	/* Return the device with MSI-X masked as initial states */
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		/* Keep cached states to be restored */
 		__pci_msix_desc_mask_irq(entry, 1);
 	}