author     Christoph Hellwig <hch@lst.de>          2016-11-08 20:15:04 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2016-11-09 02:25:09 -0500
commit     61e1c5905290efe48bacda5e342d4af4cb1b923b (patch)
tree       127c6852ff805f052e4248556cd8f19026a298c5
parent     67c93c218dc5d1b45d547771f1fdb44a381e1faf (diff)
PCI/MSI: Propagate IRQ affinity description through the MSI code
No API change yet, just pass it down all the way from
pci_alloc_irq_vectors() to the core MSI code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Link: http://lkml.kernel.org/r/1478654107-7384-5-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 drivers/pci/msi.c | 66 +++++++++++++++++++++++++++++++++---------------------------------
 1 file changed, 33 insertions(+), 33 deletions(-)
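For context on what the new `affd` argument replaces: a driver that wants automatically spread IRQ affinity keeps calling the existing pci_alloc_irq_vectors() with PCI_IRQ_AFFINITY; after this patch that flag is translated into the `msi_default_affd` pointer, which is handed down through __pci_enable_msix_range()/__pci_enable_msi_range() to irq_create_affinity_masks(). A minimal sketch of such a caller (the driver name, queue count, and error handling are hypothetical and not part of this patch):

```c
#include <linux/pci.h>

/*
 * Hypothetical probe-time helper: request up to nr_queues vectors with
 * automatic affinity spreading.  PCI_IRQ_AFFINITY is what ends up as the
 * non-NULL irq_affinity pointer inside the MSI core after this change.
 */
static int foo_alloc_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
	int nvecs;

	nvecs = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_AFFINITY);
	if (nvecs < 0)
		return nvecs;	/* neither MSI-X nor MSI could be enabled */

	return nvecs;		/* number of vectors actually allocated */
}
```

Drivers that still use the older pci_enable_msi_range()/pci_enable_msix_range() entry points now simply pass NULL for the affinity descriptor, so their behaviour is unchanged.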
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f4a108b59336..512f388a74f2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,15 +551,14 @@ error_attrs:
 }
 
 static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 {
-	static const struct irq_affinity default_affd;
 	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
 	u16 control;
 
-	if (affinity) {
-		masks = irq_create_affinity_masks(nvec, &default_affd);
+	if (affd) {
+		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -619,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
  * an error, and a positive return value indicates the number of interrupts
  * which could have been allocated.
  */
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+			       const struct irq_affinity *affd)
 {
 	struct msi_desc *entry;
 	int ret;
@@ -627,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
 
 	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev, nvec, affinity);
+	entry = msi_setup_entry(dev, nvec, affd);
 	if (!entry)
 		return -ENOMEM;
 
@@ -691,15 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
-			      bool affinity)
+			      const struct irq_affinity *affd)
 {
-	static const struct irq_affinity default_affd;
 	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
 	int ret, i;
 
-	if (affinity) {
-		masks = irq_create_affinity_masks(nvec, &default_affd);
+	if (affd) {
+		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -755,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
  * @dev: pointer to the pci_dev data structure of MSI-X device function
  * @entries: pointer to an array of struct msix_entry entries
  * @nvec: number of @entries
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: Optional pointer to enable automatic affinity assignement
  *
  * Setup the MSI-X capability structure of device function with a
  * single MSI-X irq. A return of zero indicates the successful setup of
  * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
-				int nvec, bool affinity)
+				int nvec, const struct irq_affinity *affd)
 {
 	int ret;
 	u16 control;
@@ -777,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 	if (!base)
 		return -ENOMEM;
 
-	ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+	ret = msix_setup_entries(dev, base, entries, nvec, affd);
 	if (ret)
 		return ret;
 
@@ -958,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_msix_vec_count);
 
 static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
-			     int nvec, bool affinity)
+			     int nvec, const struct irq_affinity *affd)
 {
 	int nr_entries;
 	int i, j;
@@ -990,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 		dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
 		return -EINVAL;
 	}
-	return msix_capability_init(dev, entries, nvec, affinity);
+	return msix_capability_init(dev, entries, nvec, affd);
 }
 
 /**
@@ -1010,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 **/
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 {
-	return __pci_enable_msix(dev, entries, nvec, false);
+	return __pci_enable_msix(dev, entries, nvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix);
 
@@ -1061,10 +1060,8 @@ int pci_msi_enabled(void)
 EXPORT_SYMBOL(pci_msi_enabled);
 
 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
-		unsigned int flags)
+		const struct irq_affinity *affd)
 {
-	static const struct irq_affinity default_affd;
-	bool affinity = flags & PCI_IRQ_AFFINITY;
 	int nvec;
 	int rc;
 
@@ -1093,13 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 		nvec = maxvec;
 
 	for (;;) {
-		if (affinity) {
-			nvec = irq_calc_affinity_vectors(nvec, &default_affd);
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = msi_capability_init(dev, nvec, affinity);
+		rc = msi_capability_init(dev, nvec, affd);
 		if (rc == 0)
 			return nvec;
 
@@ -1126,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-	return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+	return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);
 
 static int __pci_enable_msix_range(struct pci_dev *dev,
-		struct msix_entry *entries, int minvec, int maxvec,
-		unsigned int flags)
+		struct msix_entry *entries, int minvec,
+		int maxvec, const struct irq_affinity *affd)
 {
-	static const struct irq_affinity default_affd;
-	bool affinity = flags & PCI_IRQ_AFFINITY;
 	int rc, nvec = maxvec;
 
 	if (maxvec < minvec)
 		return -ERANGE;
 
 	for (;;) {
-		if (affinity) {
-			nvec = irq_calc_affinity_vectors(nvec, &default_affd);
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = __pci_enable_msix(dev, entries, nvec, affinity);
+		rc = __pci_enable_msix(dev, entries, nvec, affd);
 		if (rc == 0)
 			return nvec;
 
@@ -1179,7 +1174,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 		int minvec, int maxvec)
 {
-	return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
@@ -1203,17 +1198,22 @@ EXPORT_SYMBOL(pci_enable_msix_range);
 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
 		unsigned int max_vecs, unsigned int flags)
 {
+	static const struct irq_affinity msi_default_affd;
+	const struct irq_affinity *affd = NULL;
 	int vecs = -ENOSPC;
 
+	if (flags & PCI_IRQ_AFFINITY)
+		affd = &msi_default_affd;
+
 	if (flags & PCI_IRQ_MSIX) {
 		vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
-				flags);
+				affd);
 		if (vecs > 0)
 			return vecs;
 	}
 
 	if (flags & PCI_IRQ_MSI) {
-		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
 		if (vecs > 0)
 			return vecs;
 	}