summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2019-02-16 12:13:09 -0500
committerThomas Gleixner <tglx@linutronix.de>2019-02-18 05:21:28 -0500
commitc66d4bd110a1f8a68c1a88bfbf866eb50c6464b7 (patch)
tree772a8ffe770a1386abd0a5f541cc2b38f2f4c1cd
parent9cfef55bb57e7620c63087be18a76351628f8d0f (diff)
genirq/affinity: Add new callback for (re)calculating interrupt sets
The interrupt affinity spreading mechanism supports to spread out affinities for one or more interrupt sets. An interrupt set contains one or more interrupts. Each set is mapped to a specific functionality of a device, e.g. general I/O queues and read I/O queues of multiqueue block devices. The number of interrupts per set is defined by the driver. It depends on the total number of available interrupts for the device, which is determined by the PCI capabilities and the availability of underlying CPU resources, and the number of queues which the device provides and the driver wants to instantiate. The driver passes initial configuration for the interrupt allocation via a pointer to struct irq_affinity. Right now the allocation mechanism is complex as it requires to have a loop in the driver to determine the maximum number of interrupts which are provided by the PCI capabilities and the underlying CPU resources. This loop would have to be replicated in every driver which wants to utilize this mechanism. That's unwanted code duplication and error prone. In order to move this into generic facilities it is required to have a mechanism, which allows the recalculation of the interrupt sets and their size, in the core code. As the core code does not have any knowledge about the underlying device, a driver specific callback is required in struct irq_affinity, which can be invoked by the core code. The callback gets the number of available interrupts as an argument, so the driver can calculate the corresponding number and size of interrupt sets. At the moment the struct irq_affinity pointer which is handed in from the driver and passed through to several core functions is marked 'const', but for the callback to be able to modify the data in the struct it's required to remove the 'const' qualifier. Add the optional callback to struct irq_affinity, which allows drivers to recalculate the number and size of interrupt sets and remove the 'const' qualifier. 
For simple invocations, which do not supply a callback, a default callback is installed, which just sets nr_sets to 1 and transfers the number of spreadable vectors to the set_size array at index 0. This is for now guarded by a check for nr_sets != 0 to keep the NVME driver working until it is converted to the callback mechanism. To make sure that the driver configuration is correct under all circumstances the callback is invoked even when there are no interrupts for queues left, i.e. the pre/post requirements already exhaust the number of available interrupts. At the PCI layer irq_create_affinity_masks() has to be invoked even for the case where the legacy interrupt is used. That ensures that the callback is invoked and the device driver can adjust to that situation. [ tglx: Fixed the simple case (no sets required). Moved the sanity check for nr_sets after the invocation of the callback so it catches broken drivers. Fixed the kernel doc comments for struct irq_affinity and de-'This patch'-ed the changelog ] Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Marc Zyngier <marc.zyngier@arm.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Bjorn Helgaas <helgaas@kernel.org> Cc: Jens Axboe <axboe@kernel.dk> Cc: linux-block@vger.kernel.org Cc: Sagi Grimberg <sagi@grimberg.me> Cc: linux-nvme@lists.infradead.org Cc: linux-pci@vger.kernel.org Cc: Keith Busch <keith.busch@intel.com> Cc: Sumit Saxena <sumit.saxena@broadcom.com> Cc: Kashyap Desai <kashyap.desai@broadcom.com> Cc: Shivasharan Srikanteshwara <shivasharan.srikanteshwara@broadcom.com> Link: https://lkml.kernel.org/r/20190216172228.512444498@linutronix.de
-rw-r--r--drivers/pci/msi.c25
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--include/linux/interrupt.h10
-rw-r--r--include/linux/pci.h4
-rw-r--r--kernel/irq/affinity.c62
5 files changed, 71 insertions, 32 deletions
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4c0b47867258..7149d6315726 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -532,7 +532,7 @@ error_attrs:
532} 532}
533 533
534static struct msi_desc * 534static struct msi_desc *
535msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) 535msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
536{ 536{
537 struct irq_affinity_desc *masks = NULL; 537 struct irq_affinity_desc *masks = NULL;
538 struct msi_desc *entry; 538 struct msi_desc *entry;
@@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev)
597 * which could have been allocated. 597 * which could have been allocated.
598 */ 598 */
599static int msi_capability_init(struct pci_dev *dev, int nvec, 599static int msi_capability_init(struct pci_dev *dev, int nvec,
600 const struct irq_affinity *affd) 600 struct irq_affinity *affd)
601{ 601{
602 struct msi_desc *entry; 602 struct msi_desc *entry;
603 int ret; 603 int ret;
@@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
669 669
670static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, 670static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
671 struct msix_entry *entries, int nvec, 671 struct msix_entry *entries, int nvec,
672 const struct irq_affinity *affd) 672 struct irq_affinity *affd)
673{ 673{
674 struct irq_affinity_desc *curmsk, *masks = NULL; 674 struct irq_affinity_desc *curmsk, *masks = NULL;
675 struct msi_desc *entry; 675 struct msi_desc *entry;
@@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev,
736 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 736 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
737 **/ 737 **/
738static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, 738static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
739 int nvec, const struct irq_affinity *affd) 739 int nvec, struct irq_affinity *affd)
740{ 740{
741 int ret; 741 int ret;
742 u16 control; 742 u16 control;
@@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
932EXPORT_SYMBOL(pci_msix_vec_count); 932EXPORT_SYMBOL(pci_msix_vec_count);
933 933
934static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, 934static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
935 int nvec, const struct irq_affinity *affd) 935 int nvec, struct irq_affinity *affd)
936{ 936{
937 int nr_entries; 937 int nr_entries;
938 int i, j; 938 int i, j;
@@ -1018,7 +1018,7 @@ int pci_msi_enabled(void)
1018EXPORT_SYMBOL(pci_msi_enabled); 1018EXPORT_SYMBOL(pci_msi_enabled);
1019 1019
1020static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, 1020static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1021 const struct irq_affinity *affd) 1021 struct irq_affinity *affd)
1022{ 1022{
1023 int nvec; 1023 int nvec;
1024 int rc; 1024 int rc;
@@ -1086,7 +1086,7 @@ EXPORT_SYMBOL(pci_enable_msi);
1086 1086
1087static int __pci_enable_msix_range(struct pci_dev *dev, 1087static int __pci_enable_msix_range(struct pci_dev *dev,
1088 struct msix_entry *entries, int minvec, 1088 struct msix_entry *entries, int minvec,
1089 int maxvec, const struct irq_affinity *affd) 1089 int maxvec, struct irq_affinity *affd)
1090{ 1090{
1091 int rc, nvec = maxvec; 1091 int rc, nvec = maxvec;
1092 1092
@@ -1165,9 +1165,9 @@ EXPORT_SYMBOL(pci_enable_msix_range);
1165 */ 1165 */
1166int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1166int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1167 unsigned int max_vecs, unsigned int flags, 1167 unsigned int max_vecs, unsigned int flags,
1168 const struct irq_affinity *affd) 1168 struct irq_affinity *affd)
1169{ 1169{
1170 static const struct irq_affinity msi_default_affd; 1170 struct irq_affinity msi_default_affd = {0};
1171 int msix_vecs = -ENOSPC; 1171 int msix_vecs = -ENOSPC;
1172 int msi_vecs = -ENOSPC; 1172 int msi_vecs = -ENOSPC;
1173 1173
@@ -1196,6 +1196,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1196 /* use legacy irq if allowed */ 1196 /* use legacy irq if allowed */
1197 if (flags & PCI_IRQ_LEGACY) { 1197 if (flags & PCI_IRQ_LEGACY) {
1198 if (min_vecs == 1 && dev->irq) { 1198 if (min_vecs == 1 && dev->irq) {
1199 /*
1200 * Invoke the affinity spreading logic to ensure that
1201 * the device driver can adjust queue configuration
1202 * for the single interrupt case.
1203 */
1204 if (affd)
1205 irq_create_affinity_masks(1, affd);
1199 pci_intx(dev, 1); 1206 pci_intx(dev, 1);
1200 return 1; 1207 return 1;
1201 } 1208 }
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 74e260027c7d..76e49d902609 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
3566 3566
3567 /* if eqid_count == 1 fall back to INTX */ 3567 /* if eqid_count == 1 fall back to INTX */
3568 if (enable_msix && nvec > 1) { 3568 if (enable_msix && nvec > 1) {
3569 const struct irq_affinity desc = { .post_vectors = 1 }; 3569 struct irq_affinity desc = { .post_vectors = 1 };
3570 3570
3571 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, 3571 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
3572 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { 3572 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5afdfd5dc39b..dcdddf4fa76b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -252,12 +252,18 @@ struct irq_affinity_notify {
252 * @nr_sets: The number of interrupt sets for which affinity 252 * @nr_sets: The number of interrupt sets for which affinity
253 * spreading is required 253 * spreading is required
254 * @set_size: Array holding the size of each interrupt set 254 * @set_size: Array holding the size of each interrupt set
255 * @calc_sets: Callback for calculating the number and size
256 * of interrupt sets
257 * @priv: Private data for usage by @calc_sets, usually a
258 * pointer to driver/device specific data.
255 */ 259 */
256struct irq_affinity { 260struct irq_affinity {
257 unsigned int pre_vectors; 261 unsigned int pre_vectors;
258 unsigned int post_vectors; 262 unsigned int post_vectors;
259 unsigned int nr_sets; 263 unsigned int nr_sets;
260 unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; 264 unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
265 void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
266 void *priv;
261}; 267};
262 268
263/** 269/**
@@ -317,7 +323,7 @@ extern int
317irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 323irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
318 324
319struct irq_affinity_desc * 325struct irq_affinity_desc *
320irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd); 326irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
321 327
322unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, 328unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
323 const struct irq_affinity *affd); 329 const struct irq_affinity *affd);
@@ -354,7 +360,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
354} 360}
355 361
356static inline struct irq_affinity_desc * 362static inline struct irq_affinity_desc *
357irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd) 363irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
358{ 364{
359 return NULL; 365 return NULL;
360} 366}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 65f1d8c2f082..e7c51b00cdfe 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1393,7 +1393,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
1393} 1393}
1394int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1394int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1395 unsigned int max_vecs, unsigned int flags, 1395 unsigned int max_vecs, unsigned int flags,
1396 const struct irq_affinity *affd); 1396 struct irq_affinity *affd);
1397 1397
1398void pci_free_irq_vectors(struct pci_dev *dev); 1398void pci_free_irq_vectors(struct pci_dev *dev);
1399int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1399int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
@@ -1419,7 +1419,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
1419static inline int 1419static inline int
1420pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1420pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1421 unsigned int max_vecs, unsigned int flags, 1421 unsigned int max_vecs, unsigned int flags,
1422 const struct irq_affinity *aff_desc) 1422 struct irq_affinity *aff_desc)
1423{ 1423{
1424 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) 1424 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1425 return 1; 1425 return 1;
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 278289c091bb..d737dc60ab52 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -230,6 +230,12 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
230 return ret; 230 return ret;
231} 231}
232 232
233static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
234{
235 affd->nr_sets = 1;
236 affd->set_size[0] = affvecs;
237}
238
233/** 239/**
234 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading 240 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
235 * @nvecs: The total number of vectors 241 * @nvecs: The total number of vectors
@@ -240,20 +246,46 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
240struct irq_affinity_desc * 246struct irq_affinity_desc *
241irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd) 247irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
242{ 248{
243 unsigned int affvecs, curvec, usedvecs, nr_sets, i; 249 unsigned int affvecs, curvec, usedvecs, i;
244 unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
245 struct irq_affinity_desc *masks = NULL; 250 struct irq_affinity_desc *masks = NULL;
246 251
247 /* 252 /*
248 * If there aren't any vectors left after applying the pre/post 253 * Determine the number of vectors which need interrupt affinities
249 * vectors don't bother with assigning affinity. 254 * assigned. If the pre/post request exhausts the available vectors
255 * then nothing to do here except for invoking the calc_sets()
256 * callback so the device driver can adjust to the situation. If there
257 * is only a single vector, then managing the queue is pointless as
258 * well.
250 */ 259 */
251 if (nvecs == affd->pre_vectors + affd->post_vectors) 260 if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
252 return NULL; 261 affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
262 else
263 affvecs = 0;
264
265 /*
266 * Simple invocations do not provide a calc_sets() callback. Install
267 * the generic one. The check for affd->nr_sets is a temporary
268 * workaround and will be removed after the NVME driver is converted
269 * over.
270 */
271 if (!affd->nr_sets && !affd->calc_sets)
272 affd->calc_sets = default_calc_sets;
273
274 /*
275 * If the device driver provided a calc_sets() callback let it
276 * recalculate the number of sets and their size. The check will go
277 * away once the NVME driver is converted over.
278 */
279 if (affd->calc_sets)
280 affd->calc_sets(affd, affvecs);
253 281
254 if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS)) 282 if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
255 return NULL; 283 return NULL;
256 284
285 /* Nothing to assign? */
286 if (!affvecs)
287 return NULL;
288
257 masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); 289 masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
258 if (!masks) 290 if (!masks)
259 return NULL; 291 return NULL;
@@ -261,21 +293,13 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
261 /* Fill out vectors at the beginning that don't need affinity */ 293 /* Fill out vectors at the beginning that don't need affinity */
262 for (curvec = 0; curvec < affd->pre_vectors; curvec++) 294 for (curvec = 0; curvec < affd->pre_vectors; curvec++)
263 cpumask_copy(&masks[curvec].mask, irq_default_affinity); 295 cpumask_copy(&masks[curvec].mask, irq_default_affinity);
296
264 /* 297 /*
265 * Spread on present CPUs starting from affd->pre_vectors. If we 298 * Spread on present CPUs starting from affd->pre_vectors. If we
266 * have multiple sets, build each sets affinity mask separately. 299 * have multiple sets, build each sets affinity mask separately.
267 */ 300 */
268 affvecs = nvecs - affd->pre_vectors - affd->post_vectors; 301 for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
269 nr_sets = affd->nr_sets; 302 unsigned int this_vecs = affd->set_size[i];
270 if (!nr_sets) {
271 nr_sets = 1;
272 set_size[0] = affvecs;
273 } else {
274 memcpy(set_size, affd->set_size, nr_sets * sizeof(unsigned int));
275 }
276
277 for (i = 0, usedvecs = 0; i < nr_sets; i++) {
278 unsigned int this_vecs = set_size[i];
279 int ret; 303 int ret;
280 304
281 ret = irq_build_affinity_masks(affd, curvec, this_vecs, 305 ret = irq_build_affinity_masks(affd, curvec, this_vecs,
@@ -318,7 +342,9 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
318 if (resv > minvec) 342 if (resv > minvec)
319 return 0; 343 return 0;
320 344
321 if (affd->nr_sets) { 345 if (affd->calc_sets) {
346 set_vecs = maxvec - resv;
347 } else if (affd->nr_sets) {
322 unsigned int i; 348 unsigned int i;
323 349
324 for (i = 0, set_vecs = 0; i < affd->nr_sets; i++) 350 for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)