author     Christoph Hellwig <hch@lst.de>        2016-11-08 20:15:03 -0500
committer  Thomas Gleixner <tglx@linutronix.de>  2016-11-09 02:25:09 -0500
commit     67c93c218dc5d1b45d547771f1fdb44a381e1faf (patch)
tree       ac27583ebabefaed3bba36f955c57e12aa5136ef
parent     212bd846223c718b6577d4df16fd8d05a55ad914 (diff)
genirq/affinity: Handle pre/post vectors in irq_create_affinity_masks()
Only calculate the affinity for the main I/O vectors, and skip the
pre or post vectors specified by struct irq_affinity.
Also remove the irq_affinity cpumask argument that has never been used.
If we ever need it in the future we can pass it through struct
irq_affinity.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Link: http://lkml.kernel.org/r/1478654107-7384-4-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  drivers/pci/msi.c          |  6
-rw-r--r--  include/linux/interrupt.h  |  4
-rw-r--r--  kernel/irq/affinity.c      | 46
3 files changed, 31 insertions(+), 25 deletions(-)
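For context, a minimal, hypothetical caller of the reworked interface follows. struct irq_affinity (with its pre_vectors/post_vectors fields) and irq_create_affinity_masks() are taken from the patch below; the function name, the field values and the surrounding driver logic are illustrative assumptions only, not part of this patch.

#include <linux/interrupt.h>

/*
 * Hypothetical caller sketch (not part of this patch): reserve one
 * pre vector that must not be spread (e.g. an admin interrupt) and
 * let the remaining nvecs - 1 vectors be distributed across CPUs.
 */
static struct cpumask *example_create_masks(int nvecs)
{
	static const struct irq_affinity affd = {
		.pre_vectors	= 1,	/* masks[0] gets cpu_possible_mask */
		.post_vectors	= 0,
	};

	return irq_create_affinity_masks(nvecs, &affd);
}

With nvecs = 8 and { .pre_vectors = 1, .post_vectors = 1 }, for example, masks[0] and masks[7] would be set to cpu_possible_mask while masks[1]..masks[6] are spread across the online NUMA nodes. The PCI MSI hunks below pass an all-zero default_affd, which keeps the previous behaviour of spreading every vector.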
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index dad2da7cf80e..f4a108b59336 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -553,12 +553,13 @@ error_attrs:
 static struct msi_desc *
 msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
 	u16 control;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -692,12 +693,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
 			      bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
 	int ret, i;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9081f23bc0ff..53144e78a369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -290,7 +290,7 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
 int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
@@ -325,7 +325,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 {
 	return NULL;
 }
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 8d9259727cb4..17360bd9619b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -51,16 +51,16 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity:		The affinity mask to spread. If NULL cpu_online_mask
- *			is used
- * @nvecs:		The number of vectors
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
  *
  * Returns the masks pointer or NULL if allocation failed.
  */
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
-					  int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -68,46 +68,46 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
-	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		goto out;
 
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
+
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is less than or equal the
 	 * number of vectors we just spread the vectors across the nodes.
 	 */
-	if (nvec <= nodes) {
+	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == nvec)
+			if (++curvec == affv)
 				break;
 		}
-		goto outonl;
+		goto done;
 	}
 
 	/* Spread the vectors per node */
-	vecs_per_node = nvec / nodes;
+	vecs_per_node = affv / nodes;
 	/* Account for rounding errors */
-	extra_vecs = nvec - (nodes * vecs_per_node);
+	extra_vecs = affv - (nodes * vecs_per_node);
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign = vecs_per_node;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, affinity, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,12 +119,16 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= nvec)
+		if (curvec >= affv)
 			break;
 	}
 
-outonl:
+done:
 	put_online_cpus();
+
+	/* Fill out vectors at the end that don't need affinity */
+	for (; curvec < nvecs; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;