author		Christoph Hellwig <hch@lst.de>		2016-11-15 04:12:58 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-11-16 12:44:01 -0500
commit		bfe130773862bb3a02cdc4d4c2169f7f0210a46b
tree		e026aa41056eb5d31188f47fc0b499566ae6ab91 /kernel/irq/affinity.c
parent		0cf71b04467bc34063cecae577f12481da6cc565
genirq/affinity: Take reserved vectors into account when spreading irqs
The recent addition of reserved vectors at the beginning or the end of the
vector space did not take the reserved vectors at the beginning into account
in the various loop exit conditions. As a consequence, the last vectors of
the spread area are not included in the spread algorithm; they are treated
like the reserved vectors at the end of the vector space and get the default
affinity mask assigned.

Sum up the affinity vectors and the reserved vectors at the beginning and
use the sum as the exit condition.

[ tglx: Fixed all conditions instead of only one and massaged changelog ]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/1479201178-29604-2-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
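For clarity, here is a small standalone sketch of the bound arithmetic the
fix changes. The concrete nvecs/pre_vectors/post_vectors values are made up
for illustration and are not taken from the patch:

	#include <stdio.h>

	int main(void)
	{
		/*
		 * Hypothetical layout: 16 vectors total, 2 reserved at each
		 * end. The spread area is the index range
		 * [pre_vectors, pre_vectors + affv), i.e. vectors 2..13 here.
		 */
		int nvecs = 16, pre_vectors = 2, post_vectors = 2;
		int affv = nvecs - pre_vectors - post_vectors;	/* 12 vectors to spread */
		int last_affv = affv + pre_vectors;		/* 14: first index past the spread area */

		/*
		 * The old exit condition compared curvec against affv (12),
		 * so the loops stopped two vectors short: indices 12 and 13
		 * were never spread and fell back to the default affinity.
		 */
		printf("old bound: spread stops before vector %d\n", affv);
		printf("new bound: spread stops before vector %d\n", last_affv);
		return 0;
	}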
Diffstat (limited to 'kernel/irq/affinity.c')
kernel/irq/affinity.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 17360bd9619b..49eb38d48816 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -61,6 +61,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
 	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -87,7 +88,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == affv)
+			if (++curvec == last_affv)
 				break;
 		}
 		goto done;
@@ -107,7 +108,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < last_affv && v < vecs_to_assign;
+				curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,7 +121,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 	}
 
-	if (curvec >= affv)
+	if (curvec >= last_affv)
 		break;
 	}
 
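For context, a hedged sketch of where the reserved vectors come from: a
driver describes them in struct irq_affinity, for instance when allocating
its vectors with pci_alloc_irq_vectors_affinity() from the same patch
series. The device, queue count, and pre_vectors value below are
assumptions for illustration, not part of this patch:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	/*
	 * Illustrative only: one vector reserved up front (e.g. an admin or
	 * config interrupt that should keep the default affinity), none at
	 * the end. irq_create_affinity_masks() must then spread over the
	 * vectors [1, 1 + affv), which is exactly the range the last_affv
	 * bound above covers.
	 */
	static const struct irq_affinity example_affd = {
		.pre_vectors	= 1,
		.post_vectors	= 0,
	};

	static int example_setup_irqs(struct pci_dev *pdev, int nr_queues)
	{
		/*
		 * Request one reserved vector plus one per queue; affinity
		 * spreading of the non-reserved vectors happens internally.
		 */
		return pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
				PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY,
				&example_affd);
	}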