summaryrefslogtreecommitdiffstats
path: root/kernel/irq/affinity.c
diff options
context:
space:
mode:
author: Ming Lei <ming.lei@redhat.com> 2019-08-15 22:28:48 -0400
committer: Thomas Gleixner <tglx@linutronix.de> 2019-08-27 10:31:17 -0400
commit53c1788b7d7720565214a466afffdc818d8c6e5f (patch)
tree01b97c870e8d6dd73c1b3c3b720ec4b54663d0cc /kernel/irq/affinity.c
parentb6a32bbd8735def2d0d696ba59205d1874b7800f (diff)
genirq/affinity: Improve __irq_build_affinity_masks()
One invariant of __irq_build_affinity_masks() is that all CPUs in the specified masks (cpu_mask AND node_to_cpumask for each node) should be covered during the spread. Even though all requested vectors have been reached, it's still required to spread vectors among remained CPUs. A similar policy has been taken in case of 'numvecs <= nodes' already. So remove the following check inside the loop: if (done >= numvecs) break; Meantime assign at least 1 vector for remaining nodes if 'numvecs' vectors have been handled already. Also, if the specified cpumask for one numa node is empty, simply do not spread vectors on this node. Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Link: https://lkml.kernel.org/r/20190816022849.14075-2-ming.lei@redhat.com
Diffstat (limited to 'kernel/irq/affinity.c')
-rw-r--r--kernel/irq/affinity.c26
1 files changed, 18 insertions, 8 deletions
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 6fef48033f96..c7cca942bd8a 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -129,14 +129,26 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 	for_each_node_mask(n, nodemsk) {
 		unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
 
-		/* Spread the vectors per node */
-		vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
-
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-
-		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
+		if (!ncpus)
+			continue;
+
+		/*
+		 * Calculate the number of cpus per vector
+		 *
+		 * Spread the vectors evenly per node. If the requested
+		 * vector number has been reached, simply allocate one
+		 * vector for each remaining node so that all nodes can
+		 * be covered
+		 */
+		if (numvecs > done)
+			vecs_per_node = max_t(unsigned,
+					(numvecs - done) / nodes, 1);
+		else
+			vecs_per_node = 1;
+
 		vecs_to_assign = min(vecs_per_node, ncpus);
 
 		/* Account for rounding errors */
@@ -156,13 +168,11 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 		}
 
 		done += v;
-		if (done >= numvecs)
-			break;
 		if (curvec >= last_affv)
 			curvec = firstvec;
 		--nodes;
 	}
-	return done;
+	return done < numvecs ? done : numvecs;
 }
 
 /*