author    Thomas Gleixner <tglx@linutronix.de>    2019-02-16 12:13:07 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2019-02-18 05:21:27 -0500
commit    0145c30e896d26e638d27c957d9eed72893c1c92 (patch)
tree      408f9226dfbb5ddf0bdf2459f292ba08bb4d4d25
parent    d869f86645fc07dc83b89b68f1a22d91ebe29439 (diff)
genirq/affinity: Code consolidation
All information and calculations in the interrupt affinity spreading code are
strictly unsigned int, yet the code uses int all over the place. Convert it
over to unsigned int.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bjorn Helgaas <helgaas@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: linux-nvme@lists.infradead.org
Cc: linux-pci@vger.kernel.org
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sumit Saxena <sumit.saxena@broadcom.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Shivasharan Srikanteshwara <shivasharan.srikanteshwara@broadcom.com>
Link: https://lkml.kernel.org/r/20190216172228.336424556@linutronix.de
-rw-r--r--   include/linux/interrupt.h   20
-rw-r--r--   kernel/irq/affinity.c       56
2 files changed, 38 insertions, 38 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 4a728dba02e2..35e7389c2011 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -251,10 +251,10 @@ struct irq_affinity_notify {
  * @sets:        Number of affinitized sets
  */
 struct irq_affinity {
-        int pre_vectors;
-        int post_vectors;
-        int nr_sets;
-        int *sets;
+        unsigned int pre_vectors;
+        unsigned int post_vectors;
+        unsigned int nr_sets;
+        unsigned int *sets;
 };
 
 /**
@@ -314,9 +314,10 @@ extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
 struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd);
 
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
 
@@ -350,13 +351,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd)
 {
         return NULL;
 }
 
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                          const struct irq_affinity *affd)
 {
         return maxvec;
 }
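
[Illustration, not part of the commit] A minimal sketch of how a caller might
use the converted API above. The driver-side helper name and the chosen
pre_vectors/minvec values are invented for the example; only struct
irq_affinity, irq_calc_affinity_vectors() and irq_create_affinity_masks()
come from the header shown.

#include <linux/interrupt.h>

/* Hypothetical driver helper: reserve one non-managed vector, spread the rest. */
static struct irq_affinity_desc *example_spread(unsigned int maxvec)
{
        static const struct irq_affinity affd = {
                .pre_vectors    = 1,    /* e.g. an admin/config interrupt */
                .post_vectors   = 0,
        };
        unsigned int nvec;

        /* Ask the core how many vectors are worth allocating. */
        nvec = irq_calc_affinity_vectors(2, maxvec, &affd);
        if (!nvec)
                return NULL;

        /* One spread affinity mask per vector, or NULL if allocation fails. */
        return irq_create_affinity_masks(nvec, &affd);
}
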
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 118b66d64a53..82e8799374e9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -9,7 +9,7 @@
 #include <linux/cpu.h>
 
 static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
-                                int cpus_per_vec)
+                                unsigned int cpus_per_vec)
 {
         const struct cpumask *siblmsk;
         int cpu, sibl;
@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 }
 
 static int __irq_build_affinity_masks(const struct irq_affinity *affd,
-                                      int startvec, int numvecs, int firstvec,
+                                      unsigned int startvec,
+                                      unsigned int numvecs,
+                                      unsigned int firstvec,
                                       cpumask_var_t *node_to_cpumask,
                                       const struct cpumask *cpu_mask,
                                       struct cpumask *nmsk,
                                       struct irq_affinity_desc *masks)
 {
-        int n, nodes, cpus_per_vec, extra_vecs, done = 0;
-        int last_affv = firstvec + numvecs;
-        int curvec = startvec;
+        unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+        unsigned int last_affv = firstvec + numvecs;
+        unsigned int curvec = startvec;
         nodemask_t nodemsk = NODE_MASK_NONE;
 
         if (!cpumask_weight(cpu_mask))
@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
          */
         if (numvecs <= nodes) {
                 for_each_node_mask(n, nodemsk) {
-                        cpumask_or(&masks[curvec].mask,
-                                   &masks[curvec].mask,
-                                   node_to_cpumask[n]);
+                        cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
+                                   node_to_cpumask[n]);
                         if (++curvec == last_affv)
                                 curvec = firstvec;
                 }
-                done = numvecs;
-                goto out;
+                return numvecs;
         }
 
         for_each_node_mask(n, nodemsk) {
-                int ncpus, v, vecs_to_assign, vecs_per_node;
+                unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
 
                 /* Spread the vectors per node */
                 vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
                         curvec = firstvec;
                 --nodes;
         }
-
-out:
         return done;
 }
 
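
[Illustration, not part of the commit] The per-node split that
__irq_build_affinity_masks() performs with the cpus_per_vec/extra_vecs
variables declared above assigns each vector either floor(ncpus/vecs) or one
CPU more, so counts differ by at most one. A standalone userspace sketch of
that arithmetic (the numbers are arbitrary; this is not the kernel function):

#include <stdio.h>

int main(void)
{
        /* Arbitrary example: 10 CPUs on a node, 4 vectors assigned to it. */
        unsigned int ncpus = 10, vecs_to_assign = 4, v;
        unsigned int extra_vecs = ncpus % vecs_to_assign; /* vectors taking one extra CPU */

        for (v = 0; v < vecs_to_assign; v++) {
                unsigned int cpus_per_vec = ncpus / vecs_to_assign;

                if (v < extra_vecs)
                        cpus_per_vec++;
                printf("vector %u gets %u CPUs\n", v, cpus_per_vec);
        }
        return 0;       /* prints 3, 3, 2, 2 */
}
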
@@ -174,13 +172,14 @@ out:
  * 2) spread other possible CPUs on these vectors
  */
 static int irq_build_affinity_masks(const struct irq_affinity *affd,
-                                    int startvec, int numvecs, int firstvec,
+                                    unsigned int startvec, unsigned int numvecs,
+                                    unsigned int firstvec,
                                     struct irq_affinity_desc *masks)
 {
-        int curvec = startvec, nr_present, nr_others;
-        int ret = -ENOMEM;
-        cpumask_var_t nmsk, npresmsk;
+        unsigned int curvec = startvec, nr_present, nr_others;
         cpumask_var_t *node_to_cpumask;
+        cpumask_var_t nmsk, npresmsk;
+        int ret = -ENOMEM;
 
         if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
                 return ret;
@@ -239,12 +238,10 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
  * Returns the irq_affinity_desc pointer or NULL if allocation failed.
  */
 struct irq_affinity_desc *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, const struct irq_affinity *affd)
 {
-        int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
-        int curvec, usedvecs;
+        unsigned int affvecs, curvec, usedvecs, nr_sets, i;
         struct irq_affinity_desc *masks = NULL;
-        int i, nr_sets;
 
         /*
          * If there aren't any vectors left after applying the pre/post
@@ -264,16 +261,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
          * Spread on present CPUs starting from affd->pre_vectors. If we
          * have multiple sets, build each sets affinity mask separately.
          */
+        affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
         nr_sets = affd->nr_sets;
         if (!nr_sets)
                 nr_sets = 1;
 
         for (i = 0, usedvecs = 0; i < nr_sets; i++) {
-                int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+                unsigned int this_vecs = affd->sets ? affd->sets[i] : affvecs;
                 int ret;
 
                 ret = irq_build_affinity_masks(affd, curvec, this_vecs,
                                                curvec, masks);
                 if (ret) {
                         kfree(masks);
                         return NULL;
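
[Illustration, not part of the commit] How the loop above carves nvecs into
pre_vectors, per-set spreadable vectors and post_vectors. The numbers and the
two-set layout are made up; only the affvecs/this_vecs arithmetic mirrors the
code shown.

#include <stdio.h>

int main(void)
{
        /* Hypothetical configuration: 16 vectors, 2 pre, 1 post, two sets. */
        unsigned int nvecs = 16, pre = 2, post = 1;
        unsigned int affvecs = nvecs - pre - post;      /* 13 spreadable vectors */
        unsigned int sets[] = { 8, 5 };                 /* stands in for affd->sets[] */
        unsigned int nr_sets = 2, i, curvec = pre, usedvecs = 0;

        for (i = 0; i < nr_sets; i++) {
                unsigned int this_vecs = sets[i];       /* affvecs if there were no sets */

                printf("set %u: vectors %u..%u\n", i, curvec, curvec + this_vecs - 1);
                curvec += this_vecs;
                usedvecs += this_vecs;
        }
        printf("spread %u of %u vectors, %u reserved\n", usedvecs, affvecs, pre + post);
        return 0;
}
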
@@ -303,17 +301,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
  * @maxvec:     The maximum number of vectors available
  * @affd:       Description of the affinity requirements
  */
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd)
 {
-        int resv = affd->pre_vectors + affd->post_vectors;
-        int vecs = maxvec - resv;
-        int set_vecs;
+        unsigned int resv = affd->pre_vectors + affd->post_vectors;
+        unsigned int set_vecs;
 
         if (resv > minvec)
                 return 0;
 
         if (affd->nr_sets) {
-                int i;
+                unsigned int i;
 
                 for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
                         set_vecs += affd->sets[i];
@@ -323,5 +321,5 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
                 put_online_cpus();
         }
 
-        return resv + min(set_vecs, vecs);
+        return resv + min(set_vecs, maxvec - resv);
 }
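
[Illustration, not part of the commit] A standalone sketch of the return value
computed by irq_calc_affinity_vectors() after this change: the reserved
pre/post vectors are always counted, and the requested set sizes are capped by
what remains below maxvec. The concrete numbers are arbitrary.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Hypothetical request: up to 32 vectors, 1 pre + 1 post reserved,
         * and interrupt sets that would like 40 vectors in total. */
        unsigned int minvec = 2, maxvec = 32;
        unsigned int resv = 1 + 1;              /* pre_vectors + post_vectors */
        unsigned int set_vecs = 40;             /* sum over affd->sets[] */

        if (resv > minvec) {
                printf("0 vectors: reservations alone exceed minvec\n");
                return 0;
        }
        /* Mirrors: return resv + min(set_vecs, maxvec - resv); */
        printf("%u vectors\n", resv + min_u(set_vecs, maxvec - resv));
        return 0;       /* prints "32 vectors" */
}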