Diffstat (limited to 'kernel/irq/affinity.c')
-rw-r--r--	kernel/irq/affinity.c | 62
1 file changed, 44 insertions, 18 deletions
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 278289c091bb..d737dc60ab52 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -230,6 +230,12 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 	return ret;
 }
 
+static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+{
+	affd->nr_sets = 1;
+	affd->set_size[0] = affvecs;
+}
+
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
  * @nvecs:	The total number of vectors
@@ -240,20 +246,46 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 struct irq_affinity_desc *
 irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 {
-	unsigned int affvecs, curvec, usedvecs, nr_sets, i;
-	unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+	unsigned int affvecs, curvec, usedvecs, i;
 	struct irq_affinity_desc *masks = NULL;
 
 	/*
-	 * If there aren't any vectors left after applying the pre/post
-	 * vectors don't bother with assigning affinity.
+	 * Determine the number of vectors which need interrupt affinities
+	 * assigned. If the pre/post request exhausts the available vectors
+	 * then nothing to do here except for invoking the calc_sets()
+	 * callback so the device driver can adjust to the situation. If there
+	 * is only a single vector, then managing the queue is pointless as
+	 * well.
 	 */
-	if (nvecs == affd->pre_vectors + affd->post_vectors)
-		return NULL;
+	if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
+		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+	else
+		affvecs = 0;
+
+	/*
+	 * Simple invocations do not provide a calc_sets() callback. Install
+	 * the generic one. The check for affd->nr_sets is a temporary
+	 * workaround and will be removed after the NVME driver is converted
+	 * over.
+	 */
+	if (!affd->nr_sets && !affd->calc_sets)
+		affd->calc_sets = default_calc_sets;
+
+	/*
+	 * If the device driver provided a calc_sets() callback let it
+	 * recalculate the number of sets and their size. The check will go
+	 * away once the NVME driver is converted over.
+	 */
+	if (affd->calc_sets)
+		affd->calc_sets(affd, affvecs);
 
 	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
 		return NULL;
 
+	/* Nothing to assign? */
+	if (!affvecs)
+		return NULL;
+
 	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return NULL;
@@ -261,21 +293,13 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 	/* Fill out vectors at the beginning that don't need affinity */
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
 		cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+
 	/*
 	 * Spread on present CPUs starting from affd->pre_vectors. If we
 	 * have multiple sets, build each sets affinity mask separately.
 	 */
-	affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
-	nr_sets = affd->nr_sets;
-	if (!nr_sets) {
-		nr_sets = 1;
-		set_size[0] = affvecs;
-	} else {
-		memcpy(set_size, affd->set_size, nr_sets * sizeof(unsigned int));
-	}
-
-	for (i = 0, usedvecs = 0; i < nr_sets; i++) {
-		unsigned int this_vecs = set_size[i];
+	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+		unsigned int this_vecs = affd->set_size[i];
 		int ret;
 
 		ret = irq_build_affinity_masks(affd, curvec, this_vecs,
@@ -318,7 +342,9 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 	if (resv > minvec)
 		return 0;
 
-	if (affd->nr_sets) {
+	if (affd->calc_sets) {
+		set_vecs = maxvec - resv;
+	} else if (affd->nr_sets) {
 		unsigned int i;
 
 		for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
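The net effect of the patch is that a driver no longer pre-fills nr_sets/set_size up front; instead it supplies a calc_sets() callback, which the core invokes with the actual number of spreadable vectors. A minimal driver-side sketch, assuming the struct irq_affinity layout introduced by this series; the foo_* names and the 50/50 split are illustrative, not part of this patch:

	#include <linux/interrupt.h>	/* struct irq_affinity */

	/*
	 * Hypothetical driver callback: split whatever is spreadable into
	 * two interrupt sets, e.g. read and write queue groups. The core
	 * calls this from irq_create_affinity_masks() with the real number
	 * of spreadable vectors (affvecs), which can be smaller than what
	 * the driver originally requested.
	 */
	static void foo_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
	{
		affd->nr_sets = 2;
		affd->set_size[0] = affvecs / 2;		/* e.g. read queues */
		affd->set_size[1] = affvecs - affvecs / 2;	/* e.g. write queues */
	}

	static struct irq_affinity foo_affd = {
		.pre_vectors = 1,	/* one non-spread vector, e.g. admin IRQ */
		.calc_sets   = foo_calc_sets,
	};

This also explains the irq_calc_affinity_vectors() hunk: for calc_sets users the core reports maxvec - resv, because the callback adapts the set sizes to whatever vector count the allocation ultimately yields, so every non-reserved vector counts as spreadable.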