diff options
author | Matias Bjørling <m@bjorling.me> | 2013-12-20 18:11:01 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2013-12-21 11:30:34 -0500 |
commit | fc1bc35443741e132dd0118e8dbac53f69a6f76e (patch) | |
tree | ba0da257a26bc61ea8bef1f8f2b76828f3e539fd | |
parent | 200052440d3b56f593038a35b7c14bdc780184a9 (diff) |
null_blk: support submit_queues on use_per_node_hctx
In the case where both the submit_queues param and use_per_node_hctx param
are used, we limit the number of submit_queues to the number of online
nodes.
If the submit_queues is a multiple of nr_online_nodes, it's trivial. Simply map
them to the nodes. For example: 8 submit queues are mapped as node0[0,1],
node1[2,3], ...
If uneven, we are left with a remainder of submit_queues that must be
mapped. These are mapped toward the first node and onward. E.g. 5
submit queues mapped onto 4 nodes are mapped as node0[0,1], node1[2], ...
Signed-off-by: Matias Bjorling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | drivers/block/null_blk.c | 39 |
1 files changed, 35 insertions, 4 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 9b0311b61fe1..528f4e47f38e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | |||
2 | #include <linux/moduleparam.h> | 3 | #include <linux/moduleparam.h> |
3 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
4 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
346 | 347 | ||
347 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) | 348 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) |
348 | { | 349 | { |
349 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, | 350 | int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); |
350 | hctx_index); | 351 | int tip = (reg->nr_hw_queues % nr_online_nodes); |
352 | int node = 0, i, n; | ||
353 | |||
354 | /* | ||
355 | * Split submit queues evenly wrt to the number of nodes. If uneven, | ||
356 | * fill the first buckets with one extra, until the rest is filled with | ||
357 | * no extra. | ||
358 | */ | ||
359 | for (i = 0, n = 1; i < hctx_index; i++, n++) { | ||
360 | if (n % b_size == 0) { | ||
361 | n = 0; | ||
362 | node++; | ||
363 | |||
364 | tip--; | ||
365 | if (!tip) | ||
366 | b_size = reg->nr_hw_queues / nr_online_nodes; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * A node might not be online, therefore map the relative node id to the | ||
372 | * real node id. | ||
373 | */ | ||
374 | for_each_online_node(n) { | ||
375 | if (!node) | ||
376 | break; | ||
377 | node--; | ||
378 | } | ||
379 | |||
380 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); | ||
351 | } | 381 | } |
352 | 382 | ||
353 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | 383 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) |
@@ -591,10 +621,11 @@ static int __init null_init(void) | |||
591 | #endif | 621 | #endif |
592 | 622 | ||
593 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { | 623 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
594 | if (submit_queues > 0) | 624 | if (submit_queues < nr_online_nodes) { |
595 | pr_warn("null_blk: submit_queues param is set to %u.", | 625 | pr_warn("null_blk: submit_queues param is set to %u.", |
596 | nr_online_nodes); | 626 | nr_online_nodes); |
597 | submit_queues = nr_online_nodes; | 627 | submit_queues = nr_online_nodes; |
628 | } | ||
598 | } else if (submit_queues > nr_cpu_ids) | 629 | } else if (submit_queues > nr_cpu_ids) |
599 | submit_queues = nr_cpu_ids; | 630 | submit_queues = nr_cpu_ids; |
600 | else if (!submit_queues) | 631 | else if (!submit_queues) |