Diffstat (limited to 'drivers/block/null_blk.c')
 -rw-r--r--  drivers/block/null_blk.c  112
 1 file changed, 78 insertions(+), 34 deletions(-)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
 	NULL_Q_MQ	= 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled
+	 * with no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id
+	 * to the real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
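/*
 * Aside: a minimal userspace sketch of the hctx-to-node bucket split in
 * null_alloc_hctx() above, minus the online-node remapping. It assumes
 * nr_hw_queues and nr_nodes stand in for reg->nr_hw_queues and
 * nr_online_nodes; the helper name hctx_to_node() is hypothetical and
 * not part of the patch.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int hctx_to_node(int hctx_index, int nr_hw_queues, int nr_nodes)
{
	int b_size = DIV_ROUND_UP(nr_hw_queues, nr_nodes);
	int tip = nr_hw_queues % nr_nodes;
	int node = 0, i, n;

	/* Every b_size indexes advance one node; once the remainder is
	 * consumed, the bucket shrinks to the plain quotient. */
	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;
			tip--;
			if (!tip)
				b_size = nr_hw_queues / nr_nodes;
		}
	}
	return node;
}

int main(void)
{
	int i;

	/* 6 hardware queues over 4 nodes: buckets of 2, 2, 1, 1. */
	for (i = 0; i < 6; i++)
		printf("hctx %d -> node %d\n", i, hctx_to_node(i, 6, 4));
	return 0;
}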
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
 	list_del_init(&nullb->list);
 
 	del_gendisk(nullb->disk);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_queue(nullb->q);
-	else
-		blk_cleanup_queue(nullb->q);
+	blk_cleanup_queue(nullb->q);
 	put_disk(nullb->disk);
 	kfree(nullb);
 }
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq)
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
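/*
 * Aside: a minimal userspace sketch of the error handling adopted by
 * init_driver_queues() above: return a real errno instead of 1, and on
 * failure unwind only what was built. The names setup_one(),
 * cleanup_all() and init_all() are hypothetical stand-ins for
 * setup_commands()/cleanup_queues(), not driver code.
 */
#include <errno.h>
#include <stdlib.h>

struct queue {
	int *cmds;
};

static int setup_one(struct queue *q)
{
	q->cmds = calloc(64, sizeof(*q->cmds));
	return q->cmds ? 0 : -ENOMEM;	/* errno-style result, as in the patch */
}

static void cleanup_all(struct queue *qs, int n)
{
	while (n--)
		free(qs[n].cmds);
}

static int init_all(struct queue *qs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = setup_one(&qs[i]);
		if (ret)
			goto err_queue;
	}
	return 0;
err_queue:
	cleanup_all(qs, i);	/* only the i queues set up so far */
	return ret;
}

int main(void)
{
	struct queue qs[4] = { { 0 } };

	return init_all(qs, 4) ? EXIT_FAILURE : EXIT_SUCCESS;
}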
@@ -518,11 +557,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
@@ -534,10 +575,7 @@ static int null_add_dev(void)
 	disk = nullb->disk = alloc_disk_node(1, home_node);
 	if (!disk) {
 queue_fail:
-		if (queue_mode == NULL_Q_MQ)
-			blk_mq_free_queue(nullb->q);
-		else
-			blk_cleanup_queue(nullb->q);
+		blk_cleanup_queue(nullb->q);
 		cleanup_queues(nullb);
 err:
 		kfree(nullb);
@@ -579,7 +617,13 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+		if (submit_queues < nr_online_nodes) {
+			pr_warn("null_blk: submit_queues param is set to %u.",
+							nr_online_nodes);
+			submit_queues = nr_online_nodes;
+		}
+	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
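The submit_queues clamping added to null_init() can be exercised in isolation. Below is a hedged userspace sketch of that branch, where per_node_hctx collapses the patch's "queue_mode == NULL_Q_MQ && use_per_node_hctx" condition into one flag, and nr_nodes/nr_cpus stand in for nr_online_nodes/nr_cpu_ids; clamp_submit_queues() is a hypothetical name.

#include <stdbool.h>
#include <stdio.h>

static int clamp_submit_queues(int submit_queues, bool per_node_hctx,
			       int nr_nodes, int nr_cpus)
{
	if (per_node_hctx) {
		/* Per-node contexts need at least one queue per node. */
		if (submit_queues < nr_nodes)
			submit_queues = nr_nodes;
	} else if (submit_queues > nr_cpus)
		submit_queues = nr_cpus;
	else if (!submit_queues)
		submit_queues = 1;
	return submit_queues;
}

int main(void)
{
	printf("%d\n", clamp_submit_queues(1, true, 4, 16));	/* 4 */
	printf("%d\n", clamp_submit_queues(32, false, 4, 16));	/* 16 */
	printf("%d\n", clamp_submit_queues(0, false, 4, 16));	/* 1 */
	return 0;
}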