aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/null_blk.c
diff options
context:
space:
mode:
author	Christoph Hellwig <hch@lst.de>	2014-04-15 16:14:00 -0400
committer	Jens Axboe <axboe@fb.com>	2014-04-15 16:18:02 -0400
commit	24d2f90309b23f2cfe016b2aebc5f0d6e01c57fd (patch)
tree	10307829129eb8f96facbb89fefbba3c0032fb46 /drivers/block/null_blk.c
parent	ed44832dea8a74f909e0187f350201402927f5e5 (diff)
blk-mq: split out tag initialization, support shared tags
Add a new blk_mq_tag_set structure that gets set up before we initialize the queue. A single blk_mq_tag_set structure can be shared by multiple queues. Signed-off-by: Christoph Hellwig <hch@lst.de> Modular export of blk_mq_{alloc,free}_tagset added by me. Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/null_blk.c')
-rw-r--r--	drivers/block/null_blk.c	92
1 file changed, 55 insertions(+), 37 deletions(-)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 71df69d90900..8e7e3a0b0d24 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -32,6 +32,7 @@ struct nullb {
32 unsigned int index; 32 unsigned int index;
33 struct request_queue *q; 33 struct request_queue *q;
34 struct gendisk *disk; 34 struct gendisk *disk;
35 struct blk_mq_tag_set tag_set;
35 struct hrtimer timer; 36 struct hrtimer timer;
36 unsigned int queue_depth; 37 unsigned int queue_depth;
37 spinlock_t lock; 38 spinlock_t lock;
@@ -320,10 +321,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
320 return BLK_MQ_RQ_QUEUE_OK; 321 return BLK_MQ_RQ_QUEUE_OK;
321} 322}
322 323
323static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) 324static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
325 unsigned int hctx_index)
324{ 326{
325 int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); 327 int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes);
326 int tip = (reg->nr_hw_queues % nr_online_nodes); 328 int tip = (set->nr_hw_queues % nr_online_nodes);
327 int node = 0, i, n; 329 int node = 0, i, n;
328 330
329 /* 331 /*
@@ -338,7 +340,7 @@ static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned in
338 340
339 tip--; 341 tip--;
340 if (!tip) 342 if (!tip)
341 b_size = reg->nr_hw_queues / nr_online_nodes; 343 b_size = set->nr_hw_queues / nr_online_nodes;
342 } 344 }
343 } 345 }
344 346
@@ -387,13 +389,17 @@ static struct blk_mq_ops null_mq_ops = {
387 .map_queue = blk_mq_map_queue, 389 .map_queue = blk_mq_map_queue,
388 .init_hctx = null_init_hctx, 390 .init_hctx = null_init_hctx,
389 .complete = null_softirq_done_fn, 391 .complete = null_softirq_done_fn,
392 .alloc_hctx = blk_mq_alloc_single_hw_queue,
393 .free_hctx = blk_mq_free_single_hw_queue,
390}; 394};
391 395
392static struct blk_mq_reg null_mq_reg = { 396static struct blk_mq_ops null_mq_ops_pernode = {
393 .ops = &null_mq_ops, 397 .queue_rq = null_queue_rq,
394 .queue_depth = 64, 398 .map_queue = blk_mq_map_queue,
395 .cmd_size = sizeof(struct nullb_cmd), 399 .init_hctx = null_init_hctx,
396 .flags = BLK_MQ_F_SHOULD_MERGE, 400 .complete = null_softirq_done_fn,
401 .alloc_hctx = null_alloc_hctx,
402 .free_hctx = null_free_hctx,
397}; 403};
398 404
399static void null_del_dev(struct nullb *nullb) 405static void null_del_dev(struct nullb *nullb)
@@ -402,6 +408,8 @@ static void null_del_dev(struct nullb *nullb)
402 408
403 del_gendisk(nullb->disk); 409 del_gendisk(nullb->disk);
404 blk_cleanup_queue(nullb->q); 410 blk_cleanup_queue(nullb->q);
411 if (queue_mode == NULL_Q_MQ)
412 blk_mq_free_tag_set(&nullb->tag_set);
405 put_disk(nullb->disk); 413 put_disk(nullb->disk);
406 kfree(nullb); 414 kfree(nullb);
407} 415}
@@ -506,7 +514,7 @@ static int null_add_dev(void)
506 514
507 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); 515 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
508 if (!nullb) 516 if (!nullb)
509 return -ENOMEM; 517 goto out;
510 518
511 spin_lock_init(&nullb->lock); 519 spin_lock_init(&nullb->lock);
512 520
@@ -514,49 +522,47 @@ static int null_add_dev(void)
514 submit_queues = nr_online_nodes; 522 submit_queues = nr_online_nodes;
515 523
516 if (setup_queues(nullb)) 524 if (setup_queues(nullb))
517 goto err; 525 goto out_free_nullb;
518 526
519 if (queue_mode == NULL_Q_MQ) { 527 if (queue_mode == NULL_Q_MQ) {
520 null_mq_reg.numa_node = home_node; 528 if (use_per_node_hctx)
521 null_mq_reg.queue_depth = hw_queue_depth; 529 nullb->tag_set.ops = &null_mq_ops_pernode;
522 null_mq_reg.nr_hw_queues = submit_queues; 530 else
523 531 nullb->tag_set.ops = &null_mq_ops;
524 if (use_per_node_hctx) { 532 nullb->tag_set.nr_hw_queues = submit_queues;
525 null_mq_reg.ops->alloc_hctx = null_alloc_hctx; 533 nullb->tag_set.queue_depth = hw_queue_depth;
526 null_mq_reg.ops->free_hctx = null_free_hctx; 534 nullb->tag_set.numa_node = home_node;
527 } else { 535 nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
528 null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; 536 nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
529 null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; 537 nullb->tag_set.driver_data = nullb;
530 } 538
531 539 if (blk_mq_alloc_tag_set(&nullb->tag_set))
532 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); 540 goto out_cleanup_queues;
541
542 nullb->q = blk_mq_init_queue(&nullb->tag_set);
543 if (!nullb->q)
544 goto out_cleanup_tags;
533 } else if (queue_mode == NULL_Q_BIO) { 545 } else if (queue_mode == NULL_Q_BIO) {
534 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); 546 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
547 if (!nullb->q)
548 goto out_cleanup_queues;
535 blk_queue_make_request(nullb->q, null_queue_bio); 549 blk_queue_make_request(nullb->q, null_queue_bio);
536 init_driver_queues(nullb); 550 init_driver_queues(nullb);
537 } else { 551 } else {
538 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 552 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
553 if (!nullb->q)
554 goto out_cleanup_queues;
539 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); 555 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
540 if (nullb->q) 556 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
541 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
542 init_driver_queues(nullb); 557 init_driver_queues(nullb);
543 } 558 }
544 559
545 if (!nullb->q)
546 goto queue_fail;
547
548 nullb->q->queuedata = nullb; 560 nullb->q->queuedata = nullb;
549 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 561 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
550 562
551 disk = nullb->disk = alloc_disk_node(1, home_node); 563 disk = nullb->disk = alloc_disk_node(1, home_node);
552 if (!disk) { 564 if (!disk)
553queue_fail: 565 goto out_cleanup_blk_queue;
554 blk_cleanup_queue(nullb->q);
555 cleanup_queues(nullb);
556err:
557 kfree(nullb);
558 return -ENOMEM;
559 }
560 566
561 mutex_lock(&lock); 567 mutex_lock(&lock);
562 list_add_tail(&nullb->list, &nullb_list); 568 list_add_tail(&nullb->list, &nullb_list);
@@ -579,6 +585,18 @@ err:
579 sprintf(disk->disk_name, "nullb%d", nullb->index); 585 sprintf(disk->disk_name, "nullb%d", nullb->index);
580 add_disk(disk); 586 add_disk(disk);
581 return 0; 587 return 0;
588
589out_cleanup_blk_queue:
590 blk_cleanup_queue(nullb->q);
591out_cleanup_tags:
592 if (queue_mode == NULL_Q_MQ)
593 blk_mq_free_tag_set(&nullb->tag_set);
594out_cleanup_queues:
595 cleanup_queues(nullb);
596out_free_nullb:
597 kfree(nullb);
598out:
599 return -ENOMEM;
582} 600}
583 601
584static int __init null_init(void) 602static int __init null_init(void)