author	Matias Bjorling <m@bjorling.me>	2013-12-18 07:41:43 -0500
committer	Jens Axboe <axboe@kernel.dk>	2013-12-19 10:09:42 -0500
commit	2d263a7856cbaf26dd89b671e2161c4a49f8461b (patch)
tree	967e3340dc9a90690b04a93bffdf4c12cf8ed4c8 /drivers/block/null_blk.c
parent	12f8f4fc0314103d47f9b1cbc812597b8d893ce1 (diff)
null_blk: refactor init and init errors code paths
Simplify the initialization logic of the three block layers.
- The queue initialization is split into two parts, so the code can be
  reused when initializing the sq-, bio- and mq-based layers.
- Set the submit_queues default value to 0 and always resolve it at init
  time.
- Simplify the init error code paths.
Signed-off-by: Matias Bjorling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
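
At a glance, the resulting init flow (a condensed sketch of the diff below, not verbatim code):

	/*
	 * null_add_dev()
	 *   setup_queues()       - now only allocates nullb->queues
	 *   NULL_Q_MQ:   blk-mq calls null_init_hctx() once per hw queue,
	 *                which uses the shared null_init_queue()
	 *   NULL_Q_BIO:  blk_alloc_queue_node() + init_driver_queues()
	 *   sq (else):   blk_init_queue_node() + init_driver_queues()
	 *
	 * init_driver_queues() runs null_init_queue() and setup_commands()
	 * for each of the submit_queues queues, unwinding with
	 * cleanup_queues() on any failure.
	 */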
Diffstat (limited to 'drivers/block/null_blk.c')
 drivers/block/null_blk.c | 63 ++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 25 deletions(-)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..f0aeb2a7a9ca 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -65,7 +65,7 @@ enum {
 	NULL_Q_MQ = 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -355,16 +355,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
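
For the mq layer, the shared helper runs from the per-hardware-context init callback rather than a loop: blk-mq invokes .init_hctx once per hardware queue during queue setup. A minimal sketch of the wiring, assuming the 3.13-era blk_mq_ops layout used by this driver:

	static struct blk_mq_ops null_mq_ops = {
		.queue_rq	= null_queue_rq,	/* I/O submission path */
		.map_queue	= blk_mq_map_queue,	/* default ctx -> hctx mapping */
		.init_hctx	= null_init_hctx,	/* runs null_init_queue() per hctx */
	};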
@@ -417,13 +425,13 @@ static int setup_commands(struct nullb_queue *nq)
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +462,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
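
Because the callers only test for nonzero, switching these return values from 1 to negative errnos needs no caller changes; the existing check in null_add_dev() (shown in the next hunk) keeps working, and a future caller can propagate the specific error:

	/* Caller side, unchanged by this patch: any nonzero return fails */
	if (setup_queues(nullb))
		goto err;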
@@ -495,9 +507,6 @@ static int null_add_dev(void)
 
 	spin_lock_init(&nullb->lock);
 
-	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
-		submit_queues = nr_online_nodes;
-
 	if (setup_queues(nullb))
 		goto err;
 
@@ -518,11 +527,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
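
Note that both new init_driver_queues() calls discard the return value, so an -ENOMEM from setup_commands() on the bio- and sq-based paths is not propagated. A hedged sketch of what a checked call could look like, reusing the function's existing err label (illustrative only, not part of this patch):

	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
		if (init_driver_queues(nullb))
			goto err;	/* illustrative: surface the -ENOMEM */
	}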
@@ -579,7 +590,9 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+		submit_queues = nr_online_nodes;
+	else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
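
Net effect on the module parameter: submit_queues now defaults to 0 and is resolved once at module init, after queue_mode and use_per_node_hctx are known, so a user-supplied value is overridden in per-node mq mode. Condensed and annotated from the hunk above:

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;	/* one submission queue per NUMA node */
	else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;		/* cap at the number of CPU ids */
	else if (!submit_queues)
		submit_queues = 1;			/* unset (default 0) falls back to 1 */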