commit 1bcb1eada4f11a713cbe586d1b5a5d93a48277cb (patch)
author:    Ming Lei <ming.lei@canonical.com>  2014-09-25 11:23:39 -0400
committer: Jens Axboe <axboe@fb.com>          2014-09-25 17:22:34 -0400
tree:      89e14d77cb1e95f7742ee7395ed5ff7e84afc057
parent:    08e98fc6016c890c2f4ffba6decc0ca9d2d5d7f8 (diff)
blk-mq: allocate flush_rq in blk_mq_init_flush()
It is reasonable to allocate flush req in blk_mq_init_flush().
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 block/blk-flush.c | 11 +++++++++--
 block/blk-mq.c    | 16 ++++++---------
 block/blk-mq.h    |  2 +-
 3 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c8e25768f2e1..55028a707927 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -472,7 +472,16 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+int blk_mq_init_flush(struct request_queue *q)
 {
+	struct blk_mq_tag_set *set = q->tag_set;
+
 	spin_lock_init(&q->mq_flush_lock);
+
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+				set->cmd_size, cache_line_size()),
+				GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+	return 0;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 66ef1fb79326..78bcf8bfb22a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a3c613a9df2d..ecac69c08937 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,7 +27,7 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_init_flush(struct request_queue *q);
+int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,