author     Ming Lei <ming.lei@canonical.com>          2014-09-25 11:23:39 -0400
committer  Jens Axboe <axboe@fb.com>                  2014-09-25 17:22:34 -0400
commit     1bcb1eada4f11a713cbe586d1b5a5d93a48277cb (patch)
tree       89e14d77cb1e95f7742ee7395ed5ff7e84afc057 /block/blk-mq.c
parent     08e98fc6016c890c2f4ffba6decc0ca9d2d5d7f8 (diff)
blk-mq: allocate flush_rq in blk_mq_init_flush()
It is reasonable to allocate flush req in blk_mq_init_flush().
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  16
1 file changed, 6 insertions, 10 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 66ef1fb79326..78bcf8bfb22a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
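
The diff above covers only the blk-mq.c side; the matching change in block/blk-flush.c, where blk_mq_init_flush() lives, is outside this diffstat. As a minimal sketch of what that function might look like after this patch, assuming it simply absorbs the kzalloc() removed from blk_mq_init_queue() and that q->tag_set has already been populated by blk_mq_add_queue_tag_set() before it runs:

/*
 * Hypothetical post-patch blk_mq_init_flush() in block/blk-flush.c, not the
 * verbatim upstream change: it now returns an errno instead of void so the
 * caller in blk_mq_init_queue() can take the new err_hw_queues unwind path.
 */
int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;	/* assumes tag_set is set by now */

	spin_lock_init(&q->mq_flush_lock);

	/* same allocation that was removed from blk_mq_init_queue() above */
	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;

	return 0;
}

The blk_mq_init_flush() prototype would need the matching void-to-int change for the new error check in blk_mq_init_queue() to compile, and freeing q->flush_rq on teardown stays where it was, since only the allocation site moves.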