author		Ming Lei <ming.lei@canonical.com>	2014-09-25 11:23:47 -0400
committer	Jens Axboe <axboe@fb.com>	2014-09-25 17:22:45 -0400
commit		f70ced09170761acb69840cafaace4abc72cba4b (patch)
tree		bc62f5926a5e8b74be30316196a41b25ece12368 /include/linux/blk-mq.h
parent		e97c293cdf77263abdc021de280516e0017afc84 (diff)
blk-mq: support per-dispatch-queue flush machinery
This patch supports running a single flush machinery for
each blk-mq dispatch queue, so that:
- the current init_request and exit_request callbacks can
  cover the flush request too, which fixes the buggy copying way
  of initializing the flush request's pdu (a sketch follows this list)
- flush performance improves in the multi hw-queue case
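
For illustration only (not part of this patch; all names below are
hypothetical): with the per-dispatch-queue flush machinery, a driver's
existing init_request callback is simply invoked for the flush request's
pdu as well, so no special copying is needed:

    #include <linux/blk-mq.h>
    #include <linux/scatterlist.h>

    /* Hypothetical per-request driver data carried in the pdu. */
    struct my_cmd {
    	struct scatterlist sg[4];
    };

    /*
     * Sketch of a driver init_request callback (3.18-era signature).
     * After this patch it is also called for each hw queue's flush
     * request, so the pdu below may belong to a flush request and no
     * manual initialization of that pdu is required elsewhere.
     */
    static int my_init_request(void *data, struct request *rq,
    			   unsigned int hctx_idx, unsigned int request_idx,
    			   unsigned int numa_node)
    {
    	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

    	sg_init_table(cmd->sg, ARRAY_SIZE(cmd->sg));
    	return 0;
    }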
In a fio sync write test over virtio-blk (4 hw queues, ioengine=sync,
iodepth=64, numjobs=4, bs=4K), throughput increases significantly
in my test environment:
- throughput: +70% in case of virtio-blk over null_blk
- throughput: +30% in case of virtio-blk over an SSD image
The multi-virtqueue feature isn't merged into QEMU yet; patches for
the feature can be found in the tree below:
git://kernel.ubuntu.com/ming/qemu.git v2.1.0-mq.4
Simply passing 'num_queues=4 vectors=5' should be enough to
enable the multi-queue (quad-queue) feature for QEMU virtio-blk.
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'include/linux/blk-mq.h')
 include/linux/blk-mq.h | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 325349559fb0..02c5d950f444 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>
 
 struct blk_mq_tags;
+struct blk_flush_queue;
 
 struct blk_mq_cpu_notifier {
 	struct list_head	list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
 
 	struct request_queue	*queue;
 	unsigned int		queue_num;
+	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
 
@@ -119,6 +121,10 @@ struct blk_mq_ops {
 	/*
 	 * Called for every command allocated by the block layer to allow
 	 * the driver to set up driver specific data.
+	 *
+	 * Tag greater than or equal to queue_depth is for setting up
+	 * flush request.
+	 *
 	 * Ditto for exit/teardown.
 	 */
 	init_request_fn		*init_request;
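
As a further hedged illustration of the comment added above (names
hypothetical, not from this patch): inside a callback such as the
my_init_request sketch earlier, a driver that needs to treat the flush
request's pdu specially can recognize it by index, assuming its
blk_mq_tag_set is reachable as my_tag_set:

    	/*
    	 * A request index at or beyond the configured queue depth belongs
    	 * to the hw queue's flush request (see the comment added to
    	 * blk_mq_ops above).
    	 */
    	if (request_idx >= my_tag_set.queue_depth) {
    		/* flush-request-specific pdu setup could go here */
    	}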