author | Ming Lei <ming.lei@canonical.com> | 2014-09-25 11:23:47 -0400
committer | Jens Axboe <axboe@fb.com> | 2014-09-25 17:22:45 -0400
commit | f70ced09170761acb69840cafaace4abc72cba4b (patch)
tree | bc62f5926a5e8b74be30316196a41b25ece12368 /block/blk.h
parent | e97c293cdf77263abdc021de280516e0017afc84 (diff)
blk-mq: support per-dispatch_queue flush machinery
This patch supports running one flush machinery instance per
blk-mq dispatch queue, so that:
- the existing init_request and exit_request callbacks can cover
  flush requests too, which allows fixing the buggy copy-based way
  of initializing a flush request's pdu (the per-queue flush state
  involved is sketched below)
- flush performance improves in the multi hw-queue case
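For reference, the flush-machinery state that this patch instantiates
once per dispatch queue is the struct blk_flush_queue introduced
earlier in this series. The sketch below is paraphrased from blk.h of
this kernel era and is illustrative rather than authoritative:

	struct blk_flush_queue {
		unsigned int		flush_queue_delayed:1;
		unsigned int		flush_pending_idx:1;
		unsigned int		flush_running_idx:1;
		unsigned long		flush_pending_since;
		struct list_head	flush_queue[2];	/* double-buffered pending lists */
		struct list_head	flush_data_in_flight;
		struct request		*flush_rq;	/* the preallocated flush request */
		spinlock_t		mq_flush_lock;
	};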
In a fio sync write test over virtio-blk (4 hw queues, ioengine=sync,
iodepth=64, numjobs=4, bs=4K), throughput increases substantially in
my test environment (an equivalent invocation is sketched after this
list):
- throughput: +70% with virtio-blk over null_blk
- throughput: +30% with virtio-blk over an SSD image
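An fio invocation matching these parameters might look like the
following; the job name and target device are placeholders, and
--sync=1 is an assumption about how the 'sync write' part of the test
was expressed:

	fio --name=flush-test --filename=/dev/vdb --rw=write --sync=1 \
	    --ioengine=sync --iodepth=64 --numjobs=4 --bs=4k

Note that iodepth has no practical effect with ioengine=sync; it is
kept here only to mirror the options quoted above.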
The multi virtqueue feature hasn't been merged into QEMU yet; patches
for it can be found in the tree below:
git://kernel.ubuntu.com/ming/qemu.git v2.1.0-mq.4
Simply passing 'num_queues=4 vectors=5' should be enough to enable
the multi-queue (quad-queue) feature for QEMU virtio-blk.
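For example, with the patched tree above, a four-queue virtio-blk
device might be configured as sketched here; the drive id and the
rest of the command line are placeholders, and the num_queues
property comes from the out-of-tree patches (vectors=5 is assumed to
cover one MSI-X vector per queue plus one for configuration changes):

	qemu-system-x86_64 ... \
	    -drive file=disk.img,if=none,id=drive0 \
	    -device virtio-blk-pci,drive=drive0,num_queues=4,vectors=5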
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk.h')
-rw-r--r-- | block/blk.h | 16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 7ecdd8517e69..43b036185712 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -2,6 +2,8 @@
 #define BLK_INTERNAL_H
 
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
 
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
@@ -31,7 +33,14 @@ extern struct ida blk_queue_ida;
 static inline struct blk_flush_queue *blk_get_flush_queue(
 	struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	return q->fq;
+	struct blk_mq_hw_ctx *hctx;
+
+	if (!q->mq_ops)
+		return q->fq;
+
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	return hctx->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
@@ -39,8 +48,9 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q);
-void blk_free_flush_queue(struct blk_flush_queue *fq);
+struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+		int node, int cmd_size);
+void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
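For context, a minimal sketch of how the blk-mq core is assumed to
consume the new blk_alloc_flush_queue() signature when setting up each
hardware queue (not part of this diff; names such as set, hctx_idx,
node, and flush_start_tag follow blk-mq conventions of this era, and
error-path cleanup of hctx->fq is omitted for brevity):

	/* allocate a flush queue per hctx, with room for the driver pdu */
	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		return -ENOMEM;

	/*
	 * The flush request now carries a cmd_size-byte pdu, so the
	 * driver's regular init_request callback can initialize it,
	 * replacing the old copy-based initialization.
	 */
	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data, hctx->fq->flush_rq,
				   hctx_idx, flush_start_tag + hctx_idx, node))
		return -ENOMEM;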