author	Jens Axboe <axboe@kernel.dk>	2013-11-19 11:25:07 -0500
committer	Jens Axboe <axboe@kernel.dk>	2013-11-19 11:25:07 -0500
commit	94eddfbeaafa3e8040a2c47d370dea0e58e76941 (patch)
tree	52cdf7a26b27d9cab3b1eba9b424a6d1432bdf8d /block
parent	34f2fd8dfe6185b0eaaf7d661281713a6170b077 (diff)
blk-mq: ensure that we set REQ_IO_STAT so diskstats work
If disk stats are enabled on the queue, a request needs to be marked
with REQ_IO_STAT for accounting to be active on that request. This
fixes an issue with virtio-blk not showing up in /proc/diskstats after
the conversion to blk-mq.

Add QUEUE_FLAG_MQ_DEFAULT, setting stats and same cpu-group completion
on by default.

Reported-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
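The QUEUE_FLAG_MQ_DEFAULT definition itself belongs in include/linux/blkdev.h
and therefore falls outside the 'block' diffstat shown below. As a sketch only,
given that the message says the flag turns on stats and same cpu-group
completion by default, the define plausibly combines the existing queue flag
bits like this:

/*
 * Assumed definition, not part of the diff shown on this page:
 * OR together the io-stat and same-CPU-completion queue flag bits
 * so every newly initialized blk-mq queue starts with both enabled.
 */
#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_SAME_COMP))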
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 862f458d4760..32593dba4684 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-		unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+			       struct request *rq, unsigned int rw_flags)
 {
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
+
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 
 		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
-			blk_mq_rq_ctx_init(ctx, rq, rw);
+			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
 		} else if (!(gfp & __GFP_WAIT))
 			break;
@@ -921,7 +924,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(ctx, rq, rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1380,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	q->queue_hw_ctx = hctxs;
 
 	q->mq_ops = reg->ops;
+	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
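For context on why REQ_IO_STAT is needed at all (a sketch, not part of this
patch): the completion-side accounting helpers in block/blk-core.c, such as
blk_account_io_completion() and blk_account_io_done(), only bump the per-disk
counters that feed /proc/diskstats when a gate check on the request passes,
roughly along the lines of blk_do_io_stat() in block/blk.h:

/*
 * Rough sketch of the gate that makes REQ_IO_STAT necessary; the
 * exact check in the contemporary block/blk.h may differ in detail.
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&			/* attached to a gendisk */
	       (rq->cmd_flags & REQ_IO_STAT) &&	/* stats enabled for rq */
	       (rq->cmd_type == REQ_TYPE_FS);	/* regular fs request */
}

A blk-mq request allocated without REQ_IO_STAT fails this check, so its I/O
was never accounted, which is the virtio-blk symptom this patch fixes.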