author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-20 16:06:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-20 16:06:20 -0500
commit		5a1efc6e68a095917277459091fafba6a6baef17
tree		9f3d9b14893689cee1b7918e51b15ca135f6563a
parent		6d6e352c80f22c446d933ca8103e02bac1f09129
parent		01b983c9fcfeea5774ca2df2e167b68c3c299278
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block IO fixes from Jens Axboe:
"Normally I'd defer my initial for-linus pull request until after the
merge window, but a race was uncovered in the virtio-blk conversion to
blk-mq that could cause hangs. So here's a small collection of fixes
for you to pull:
- The fix for the virtio-blk IO hang reported by Dave Chinner, from
Shaohua and myself.
 - Add the Insert blktrace event for blk-mq. This makes 'btt' happy
   when it is doing its state transition analysis.
- Ensure that blk-mq has disk/partition stats enabled by default,
instead of making it opt-in.
- A fix for __bio_add_page() and large sector counts"
* 'for-linus' of git://git.kernel.dk/linux-block:
blk-mq: add blktrace insert event trace
virtio-blk: virtqueue_kick() must be ordered with other virtqueue operations
blk-mq: ensure that we set REQ_IO_STAT so diskstats work
bio: fix argument of __bio_add_page() for max_sectors > 0xffff
-rw-r--r--	block/blk-mq.c	14
-rw-r--r--	drivers/block/virtio_blk.c	5
-rw-r--r--	fs/bio.c	2
-rw-r--r--	include/linux/blkdev.h	3
4 files changed, 17 insertions, 7 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 862f458d4760..cdc629cf075b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-			       unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+			       struct request *rq, unsigned int rw_flags)
 {
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
+
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 
 		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
-			blk_mq_rq_ctx_init(ctx, rq, rw);
+			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
 		} else if (!(gfp & __GFP_WAIT))
 			break;
@@ -718,6 +721,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	trace_block_rq_insert(hctx->queue, rq);
+
 	list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
@@ -921,7 +926,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(ctx, rq, rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1382,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	q->queue_hw_ctx = hctxs;
 
 	q->mq_ops = reg->ops;
+	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
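
The blk_mq_rq_ctx_init() change above is the diskstats fix: the queue's io_stat setting is sampled once, when the request is initialized, and latched into the request's cmd_flags, so the completion-side accounting only ever has to look at the request itself. A minimal sketch of that latch-at-init pattern, using made-up stand-in types and flag values rather than the kernel's real definitions:

```c
#include <stdio.h>

/* Illustrative stand-ins; the kernel's flag values and structs differ. */
#define QUEUE_FLAG_IO_STAT	(1u << 0)	/* queue-wide: accounting on */
#define REQ_IO_STAT		(1u << 0)	/* per-request: account this  */

struct request_queue { unsigned int queue_flags; };
struct request       { unsigned int cmd_flags;   };

/* Mirrors the shape of blk_mq_rq_ctx_init(): test the queue flag once
 * at init time and stamp the request, so later stat code never has to
 * reach back into the queue. */
static void rq_init(struct request_queue *q, struct request *rq,
		    unsigned int rw_flags)
{
	if (q->queue_flags & QUEUE_FLAG_IO_STAT)
		rw_flags |= REQ_IO_STAT;
	rq->cmd_flags = rw_flags;
}

int main(void)
{
	struct request_queue q = { .queue_flags = QUEUE_FLAG_IO_STAT };
	struct request rq;

	rq_init(&q, &rq, 0);
	printf("accounted: %s\n", (rq.cmd_flags & REQ_IO_STAT) ? "yes" : "no");
	return 0;
}
```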
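The trace_block_rq_insert() hook matters because btt reconstructs each request's life from the trace stream as a chain of events, conventionally Q (queued), I (inserted), D (dispatched), C (completed); without the 'I' event the chain has a hole and btt's state transition analysis complains. A toy version of that kind of transition check, a deliberate simplification and not btt's actual code:

```c
#include <stdio.h>

/* Simplified life cycle: Q -> I -> D -> C, each event legal only
 * immediately after its predecessor. Illustrative model only. */
enum rq_event { EV_Q, EV_I, EV_D, EV_C };

static const char *names = "QIDC";

static int check_stream(const enum rq_event *ev, int n)
{
	int state = -1;	/* nothing seen yet */

	for (int i = 0; i < n; i++) {
		if ((int)ev[i] != state + 1) {
			printf("bad transition: %c after %c\n",
			       names[ev[i]], state < 0 ? '-' : names[state]);
			return -1;
		}
		state = ev[i];
	}
	return 0;
}

int main(void)
{
	/* Pre-fix blk-mq stream: the Insert event is missing. */
	enum rq_event broken[] = { EV_Q, EV_D, EV_C };
	/* Post-fix stream: trace_block_rq_insert() fills the gap. */
	enum rq_event fixed[]  = { EV_Q, EV_I, EV_D, EV_C };

	check_stream(broken, 3);	/* reports "D after Q" */
	return check_stream(fixed, 4) ? 1 : 0;
}
```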
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 588479d58f52..6a680d4de7f1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 
 	spin_lock_irqsave(&vblk->vq_lock, flags);
 	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		virtqueue_kick(vblk->vq);
 		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
-		virtqueue_kick(vblk->vq);
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	if (last)
 		virtqueue_kick(vblk->vq);
+
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
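The reordering above moves both virtqueue_kick() calls inside the vq_lock critical section. Roughly: virtqueue_kick() reads and updates ring bookkeeping that the add path also touches, so a kick issued after the unlock can race with a concurrent add/kick from another CPU and the device can end up never being told about queued work; that is the hang Dave Chinner hit. A stripped-down model of the locking pattern, with a pthread mutex standing in for vq_lock and a counter standing in for the ring's avail index (all names here are illustrative, not virtio's):

```c
#include <pthread.h>
#include <stdio.h>

/* Toy model: the producer's published ring position and the device's
 * last-notified view of it. */
struct toy_vq {
	pthread_mutex_t lock;		/* stands in for vblk->vq_lock   */
	unsigned int avail_idx;		/* producer's published position */
	unsigned int device_idx;	/* what the device was last told */
};

static void toy_add(struct toy_vq *vq)  { vq->avail_idx++; }
static void toy_kick(struct toy_vq *vq) { vq->device_idx = vq->avail_idx; }

/* The fixed pattern: add and kick happen inside one critical section,
 * so no other CPU can interleave its own add/kick pair between them
 * and leave the device's view inconsistent. */
static void queue_one(struct toy_vq *vq)
{
	pthread_mutex_lock(&vq->lock);
	toy_add(vq);
	toy_kick(vq);	/* ordered with the add: safe */
	pthread_mutex_unlock(&vq->lock);
	/* Pre-fix, the kick ran here, after the unlock, where it could
	 * race with a concurrent queue_one() touching the same ring
	 * state; that is the ordering the patch title refers to. */
}

int main(void)
{
	struct toy_vq vq = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	queue_one(&vq);
	printf("avail=%u device=%u\n", vq.avail_idx, vq.device_idx);
	return 0;
}
```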
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
-			  unsigned short max_sectors)
+			  unsigned int max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
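
This one-word type change matters because callers pass max_sectors values derived from queue limits, and anything at or above 0x10000 silently truncates when squeezed into an unsigned short: 0x10000 becomes 0, after which a "too many sectors" limit check rejects every page. A two-line demonstration of the truncation:

```c
#include <stdio.h>

int main(void)
{
	unsigned int max_sectors = 0x10000;	/* 65536: legal with large queue limits */
	unsigned short truncated = max_sectors;	/* the old parameter type */

	/* 0x10000 does not fit in 16 bits, so the function would have
	 * seen a limit of 0 and refused to add anything to the bio. */
	printf("caller passed %u, callee saw %u\n",
	       max_sectors, (unsigned int)truncated);
	return 0;
}
```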
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f26ec20f6354..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -505,6 +505,9 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	| \
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
+#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
+
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
 	if (q->queue_lock)
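
QUEUE_FLAG_MQ_DEFAULT is simply a mask of the bits every blk-mq queue should start with; OR-ing it into queue_flags in blk_mq_init_queue() (in the blk-mq.c hunk above) is what flips IO accounting from opt-in to on-by-default. A small sketch of that flag-mask idiom, with illustrative bit positions that differ from the kernel's real QUEUE_FLAG_* values:

```c
#include <stdio.h>

/* Illustrative bit positions, not the kernel's actual values. */
enum {
	QUEUE_FLAG_IO_STAT   = 3,
	QUEUE_FLAG_SAME_COMP = 4,
};

/* Default bits for every blk-mq queue, mirroring the blkdev.h hunk. */
#define QUEUE_FLAG_MQ_DEFAULT	((1u << QUEUE_FLAG_IO_STAT) | \
				 (1u << QUEUE_FLAG_SAME_COMP))

int main(void)
{
	unsigned int queue_flags = 0;

	/* What blk_mq_init_queue() now does: stats enabled up front... */
	queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	/* ...while a per-queue knob (sysfs, in the real kernel) can
	 * still clear the bit later for queues that do not want it. */
	printf("io_stat default: %s\n",
	       (queue_flags & (1u << QUEUE_FLAG_IO_STAT)) ? "on" : "off");
	return 0;
}
```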