Diffstat (limited to 'block/blk-mq.c')

 block/blk-mq.c | 102 +++++++++++-------------------------------------------------------------
 1 file changed, 19 insertions(+), 83 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd153fde..883f72089015 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 	set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-				       bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
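
The two hunks above fold the blk_mq_alloc_rq()/__blk_mq_alloc_request() forwarding pair into a single function; call sites keep the same shape. A minimal sketch of what a caller sees, pieced together from helpers visible elsewhere in this diff (illustrative fragment only, not part of the patch; assumes q, rw and a submission context are in scope):

	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
	struct request *rq;

	/* Non-blocking attempt: returns NULL when no tag is available. */
	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
	if (rq)
		blk_mq_rq_ctx_init(q, ctx, rq, rw);
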
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
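
With the open-coded bio walk replaced by blk_update_request(), finishing a whole request becomes the trivial case of partial completion: the helper returns true while bytes remain and false once the request is retired. Since this view is limited to block/blk-mq.c, the companion full-completion wrapper is not shown; a sketch of what it looks like, assuming the usual blk_rq_bytes() helper for the request's remaining byte count:

	static inline void blk_mq_end_io(struct request *rq, int error)
	{
		/* Completing every remaining byte must retire the request. */
		bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));

		BUG_ON(!done);
	}
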
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+			   bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
 	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
 
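
blk_mq_run_request() is gone; its FLUSH-bypass job is handled by the new REQ_FLUSH_SEQ test inside blk_mq_insert_request(), so former internal callers switch to the merged entry point. A before/after sketch for a hypothetical call site, with argument order taken from the signatures in the hunk above:

	/* Before: the internal bypass variant, always tail-inserted. */
	blk_mq_run_request(rq, true, false);

	/* After: one entry point; at_head = false keeps the old placement,
	 * and REQ_FLUSH_SEQ requests skip blk_insert_flush() by themselves. */
	blk_mq_insert_request(rq, false, true, false);
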
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
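
The two added lines fold the bio's sync hint into the flags the request is allocated with, so it survives into rq->cmd_flags. A sketch of how is_sync is presumably derived earlier in blk_mq_make_request(), using the rw_is_sync() helper seen earlier in this diff (reconstruction, not part of the patch):

	const int is_sync = rw_is_sync(bio->bi_rw);	/* assumed derivation */
	int rw = bio_data_dir(bio);
	...
	if (is_sync)
		rw |= REQ_SYNC;		/* hint now reaches rq->cmd_flags */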