path: root/block/blk-mq.c
author		Christoph Hellwig <hch@infradead.org>	2014-03-21 10:57:37 -0400
committer	Jens Axboe <axboe@fb.com>		2014-03-21 10:57:37 -0400
commit		eeabc850b79336575da7be3dbe186a2da4de8293 (patch)
tree		4cc10d227c84bbe254bc3ce1b1513c9894ab364d /block/blk-mq.c
parent		081241e592c47f4ed2999a0b576ae85e765c6da4 (diff)
blk-mq: merge blk_mq_insert_request and blk_mq_run_request
It's almost identical to blk_mq_insert_request, so fold the two into one
slightly more generic function by making the flush special case a bit
smarter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
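The flush special case is where the two functions differed: the removed
blk_mq_run_request existed only to bypass the REQ_FLUSH|REQ_FUA check for
requests that are already part of a flush sequence. The merged function keeps
a single entry point by additionally testing REQ_FLUSH_SEQ, as the diff below
shows. A minimal userspace sketch of that predicate (the flag bit values here
are illustrative stand-ins, not the kernel's real definitions from
include/linux/blk_types.h):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit values; the real flags live in include/linux/blk_types.h. */
#define REQ_FLUSH      (1u << 0)  /* request is a cache flush */
#define REQ_FUA        (1u << 1)  /* forced unit access */
#define REQ_FLUSH_SEQ  (1u << 2)  /* request belongs to an in-progress flush sequence */

/*
 * Mirrors the merged check: brand-new flush/FUA requests are handed to
 * blk_insert_flush(), while requests already tagged REQ_FLUSH_SEQ are
 * inserted directly -- exactly what the removed blk_mq_run_request()
 * used to achieve by skipping the check entirely.
 */
static bool route_to_flush_machinery(unsigned int cmd_flags)
{
	return (cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
	       !(cmd_flags & REQ_FLUSH_SEQ);
}

int main(void)
{
	printf("plain write:          %d\n", route_to_flush_machinery(0));
	printf("new flush request:    %d\n", route_to_flush_machinery(REQ_FLUSH));
	printf("flush-sequence step:  %d\n",
	       route_to_flush_machinery(REQ_FLUSH | REQ_FLUSH_SEQ));
	return 0;
}

Former blk_mq_run_request(rq, run_queue, async) callers presumably become
blk_mq_insert_request(rq, false, run_queue, async): the old bypass always
inserted with at_head == false, and the REQ_FLUSH_SEQ test now routes such
requests past blk_insert_flush() just as the bypass did.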
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	53
1 file changed, 10 insertions, 43 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a56e77383738..81ff7879bac8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -724,61 +724,28 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-		bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+		bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
 	}
 
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
-	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
-
 	blk_mq_put_ctx(current_ctx);
 
 	if (run_queue)