Diffstat (limited to 'block')
-rw-r--r--  block/blk-exec.c  |  2
-rw-r--r--  block/blk-flush.c |  4
-rw-r--r--  block/blk-mq.c    | 53
-rw-r--r--  block/blk-mq.h    |  1
4 files changed, 13 insertions(+), 47 deletions(-)
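
Read together, the hunks below fold the internal helper blk_mq_run_request() into blk_mq_insert_request(): the merged function derives the queue from rq->q instead of taking it as an argument, and gains the helper's async flag. For orientation, the prototypes before and after, as they appear in the hunks:

/* before: two entry points, with the queue passed explicitly */
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
		bool at_head, bool run_queue);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);

/* after: a single entry point; the queue is taken from rq->q */
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async);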
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613bb4c79..dbf4502b1d67 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
	 * be resued after dying flag is set
	 */
	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

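
The caller-side translation is mechanical. The old blk_mq_insert_request() always ran the hardware queue synchronously, so this call site passes async = false to keep its behaviour unchanged; a sketch of the before/after call:

/* before: queue passed explicitly, queue run always synchronous */
blk_mq_insert_request(q, rq, at_head, true);

/* after: q comes from rq->q; async = false preserves the synchronous run */
blk_mq_insert_request(rq, at_head, true, false);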
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b697f5db..f598f794c3c6 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
	rq = container_of(work, struct request, mq_flush_work);

	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }

 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
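
Both flush-path call sites previously used blk_mq_run_request(), which always inserted at the tail (it called __blk_mq_insert_request() with at_head hard-coded to false), so the new calls pass at_head = false to preserve that. A sketch of the mapping:

/* before: the helper implied tail insertion and bypassed the flush check */
blk_mq_run_request(rq, run_queue, async);

/* after: at_head = false keeps tail insertion; the REQ_FLUSH_SEQ test
 * added to blk_mq_insert_request() now supplies the bypass for requests
 * already owned by the flush machinery */
blk_mq_insert_request(rq, false, run_queue, async);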
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a56e77383738..81ff7879bac8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -724,61 +724,28 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
	blk_mq_add_timer(rq);
 }

-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-		bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+		bool async)
 {
+	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;

-	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
		blk_insert_flush(rq);
	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
	}

-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
-	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
-
	blk_mq_put_ctx(current_ctx);

	if (run_queue)
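
Assembled from the '+' and context lines of this hunk, the merged function reads as follows after the patch. The closing lines sit just past the hunk, so the final blk_mq_run_hw_queue(hctx, async) call is inferred from the tail of the old blk_mq_run_request():

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	/* if the request's CPU went offline, redirect it to the current ctx */
	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * Route new FLUSH/FUA requests through the flush machinery, but
	 * insert requests marked REQ_FLUSH_SEQ (already part of a flush
	 * sequence) directly -- this check replaces the old bypass helper.
	 */
	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
		blk_insert_flush(rq);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(current_ctx);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}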
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 361f9343dab1..ebbe6bac9d61 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };

 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
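
This is the block layer's private block/blk-mq.h; with the diffstat limited to 'block', the matching change to the public prototype does not appear above. Presumably the declaration in include/linux/blk-mq.h was updated in the same commit to:

/* include/linux/blk-mq.h -- assumed location of the public prototype,
 * outside this diffstat's filter */
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async);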