diff options
author | Christoph Hellwig <hch@infradead.org> | 2014-02-07 13:22:36 -0500 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-02-07 13:58:54 -0500 |
commit | 72a0a36e2854a6eadb4cf2561858f613f9cd4639 (patch) | |
tree | 26679a518f6e06640fd34a04fcff733450ef15e3 /block | |
parent | 96d2e8b5e288e9d2a40b95161b855944846526a5 (diff) |
blk-mq: support at_head insertions for blk_execute_rq
This is needed for proper SG_IO operation as well as various uses of
blk_execute_rq from the SCSI midlayer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-exec.c | 2 | ||||
-rw-r--r-- | block/blk-mq.c | 17 |
2 files changed, 11 insertions, 8 deletions
diff --git a/block/blk-exec.c b/block/blk-exec.c index bbfc072a79c2..c68613bb4c79 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
65 | * be resued after dying flag is set | 65 | * be resued after dying flag is set |
66 | */ | 66 | */ |
67 | if (q->mq_ops) { | 67 | if (q->mq_ops) { |
68 | blk_mq_insert_request(q, rq, true); | 68 | blk_mq_insert_request(q, rq, at_head, true); |
69 | return; | 69 | return; |
70 | } | 70 | } |
71 | 71 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 9072d0ab184f..c9306e3403fe 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -714,13 +714,16 @@ static void blk_mq_work_fn(struct work_struct *work) | |||
714 | } | 714 | } |
715 | 715 | ||
716 | static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, | 716 | static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, |
717 | struct request *rq) | 717 | struct request *rq, bool at_head) |
718 | { | 718 | { |
719 | struct blk_mq_ctx *ctx = rq->mq_ctx; | 719 | struct blk_mq_ctx *ctx = rq->mq_ctx; |
720 | 720 | ||
721 | trace_block_rq_insert(hctx->queue, rq); | 721 | trace_block_rq_insert(hctx->queue, rq); |
722 | 722 | ||
723 | list_add_tail(&rq->queuelist, &ctx->rq_list); | 723 | if (at_head) |
724 | list_add(&rq->queuelist, &ctx->rq_list); | ||
725 | else | ||
726 | list_add_tail(&rq->queuelist, &ctx->rq_list); | ||
724 | blk_mq_hctx_mark_pending(hctx, ctx); | 727 | blk_mq_hctx_mark_pending(hctx, ctx); |
725 | 728 | ||
726 | /* | 729 | /* |
@@ -730,7 +733,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, | |||
730 | } | 733 | } |
731 | 734 | ||
732 | void blk_mq_insert_request(struct request_queue *q, struct request *rq, | 735 | void blk_mq_insert_request(struct request_queue *q, struct request *rq, |
733 | bool run_queue) | 736 | bool at_head, bool run_queue) |
734 | { | 737 | { |
735 | struct blk_mq_hw_ctx *hctx; | 738 | struct blk_mq_hw_ctx *hctx; |
736 | struct blk_mq_ctx *ctx, *current_ctx; | 739 | struct blk_mq_ctx *ctx, *current_ctx; |
@@ -749,7 +752,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq, | |||
749 | rq->mq_ctx = ctx; | 752 | rq->mq_ctx = ctx; |
750 | } | 753 | } |
751 | spin_lock(&ctx->lock); | 754 | spin_lock(&ctx->lock); |
752 | __blk_mq_insert_request(hctx, rq); | 755 | __blk_mq_insert_request(hctx, rq, at_head); |
753 | spin_unlock(&ctx->lock); | 756 | spin_unlock(&ctx->lock); |
754 | 757 | ||
755 | blk_mq_put_ctx(current_ctx); | 758 | blk_mq_put_ctx(current_ctx); |
@@ -781,7 +784,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async) | |||
781 | 784 | ||
782 | /* ctx->cpu might be offline */ | 785 | /* ctx->cpu might be offline */ |
783 | spin_lock(&ctx->lock); | 786 | spin_lock(&ctx->lock); |
784 | __blk_mq_insert_request(hctx, rq); | 787 | __blk_mq_insert_request(hctx, rq, false); |
785 | spin_unlock(&ctx->lock); | 788 | spin_unlock(&ctx->lock); |
786 | 789 | ||
787 | blk_mq_put_ctx(current_ctx); | 790 | blk_mq_put_ctx(current_ctx); |
@@ -819,7 +822,7 @@ static void blk_mq_insert_requests(struct request_queue *q, | |||
819 | rq = list_first_entry(list, struct request, queuelist); | 822 | rq = list_first_entry(list, struct request, queuelist); |
820 | list_del_init(&rq->queuelist); | 823 | list_del_init(&rq->queuelist); |
821 | rq->mq_ctx = ctx; | 824 | rq->mq_ctx = ctx; |
822 | __blk_mq_insert_request(hctx, rq); | 825 | __blk_mq_insert_request(hctx, rq, false); |
823 | } | 826 | } |
824 | spin_unlock(&ctx->lock); | 827 | spin_unlock(&ctx->lock); |
825 | 828 | ||
@@ -971,7 +974,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
971 | __blk_mq_free_request(hctx, ctx, rq); | 974 | __blk_mq_free_request(hctx, ctx, rq); |
972 | else { | 975 | else { |
973 | blk_mq_bio_to_request(rq, bio); | 976 | blk_mq_bio_to_request(rq, bio); |
974 | __blk_mq_insert_request(hctx, rq); | 977 | __blk_mq_insert_request(hctx, rq, false); |
975 | } | 978 | } |
976 | 979 | ||
977 | spin_unlock(&ctx->lock); | 980 | spin_unlock(&ctx->lock); |