summaryrefslogtreecommitdiffstats
path: root/block/blk-mq.c
diff options
context:
space:
mode:
author    Jens Axboe <axboe@fb.com>    2017-03-02 15:26:04 -0500
committer Jens Axboe <axboe@fb.com>    2017-03-02 16:30:51 -0500
commit    113285b473824922498d07d7f82459507b9792eb (patch)
tree      e4ffafcec58afc46781bfa91ecc2b232efa0ceee /block/blk-mq.c
parent    7b36a7189fc320f0b783dd51bd1f541db56cfbdd (diff)
blk-mq: ensure that bd->last is always set correctly
When drivers are called with a request in blk-mq, blk-mq flags the
state such that the driver knows if this is the last request in this
call chain or not. The driver can then use that information to defer
kicking off IO until bd->last is true.

However, with blk-mq and scheduling, we need to allocate a driver tag
for a request before it can be issued. If we fail to allocate such a
tag, we could end up in the situation where the last request issued
did not have bd->last == true set. This can then cause a driver hang.

This fixes a hang with virtio-blk, which uses bd->last as a hint on
whether to kick the queue or not.

Reported-by: Chris Mason <clm@fb.com>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 50
1 file changed, 43 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a5e66a7a3506..e797607dab89 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -876,12 +876,9 @@ done:
 	return false;
 }
 
-static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
-				  struct request *rq)
+static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+				    struct request *rq)
 {
-	if (rq->tag == -1 || rq->internal_tag == -1)
-		return;
-
 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
 	rq->tag = -1;
 
@@ -891,6 +888,26 @@ static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
+static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
+				       struct request *rq)
+{
+	if (rq->tag == -1 || rq->internal_tag == -1)
+		return;
+
+	__blk_mq_put_driver_tag(hctx, rq);
+}
+
+static void blk_mq_put_driver_tag(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	if (rq->tag == -1 || rq->internal_tag == -1)
+		return;
+
+	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+	__blk_mq_put_driver_tag(hctx, rq);
+}
+
 /*
  * If we fail getting a driver tag because all the driver tags are already
  * assigned and on the dispatch list, BUT the first entry does not have a
@@ -1000,7 +1017,19 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 
 		bd.rq = rq;
 		bd.list = dptr;
-		bd.last = list_empty(list);
+
+		/*
+		 * Flag last if we have no more requests, or if we have more
+		 * but can't assign a driver tag to it.
+		 */
+		if (list_empty(list))
+			bd.last = true;
+		else {
+			struct request *nxt;
+
+			nxt = list_first_entry(list, struct request, queuelist);
+			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
 		switch (ret) {
@@ -1008,7 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 			queued++;
 			break;
 		case BLK_MQ_RQ_QUEUE_BUSY:
-			blk_mq_put_driver_tag(hctx, rq);
+			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
 			break;
@@ -1038,6 +1067,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	 * that is where we will continue on next queue run.
 	 */
 	if (!list_empty(list)) {
+		/*
+		 * If we got a driver tag for the next request already,
+		 * free it again.
+		 */
+		rq = list_first_entry(list, struct request, queuelist);
+		blk_mq_put_driver_tag(rq);
+
 		spin_lock(&hctx->lock);
 		list_splice_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);