diff options
author | Jens Axboe <axboe@kernel.dk> | 2018-12-17 23:11:17 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-12-17 23:31:42 -0500 |
commit | 3c94d83cb352627f221d971b05f163c17527de74 (patch) | |
tree | 60f1445af634c7162225f86c9a791b6e2a6b382c /block | |
parent | e5edd5f298fafda28284bafb8371e6f0b7681035 (diff) |
blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
There's a single user of this function, dm, and dm just wants
to check if IO is inflight, not that it's just allocated.
This fixes a hang with srp/002 in blktests with dm, where it tries
to suspend but waits for inflight IO to finish first. As it checks
for just allocated requests, this fails.
Tested-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c index 6847f014606b..b0888a89fa66 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c
@@ -805,14 +805,14 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
805 | } | 805 | } |
806 | EXPORT_SYMBOL(blk_mq_tag_to_rq); | 806 | EXPORT_SYMBOL(blk_mq_tag_to_rq); |
807 | 807 | ||
808 | static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq, | 808 | static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, |
809 | void *priv, bool reserved) | 809 | void *priv, bool reserved) |
810 | { | 810 | { |
811 | /* | 811 | /* |
812 | * If we find a request, we know the queue is busy. Return false | 812 | * If we find a request that is inflight and the queue matches, |
813 | * to stop the iteration. | 813 | * we know the queue is busy. Return false to stop the iteration. |
814 | */ | 814 | */ |
815 | if (rq->q == hctx->queue) { | 815 | if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { |
816 | bool *busy = priv; | 816 | bool *busy = priv; |
817 | 817 | ||
818 | *busy = true; | 818 | *busy = true; |
@@ -822,14 +822,14 @@ static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
822 | return true; | 822 | return true; |
823 | } | 823 | } |
824 | 824 | ||
825 | bool blk_mq_queue_busy(struct request_queue *q) | 825 | bool blk_mq_queue_inflight(struct request_queue *q) |
826 | { | 826 | { |
827 | bool busy = false; | 827 | bool busy = false; |
828 | 828 | ||
829 | blk_mq_queue_tag_busy_iter(q, blk_mq_check_busy, &busy); | 829 | blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); |
830 | return busy; | 830 | return busy; |
831 | } | 831 | } |
832 | EXPORT_SYMBOL_GPL(blk_mq_queue_busy); | 832 | EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); |
833 | 833 | ||
834 | static void blk_mq_rq_timed_out(struct request *req, bool reserved) | 834 | static void blk_mq_rq_timed_out(struct request *req, bool reserved) |
835 | { | 835 | { |