diff options
author | Ming Lei <ming.lei@redhat.com> | 2019-04-08 18:31:21 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-04-10 11:57:33 -0400 |
commit | 1b8f21b74c3c9c82fce5a751d7aefb7cc0b8d33d (patch) | |
tree | b6e8eac9fe39c7d114587047c6addc7728009bbd | |
parent | 1978f30a87732d4d9072a20abeded9fe17884f1b (diff) |
blk-mq: introduce blk_mq_complete_request_sync()
In NVMe's error handler, the driver follows the typical steps of tearing
down the hardware to recover the controller:
1) stop blk_mq hw queues
2) stop the real hw queues
3) cancel in-flight requests via
blk_mq_tagset_busy_iter(tags, cancel_request, ...)
cancel_request():
mark the request as abort
blk_mq_complete_request(req);
4) destroy real hw queues
However, there may be a race between #3 and #4, because blk_mq_complete_request()
may run q->mq_ops->complete(rq) remotely and asynchronously, and
->complete(rq) may be run after #4.
This patch introduces blk_mq_complete_request_sync() for fixing the
above race.
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: James Smart <james.smart@broadcom.com>
Cc: linux-nvme@lists.infradead.org
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | block/blk-mq.c | 7 | ||||
-rw-r--r-- | include/linux/blk-mq.h | 1 |
2 files changed, 8 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c index a9354835cf51..9516304a38ee 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -654,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq) | |||
654 | } | 654 | } |
655 | EXPORT_SYMBOL(blk_mq_complete_request); | 655 | EXPORT_SYMBOL(blk_mq_complete_request); |
656 | 656 | ||
/*
 * blk_mq_complete_request_sync - complete a request in the caller's context
 * @rq: the request to complete
 *
 * Marks @rq as complete and invokes the driver's ->complete() callback
 * directly, synchronously in the current context — unlike
 * blk_mq_complete_request(), which may run ->complete(rq) remotely and
 * asynchronously. Intended for error handlers (e.g. NVMe controller
 * recovery) that must guarantee ->complete() has finished before tearing
 * down the real hardware queues.
 *
 * NOTE(review): callers are responsible for ensuring no concurrent
 * completion of @rq is in flight — confirm against the tagset iterator
 * usage in the driver.
 */
void blk_mq_complete_request_sync(struct request *rq)
{
	/* State must be COMPLETE before the driver callback observes it. */
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
663 | |||
657 | int blk_mq_request_started(struct request *rq) | 664 | int blk_mq_request_started(struct request *rq) |
658 | { | 665 | { |
659 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; | 666 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index cb2aa7ecafff..db29928de467 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); | |||
302 | void blk_mq_kick_requeue_list(struct request_queue *q); | 302 | void blk_mq_kick_requeue_list(struct request_queue *q); |
303 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); | 303 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
304 | bool blk_mq_complete_request(struct request *rq); | 304 | bool blk_mq_complete_request(struct request *rq); |
305 | void blk_mq_complete_request_sync(struct request *rq); | ||
305 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, | 306 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, |
306 | struct bio *bio); | 307 | struct bio *bio); |
307 | bool blk_mq_queue_stopped(struct request_queue *q); | 308 | bool blk_mq_queue_stopped(struct request_queue *q); |