author     Damien Le Moal <damien.lemoal@wdc.com>    2018-02-28 12:35:29 -0500
committer  Jens Axboe <axboe@kernel.dk>              2018-03-01 10:39:24 -0500
commit     f3bc78d2d4b489590540ab2788d5376583e28173
tree       0a6dd46bd1feef14581a7fd0ddc7d704a54c4952 /block/mq-deadline.c
parent     468f098734ab095b72d8af26bcd4bcb2ed31a3be
mq-deadline: Make sure to always unlock zones
In case of a failed write request (all retries failed) and when using
libata, the SCSI error handler calls scsi_finish_command(). In the
case of blk-mq this means that scsi_mq_done() does not get called, so
blk_mq_complete_request() does not get called and the mq-deadline
.completed_request() method is not called either. This leaves the
target zone of the failed write request in a locked state, preventing
any new write requests from being issued to the same zone.
Fix this by replacing the .completed_request() method with the
.finish_request() method, which is always called whether or not a
request completes successfully. Since the blk-mq core only calls the
.finish_request() method if a .prepare_request() method exists, add a
dummy .prepare_request() method.
Fixes: 5700f69178e9 ("mq-deadline: Introduce zone locking support")
Cc: Hannes Reinecke <hare@suse.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
[ bvanassche: edited patch description ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/mq-deadline.c')
 block/mq-deadline.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index c56f211c8440..8ec0ba9f5386 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -536,12 +536,21 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 }
 
 /*
+ * Nothing to do here. This is defined only to ensure that .finish_request
+ * method is called upon request completion.
+ */
+static void dd_prepare_request(struct request *rq, struct bio *bio)
+{
+}
+
+/*
  * For zoned block devices, write unlock the target zone of
  * completed write requests. Do this while holding the zone lock
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * while deadline_next_request() are executing.
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
  */
-static void dd_completed_request(struct request *rq)
+static void dd_finish_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = {
 	.ops.mq = {
 		.insert_requests	= dd_insert_requests,
 		.dispatch_request	= dd_dispatch_request,
-		.completed_request	= dd_completed_request,
+		.prepare_request	= dd_prepare_request,
+		.finish_request		= dd_finish_request,
 		.next_request		= elv_rb_latter_request,
 		.former_request		= elv_rb_former_request,
 		.bio_merge		= dd_bio_merge,
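
For reference, the diff context above cuts off the body of dd_finish_request().
A minimal sketch of the completion-side unlock it performs, assuming the
zone-locking machinery introduced by commit 5700f69178e9 (the dd->zone_lock
spinlock and the blk_req_zone_write_lock()/blk_req_zone_write_unlock()
helpers); treat this as an illustration rather than the verbatim source:

/*
 * Sketch, assuming the zone locking support of commit 5700f69178e9:
 * dd_dispatch_request() takes the target zone's write lock with
 * blk_req_zone_write_lock(), and this hook releases it. Holding
 * dd->zone_lock keeps the unlock from racing with
 * deadline_fifo_request() or deadline_next_request().
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

The dummy dd_prepare_request() is what guarantees this runs at all: as the
description notes, the blk-mq core only invokes an elevator's
.finish_request() hook when a .prepare_request() method is present.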