aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-06-14 07:58:45 -0400
committerJens Axboe <axboe@kernel.dk>2018-06-14 10:39:46 -0400
commitda661267398869a553b7f67d739d360aaa1361b6 (patch)
tree375b099f3b56b2403b7e2802dd13afe42f2c5a11
parenta347c7ad8edf4c5685154f3fdc3c12fc1db800ba (diff)
blk-mq: don't time out requests again that are in the timeout handler
We can currently call the timeout handler again on a request that has
already been handed over to the timeout handler.  Prevent that with a new
flag.

Fixes: 12f5b931 ("blk-mq: Remove generation seqeunce")
Reported-by: Andrew Randrianasulu <randrianasulu@gmail.com>
Tested-by: Andrew Randrianasulu <randrianasulu@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-mq.c5
-rw-r--r--include/linux/blkdev.h2
2 files changed, 7 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2be78cc30ec5..8e57b84e50e9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
 	if (blk_mq_request_started(rq)) {
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+		rq->rq_flags &= ~RQF_TIMED_OUT;
 		if (q->dma_drain_size && blk_rq_bytes(rq))
 			rq->nr_phys_segments--;
 	}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+	req->rq_flags |= RQF_TIMED_OUT;
 	if (req->q->mq_ops->timeout) {
 		enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
 	}
 
+	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_add_timer(req);
 }
 
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
 		return false;
+	if (rq->rq_flags & RQF_TIMED_OUT)
+		return false;
 
 	deadline = blk_rq_deadline(rq);
 	if (time_after_eq(jiffies, deadline))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bca3a92eb55f..fa6f11751430 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -127,6 +127,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
 /* already slept for hybrid poll */
 #define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \