 block/blk-mq.c      | 39
 block/blk-mq.h      |  2
 block/blk-timeout.c | 16
 block/blk.h         |  3
 4 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d5650d75aef..a84112c94e74 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -378,7 +378,15 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * REQ_ATOMIC_STARTED is seen.
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
+
+	/*
+	 * Mark us as started and clear complete. Complete might have been
+	 * set if requeue raced with timeout, which then marked it as
+	 * complete. So be sure to clear complete again when we start
+	 * the request, otherwise we'll ignore the completion event.
+	 */
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -485,6 +493,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
 }
 
+static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	/*
+	 * We know that complete is set at this point. If STARTED isn't set
+	 * anymore, then the request isn't active and the "timeout" should
+	 * just be ignored. This can happen due to the bitflag ordering.
+	 * Timeout first checks if STARTED is set, and if it is, assumes
+	 * the request is active. But if we race with completion, then
+	 * both flags will get cleared. So check here again, and ignore
+	 * a timeout event with a request that isn't active.
+	 */
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		return BLK_EH_NOT_HANDLED;
+
+	if (!q->mq_ops->timeout)
+		return BLK_EH_RESET_TIMER;
+
+	return q->mq_ops->timeout(rq);
+}
+
 static void blk_mq_rq_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
@@ -538,11 +568,6 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 	return false;
 }
 
-void blk_mq_add_timer(struct request *rq)
-{
-	__blk_add_timer(rq, NULL);
-}
-
 /*
  * Run this hardware queue, pulling any software queues mapped to it in.
  * Note that this function currently has various problems around ordering
@@ -799,7 +824,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	/*
 	 * We do this early, to ensure we are on the right CPU.
 	 */
-	blk_mq_add_timer(rq);
+	blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1400,7 +1425,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->sg_reserved_size = INT_MAX;
 
 	blk_queue_make_request(q, blk_mq_make_request);
-	blk_queue_rq_timed_out(q, set->ops->timeout);
+	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
 	if (set->timeout)
 		blk_queue_rq_timeout(q, set->timeout);
 
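
The core of the blk-mq.c change is the ordering contract between REQ_ATOM_STARTED and REQ_ATOM_COMPLETE: per the patch's own comment, completion clears both flags, so a timeout that fires concurrently can re-check STARTED in blk_mq_rq_timed_out() and return BLK_EH_NOT_HANDLED instead of acting on a request that is no longer active. Below is a minimal userspace sketch of that check; the bitop helpers and the request type are simplified stand-ins for the kernel's, so treat it as an illustration of the ordering argument, not kernel code.

	/*
	 * Userspace sketch of the STARTED/COMPLETE ordering used by
	 * blk_mq_rq_timed_out(). Helpers and types are simplified
	 * stand-ins, not the kernel definitions.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { REQ_ATOM_STARTED, REQ_ATOM_COMPLETE };

	struct request {
		atomic_ulong atomic_flags;
	};

	static void set_bit(int nr, atomic_ulong *flags)
	{
		atomic_fetch_or(flags, 1UL << nr);
	}

	static void clear_bit(int nr, atomic_ulong *flags)
	{
		atomic_fetch_and(flags, ~(1UL << nr));
	}

	static bool test_bit(int nr, atomic_ulong *flags)
	{
		return (atomic_load(flags) >> nr) & 1;
	}

	/* Mirrors blk_mq_start_request() after this patch: set STARTED
	 * and clear any COMPLETE left over from a requeue/timeout race. */
	static void start_request(struct request *rq)
	{
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
	}

	/* Completion clears both flags; losing STARTED is what lets a
	 * racing timeout see the request as no longer active. */
	static void complete_request(struct request *rq)
	{
		clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
	}

	/* The check at the top of blk_mq_rq_timed_out(). */
	static const char *timed_out(struct request *rq)
	{
		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
			return "BLK_EH_NOT_HANDLED (lost race with completion)";
		return "call driver ->timeout(), or BLK_EH_RESET_TIMER without one";
	}

	int main(void)
	{
		struct request rq = { .atomic_flags = 0 };

		start_request(&rq);
		printf("timeout while active:     %s\n", timed_out(&rq));

		complete_request(&rq);
		printf("timeout after completion: %s\n", timed_out(&rq));
		return 0;
	}

The second printf corresponds to the race the patch closes: the timeout path observed STARTED and assumed the request was active, but by the time the handler runs, completion has cleared both flags, so the stale timeout is ignored.
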
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5fa14f19f752..b41a784de50d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -51,6 +51,4 @@ void blk_mq_disable_hotplug(void);
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
 extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
 
-void blk_mq_add_timer(struct request *rq);
-
 #endif
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09e8af8186c..49988a3ca85c 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
 		__blk_complete_request(req);
 		break;
 	case BLK_EH_RESET_TIMER:
-		if (q->mq_ops)
-			blk_mq_add_timer(req);
-		else
-			blk_add_timer(req);
-
+		blk_add_timer(req);
 		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
@@ -170,7 +166,8 @@ void blk_abort_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
-void __blk_add_timer(struct request *req, struct list_head *timeout_list)
+static void __blk_add_timer(struct request *req,
+			    struct list_head *timeout_list)
 {
 	struct request_queue *q = req->q;
 	unsigned long expiry;
@@ -225,6 +222,11 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
  */
 void blk_add_timer(struct request *req)
 {
-	__blk_add_timer(req, &req->q->timeout_list);
+	struct request_queue *q = req->q;
+
+	if (q->mq_ops)
+		__blk_add_timer(req, NULL);
+	else
+		__blk_add_timer(req, &req->q->timeout_list);
 }
 
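
The blk-timeout.c side removes the last caller-visible split between the two queue types: blk_add_timer() becomes the single entry point, __blk_add_timer() goes static, and the q->mq_ops test lives in one place instead of in every caller (the BLK_EH_RESET_TIMER arm above used to carry it). A runnable sketch of that dispatch pattern follows; the add_timer()/__add_timer() names and the trimmed-down types are stand-ins, not the kernel's definitions.

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	struct request_queue {
		const void *mq_ops;		/* non-NULL for blk-mq queues */
		struct list_head timeout_list;	/* legacy path only */
	};

	struct request {
		struct request_queue *q;
	};

	/* Private worker, like the newly static __blk_add_timer(): a NULL
	 * timeout_list means blk-mq, which finds pending requests by tag. */
	static void __add_timer(struct request *req, struct list_head *timeout_list)
	{
		(void)req;
		if (timeout_list)
			printf("arm timer, link request on legacy timeout list\n");
		else
			printf("arm timer only (blk-mq)\n");
	}

	/* The single public entry point: the mq/legacy decision lives here. */
	static void add_timer(struct request *req)
	{
		struct request_queue *q = req->q;

		if (q->mq_ops)
			__add_timer(req, NULL);
		else
			__add_timer(req, &q->timeout_list);
	}

	int main(void)
	{
		static const char dummy_ops;	/* any non-NULL pointer will do */
		struct request_queue mq = { .mq_ops = &dummy_ops };
		struct request_queue legacy = { .mq_ops = NULL };
		struct request r1 = { .q = &mq }, r2 = { .q = &legacy };

		add_timer(&r1);		/* blk-mq request */
		add_timer(&r2);		/* legacy request */
		return 0;
	}

Passing a NULL timeout_list signals the blk-mq case, where pending requests are tracked through the tag map rather than a per-queue list; this matches the deleted blk_mq_add_timer() wrapper, which simply called __blk_add_timer(rq, NULL).
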
diff --git a/block/blk.h b/block/blk.h
index 1d880f1f957f..79be2cbce7fd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,9 +37,8 @@ bool __blk_end_bidi_request(struct request *rq, int error,
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 			  unsigned int *next_set);
-void __blk_add_timer(struct request *req, struct list_head *timeout_list);
+void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
-void blk_add_timer(struct request *);
 
 
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,