commit 2849450ad39d2e699fda2d5c6f41e05d87fd7004
author:    Mike Snitzer <snitzer@redhat.com>   2016-09-14 13:28:30 -0400
committer: Jens Axboe <axboe@fb.com>           2016-09-14 13:48:34 -0400
tree:      086e7fd583a30bdefcfe01bd3fef60eeda4ad742
parent:    c5c5ca777469f0ff854f1da0aff9b3a9051b3ef7

blk-mq: introduce blk_mq_delay_kick_requeue_list()
blk-mq: introduce blk_mq_delay_kick_requeue_list()
blk_mq_delay_kick_requeue_list() provides the ability to kick the
q->requeue_list after a specified time. To do this the request_queue's
'requeue_work' member was changed to a delayed_work.
blk_mq_delay_kick_requeue_list() allows DM to defer processing requeued
requests while it doesn't make sense to immediately requeue them
(e.g. when all paths in a DM multipath have failed).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 block/blk-mq.c         | 16 ++++++++++++----
 include/linux/blk-mq.h |  1 +
 include/linux/blkdev.h |  2 +-
 3 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index eea0d230faa1..7ddc7969fba4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -502,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
 static void blk_mq_requeue_work(struct work_struct *work)
 {
 	struct request_queue *q =
-		container_of(work, struct request_queue, requeue_work);
+		container_of(work, struct request_queue, requeue_work.work);
 	LIST_HEAD(rq_list);
 	struct request *rq, *next;
 	unsigned long flags;
@@ -557,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_cancel_requeue_work(struct request_queue *q)
 {
-	cancel_work_sync(&q->requeue_work);
+	cancel_delayed_work_sync(&q->requeue_work);
 }
 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
-	kblockd_schedule_work(&q->requeue_work);
+	kblockd_schedule_delayed_work(&q->requeue_work, 0);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+				    unsigned long msecs)
+{
+	kblockd_schedule_delayed_work(&q->requeue_work,
+				      msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
 void blk_mq_abort_requeue_list(struct request_queue *q)
 {
 	unsigned long flags;
@@ -2084,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	q->sg_reserved_size = INT_MAX;
 
-	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ff14f68067aa..60ef14cbcd2d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -233,6 +233,7 @@ void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq, int error);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 69aae720f4ef..c47c358ba052 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -449,7 +449,7 @@ struct request_queue {
 
 	struct list_head requeue_list;
 	spinlock_t requeue_lock;
-	struct work_struct requeue_work;
+	struct delayed_work requeue_work;
 
 	struct mutex sysfs_lock;
 