author		Christoph Hellwig <hch@lst.de>	2015-10-30 08:57:30 -0400
committer	Jens Axboe <axboe@fb.com>	2015-12-22 11:38:16 -0500
commit		287922eb0b186e2a5bf54fdd04b734c25c90035c
tree		bf7e8976139ac3eacfaab6ace9d1807088463140	/block/blk-timeout.c
parent		8c0b39155048d5a24f25c6c60aa83729927b04cd
block: defer timeouts to a workqueue
Timer context is not very useful for drivers to perform any meaningful abort
action from, so instead of calling into the driver from this useless context,
defer the timeout handling to a workqueue as soon as possible.
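Outside this file (and outside this diffstat), the timer callback itself survives but is reduced to kicking the work item. A minimal sketch of that companion blk-core.c change, under the assumption that it uses the existing kblockd helper:

/* Sketch of the companion change (not in this diffstat): the timer
 * no longer calls into the driver.  It only schedules
 * q->timeout_work on the kblockd workqueue, which then runs the new
 * blk_timeout_work() in process context.
 */
static void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	kblockd_schedule_work(&q->timeout_work);
}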
Note that while a delayed_work item would seem to be the right tool here, I
didn't dare use it because of the magic in blk_add_timer that pokes deep into
timer internals. But maybe this encourages Tejun to add a sensible API for
that to the workqueue code, and we'll all be fine in the end :)
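For comparison, a delayed_work variant might have looked like the sketch below. Both timeout_dwork and the rearming call are invented for illustration; the friction is that blk_add_timer rearms q->timeout with mod_timer() and jiffies rounding, for which mod_delayed_work() is only an approximate fit:

/* Hypothetical delayed_work variant -- NOT what this patch does.
 * timeout_dwork is an invented field for illustration only.
 */
INIT_DELAYED_WORK(&q->timeout_dwork, blk_timeout_work);

/* Where blk_add_timer() currently does mod_timer(&q->timeout, expiry),
 * it would instead need something like the following, with subtly
 * different semantics when the work is already executing:
 */
mod_delayed_work(system_wq, &q->timeout_dwork,
		 round_jiffies_up(expiry) - jiffies);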
Contains a major update from Keith Busch (illustrated below):
"This patch removes synchronizing the timeout work so that the timer can
start a freeze on its own queue. The timer enters the queue, so timer
context can only start a freeze, but not wait for frozen."
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
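The guard Keith describes is the new blk_queue_enter() call in the first hunk of the diff below, annotated here:

/* Top of the new blk_timeout_work(), from the hunk below.  The
 * "true" argument means nowait: if a freeze is already in progress,
 * the queue's percpu usage counter is no longer live, the enter
 * fails immediately instead of sleeping, and the work backs off.
 * Timeout handling can therefore start a freeze but never waits for
 * one to complete, which is what makes it safe to run from (or right
 * after) timer context.
 */
if (blk_queue_enter(q, true))
	return;
/* ... expire requests under q->queue_lock ... */
blk_queue_exit(q);	/* drop the reference taken above */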
Diffstat (limited to 'block/blk-timeout.c')
-rw-r--r--	block/blk-timeout.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 3610af561748..dd4fdfbcb3dd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
 	}
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *) data;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 	int next_set = 0;
 
+	if (blk_queue_enter(q, true))
+		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 	mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_exit(q);
 }
 
 /**
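For completeness: the queue also has to initialize the work item alongside the timer. In the companion blk-core.c change (again outside this diffstat), the queue allocation path would gain roughly the following; the exact placement is an assumption:

/* Sketch of queue setup after this patch: the timer still exists,
 * but its handler only schedules timeout_work, whose handler is the
 * new blk_timeout_work() shown in the diff above.
 */
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long)q);
INIT_WORK(&q->timeout_work, blk_timeout_work);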