author	Jens Axboe <axboe@kernel.dk>	2018-10-29 12:25:07 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 15:42:33 -0500
commit	4316b79e4321d4140164e42f228778e5bc66c84f (patch)
tree	cb499b7b547f1c1d17c0b72fbd77841314c8ff1f /block/blk-timeout.c
parent	92bc5a24844ada9b010f03c49a493e3edeadaa54 (diff)
block: kill legacy parts of timeout handling
The only user of legacy timing now is BSG, which is invoked from the mq
timeout handler. Kill the legacy code, and rename the q->rq_timed_out_fn
to q->bsg_job_timeout_fn.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
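For context, the dispatch that the removed blk_rq_timed_out() performed — call the per-queue timeout hook and act on its BLK_EH_* return value — survives only for BSG via the renamed callback. Below is a minimal userspace C sketch of that dispatch pattern; the struct and function names are illustrative stand-ins, not the kernel's actual types or API.

/* Illustrative userspace model, not kernel code: all names are stand-ins. */
#include <stdio.h>

struct request;	/* forward declaration; stands in for the block layer's request */

enum eh_timer_return { EH_DONE, EH_RESET_TIMER };

struct queue {
	/* per-queue timeout hook; after this patch only bsg-lib installs one */
	enum eh_timer_return (*bsg_job_timeout_fn)(struct request *rq);
};

struct request {
	struct queue *q;
	unsigned long deadline;	/* expiry in "ticks" */
};

/* called when the mq timeout scan finds rq->deadline in the past */
static void mq_rq_timed_out(struct request *rq)
{
	enum eh_timer_return ret = EH_RESET_TIMER;

	if (rq->q->bsg_job_timeout_fn)
		ret = rq->q->bsg_job_timeout_fn(rq);

	if (ret == EH_RESET_TIMER)
		rq->deadline += 30;	/* re-arm the request with a fresh timeout */
	/* EH_DONE: the driver owns completion of the request from here on */
}

static enum eh_timer_return bsg_timeout(struct request *rq)
{
	(void)rq;
	printf("bsg job timed out, asking for a timer reset\n");
	return EH_RESET_TIMER;
}

int main(void)
{
	struct queue q = { .bsg_job_timeout_fn = bsg_timeout };
	struct request rq = { .q = &q, .deadline = 0 };

	mq_rq_timed_out(&rq);
	printf("new deadline: %lu\n", rq.deadline);
	return 0;
}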
Diffstat (limited to 'block/blk-timeout.c')
-rw-r--r--	block/blk-timeout.c	99
1 file changed, 7 insertions(+), 92 deletions(-)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index f2cfd56e1606..6428d458072a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -78,70 +78,6 @@ void blk_delete_timer(struct request *req)
 	list_del_init(&req->timeout_list);
 }
 
-static void blk_rq_timed_out(struct request *req)
-{
-	struct request_queue *q = req->q;
-	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
-
-	if (q->rq_timed_out_fn)
-		ret = q->rq_timed_out_fn(req);
-	switch (ret) {
-	case BLK_EH_RESET_TIMER:
-		blk_add_timer(req);
-		blk_clear_rq_complete(req);
-		break;
-	case BLK_EH_DONE:
-		/*
-		 * LLD handles this for now but in the future
-		 * we can send a request msg to abort the command
-		 * and we can move more of the generic scsi eh code to
-		 * the blk layer.
-		 */
-		break;
-	default:
-		printk(KERN_ERR "block: bad eh return: %d\n", ret);
-		break;
-	}
-}
-
-static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
-				 unsigned int *next_set)
-{
-	const unsigned long deadline = blk_rq_deadline(rq);
-
-	if (time_after_eq(jiffies, deadline)) {
-		list_del_init(&rq->timeout_list);
-
-		/*
-		 * Check if we raced with end io completion
-		 */
-		if (!blk_mark_rq_complete(rq))
-			blk_rq_timed_out(rq);
-	} else if (!*next_set || time_after(*next_timeout, deadline)) {
-		*next_timeout = deadline;
-		*next_set = 1;
-	}
-}
-
-void blk_timeout_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, timeout_work);
-	unsigned long flags, next = 0;
-	struct request *rq, *tmp;
-	int next_set = 0;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
-		blk_rq_check_expired(rq, &next, &next_set);
-
-	if (next_set)
-		mod_timer(&q->timeout, round_jiffies_up(next));
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
 /**
  * blk_abort_request -- Request request recovery for the specified command
  * @req:	pointer to the request of interest
@@ -153,20 +89,13 @@ void blk_timeout_work(struct work_struct *work)
  */
 void blk_abort_request(struct request *req)
 {
-	if (req->q->mq_ops) {
-		/*
-		 * All we need to ensure is that timeout scan takes place
-		 * immediately and that scan sees the new timeout value.
-		 * No need for fancy synchronizations.
-		 */
-		blk_rq_set_deadline(req, jiffies);
-		kblockd_schedule_work(&req->q->timeout_work);
-	} else {
-		if (blk_mark_rq_complete(req))
-			return;
-		blk_delete_timer(req);
-		blk_rq_timed_out(req);
-	}
+	/*
+	 * All we need to ensure is that timeout scan takes place
+	 * immediately and that scan sees the new timeout value.
+	 * No need for fancy synchronizations.
+	 */
+	blk_rq_set_deadline(req, jiffies);
+	kblockd_schedule_work(&req->q->timeout_work);
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
@@ -194,13 +123,6 @@ void blk_add_timer(struct request *req)
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
-	if (!q->mq_ops)
-		lockdep_assert_held(q->queue_lock);
-
-	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
-	if (!q->mq_ops && !q->rq_timed_out_fn)
-		return;
-
 	BUG_ON(!list_empty(&req->timeout_list));
 
 	/*
@@ -214,13 +136,6 @@ void blk_add_timer(struct request *req)
 	blk_rq_set_deadline(req, jiffies + req->timeout);
 
 	/*
-	 * Only the non-mq case needs to add the request to a protected list.
-	 * For the mq case we simply scan the tag map.
-	 */
-	if (!q->mq_ops)
-		list_add_tail(&req->timeout_list, &req->q->timeout_list);
-
-	/*
 	 * If the timer isn't already pending or this timeout is earlier
 	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
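The final hunk ends at the comment describing the queue-timer policy in blk_add_timer(): only modify the timer when it is not pending or this request's deadline is earlier, and round the expiry up to the next second so nearby timeouts are coalesced into one wakeup. Here is a small standalone C sketch of that coalescing policy, under assumed names and an assumed tick rate; it is not the kernel's round_jiffies_up() or mod_timer().

/* Standalone sketch of the "round up and keep the earliest expiry" policy. */
#include <stdbool.h>
#include <stdio.h>

#define TICKS_PER_SEC 250UL	/* assumed tick rate, analogous to HZ */

/* round a tick count up to the next whole second */
static unsigned long round_up_to_second(unsigned long ticks)
{
	return ((ticks + TICKS_PER_SEC - 1) / TICKS_PER_SEC) * TICKS_PER_SEC;
}

struct queue_timer {
	bool pending;
	unsigned long expires;	/* in ticks */
};

/* arm the queue-wide timer, or pull it in if this deadline is earlier */
static void arm_queue_timer(struct queue_timer *t, unsigned long deadline)
{
	unsigned long expiry = round_up_to_second(deadline);

	if (!t->pending || expiry < t->expires) {
		t->expires = expiry;
		t->pending = true;
	}
}

int main(void)
{
	struct queue_timer t = { .pending = false, .expires = 0 };

	arm_queue_timer(&t, 3 * TICKS_PER_SEC + 17);	/* rounds up to 4 s */
	arm_queue_timer(&t, 2 * TICKS_PER_SEC + 1);	/* earlier deadline wins, rounds to 3 s */
	printf("queue timer expires at tick %lu\n", t.expires);
	return 0;
}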