author		Jens Axboe <axboe@kernel.dk>	2018-10-29 12:23:51 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 15:42:32 -0500
commit		a1ce35fa49852db60fc6e268038530be533c5b15 (patch)
tree		7a0bb9576a3f0e31e22f849463eee6cdda26bad5 /block/blk-ioc.c
parent		f382fb0bcef4c37dc049e9f6963e3baf204d815c (diff)
block: remove dead elevator code
This removes a bunch of core and elevator related code. On the core
front, we remove anything related to queue running, draining,
initialization, plugging, and congestions. We also kill anything
related to request allocation, merging, retrieval, and completion.

Remove any checking for single queue IO schedulers, as they no
longer exist. This means we can also delete a bunch of code related
to request issue, adding, completion, etc - and all the SQ related
ops and helpers.

Also kill the load_default_modules(), as all that did was provide
for a way to load the default single queue elevator.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--	block/blk-ioc.c	33
1 file changed, 6 insertions(+), 27 deletions(-)
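The hunks below all instantiate one pattern: a two-way elevator dispatch collapsing into a single blk-mq path. As a standalone illustration, here is a compilable sketch; the types are simplified stand-ins for the kernel's struct elevator_type, not its real definition:

/* Standalone sketch: the dual-path icq dispatch this commit deletes.
 * The types are simplified stand-ins, not the kernel's real ones. */
#include <stdbool.h>
#include <stdio.h>

struct io_cq { int id; };

struct elevator_type {
	bool uses_mq;	/* removed by this series */
	struct {
		struct { void (*exit_icq)(struct io_cq *); } mq;
		struct { void (*elevator_exit_icq_fn)(struct io_cq *); } sq;
	} ops;
};

static void mq_exit(struct io_cq *icq) { printf("mq exit_icq(%d)\n", icq->id); }

/* Before: every call site branched on whether the elevator was blk-mq. */
static void exit_icq_before(struct elevator_type *et, struct io_cq *icq)
{
	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);
}

/* After: legacy elevators are gone, so only the mq hook can exist. */
static void exit_icq_after(struct elevator_type *et, struct io_cq *icq)
{
	if (et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
}

int main(void)
{
	struct elevator_type et = { .uses_mq = true, .ops.mq.exit_icq = mq_exit };
	struct io_cq icq = { .id = 7 };

	exit_icq_before(&et, &icq);	/* old path, taken only when uses_mq */
	exit_icq_after(&et, &icq);	/* new path, unconditional */
	return 0;
}

With uses_mq always true and the sq ops table gone, each call site loses both the flag test and its else branch, which is exactly what the first and last hunks below do.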
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 01580f88fcb3..391128456aec 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -48,10 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (icq->flags & ICQ_EXITED)
 		return;
 
-	if (et->uses_mq && et->ops.mq.exit_icq)
+	if (et->ops.mq.exit_icq)
 		et->ops.mq.exit_icq(icq);
-	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
-		et->ops.sq.elevator_exit_icq_fn(icq);
 
 	icq->flags |= ICQ_EXITED;
 }
@@ -187,25 +185,13 @@ void put_io_context_active(struct io_context *ioc)
 	 * reverse double locking. Read comment in ioc_release_fn() for
 	 * explanation on the nested locking annotation.
 	 */
-retry:
 	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
 
 		et = icq->q->elevator->type;
-		if (et->uses_mq) {
-			ioc_exit_icq(icq);
-		} else {
-			if (spin_trylock(icq->q->queue_lock)) {
-				ioc_exit_icq(icq);
-				spin_unlock(icq->q->queue_lock);
-			} else {
-				spin_unlock_irqrestore(&ioc->lock, flags);
-				cpu_relax();
-				goto retry;
-			}
-		}
+		ioc_exit_icq(icq);
 	}
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
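The dozen deleted lines above were the legacy half of a lock-ordering workaround: the normal order takes queue_lock before ioc->lock, but this walk holds ioc->lock first, so the single-queue path could only spin_trylock() the queue_lock and, on failure, drop everything, relax, and retry from scratch. blk-mq's exit_icq hook needs no queue_lock, so holding ioc->lock alone now suffices. A minimal pthread sketch of the deleted trylock/retry shape, with hypothetical locks standing in for ioc->lock and queue_lock:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* think ioc->lock  */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* think queue_lock */

static void exit_one_icq(void) { puts("icq exited under both locks"); }

static void reverse_order_walk(void)
{
retry:
	pthread_mutex_lock(&lock_a);	/* must hold A to walk the list */
	if (pthread_mutex_trylock(&lock_b)) {
		/* B's owner may be waiting on A: drop A, breathe, start over. */
		pthread_mutex_unlock(&lock_a);
		sched_yield();		/* stand-in for cpu_relax() */
		goto retry;
	}
	exit_one_icq();
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	reverse_order_walk();
	return 0;
}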
@@ -232,7 +218,7 @@ static void __ioc_clear_queue(struct list_head *icq_list)
 
 	while (!list_empty(icq_list)) {
 		struct io_cq *icq = list_entry(icq_list->next,
-						struct io_cq, q_node);
+					       struct io_cq, q_node);
 		struct io_context *ioc = icq->ioc;
 
 		spin_lock_irqsave(&ioc->lock, flags);
@@ -253,14 +239,9 @@ void ioc_clear_queue(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	list_splice_init(&q->icq_list, &icq_list);
+	spin_unlock_irq(q->queue_lock);
 
-	if (q->mq_ops) {
-		spin_unlock_irq(q->queue_lock);
-		__ioc_clear_queue(&icq_list);
-	} else {
-		__ioc_clear_queue(&icq_list);
-		spin_unlock_irq(q->queue_lock);
-	}
+	__ioc_clear_queue(&icq_list);
 }
 
 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
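After the change, ioc_clear_queue() uses the mq ordering unconditionally: splice the whole icq list onto a private head under queue_lock, drop the lock, then let __ioc_clear_queue() tear the private list down. A self-contained sketch of that splice-then-process shape, with a toy list and mutex rather than the kernel's types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *icq_list;	/* stand-in for q->icq_list */

static void clear_queue(void)
{
	struct node *detached;

	/* Splice the shared list onto a private head under the lock... */
	pthread_mutex_lock(&queue_lock);
	detached = icq_list;
	icq_list = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ...then run the slow teardown with the lock already dropped. */
	while (detached) {
		struct node *next = detached->next;
		printf("destroying icq %d\n", detached->v);
		free(detached);
		detached = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->v = i;
		n->next = icq_list;
		icq_list = n;
	}
	clear_queue();
	return 0;
}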
@@ -415,10 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
 		list_add(&icq->q_node, &q->icq_list);
-		if (et->uses_mq && et->ops.mq.init_icq)
+		if (et->ops.mq.init_icq)
 			et->ops.mq.init_icq(icq);
-		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
-			et->ops.sq.elevator_init_icq_fn(icq);
 	} else {
 		kmem_cache_free(et->icq_cache, icq);
 		icq = ioc_lookup_icq(ioc, q);
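Unchanged context, but worth a note: ioc_create_icq() keeps its optimistic insert-or-lookup idiom around this hunk. It inserts the freshly allocated icq into the radix tree, and if another task raced in first, frees the local copy and adopts the existing one via ioc_lookup_icq(). A toy single-slot version of the same idiom (hypothetical: a mutex-guarded slot stands in for the radix tree):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct icq { int data; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct icq *slot;	/* stand-in for one radix-tree slot */

static struct icq *create_or_lookup(int data)
{
	struct icq *icq = malloc(sizeof(*icq));
	struct icq *ret;

	if (!icq)
		return NULL;
	icq->data = data;

	pthread_mutex_lock(&tree_lock);
	if (!slot) {
		slot = icq;	/* insert won: ours becomes the shared copy */
		ret = icq;
	} else {
		free(icq);	/* lost the race: use the existing object */
		ret = slot;
	}
	pthread_mutex_unlock(&tree_lock);
	return ret;
}

int main(void)
{
	struct icq *a = create_or_lookup(1);
	struct icq *b = create_or_lookup(2);

	printf("same object: %s\n", a == b ? "yes" : "no");
	free(slot);
	return 0;
}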