aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author:    Christoph Hellwig <hch@lst.de>    2018-05-31 13:11:40 -0400
committer: Jens Axboe <axboe@kernel.dk>    2018-06-01 09:38:21 -0400
commit:    131d08e122eaabae028378c0b4da688eb044c6af (patch)
tree:      1c48c6886d4e90d543fe738ba4c426531dcd637b
parent:    acddf3b308f6b6e23653de02e1abf98f402f1f12 (diff)
block: split the blk-mq case from elevator_init
There is almost no shared logic, which leads to a very confusing code flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk.h1
-rw-r--r--block/elevator.c77
3 files changed, 48 insertions, 32 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 858d6edff4d3..6332940ca118 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2573,7 +2573,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2573 if (!(set->flags & BLK_MQ_F_NO_SCHED)) { 2573 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2574 int ret; 2574 int ret;
2575 2575
2576 ret = elevator_init(q); 2576 ret = elevator_init_mq(q);
2577 if (ret) 2577 if (ret)
2578 return ERR_PTR(ret); 2578 return ERR_PTR(ret);
2579 } 2579 }
diff --git a/block/blk.h b/block/blk.h
index ab5ec2dadc7b..8d23aea96ce9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -232,6 +232,7 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
232} 232}
233 233
234int elevator_init(struct request_queue *); 234int elevator_init(struct request_queue *);
235int elevator_init_mq(struct request_queue *q);
235void elevator_exit(struct request_queue *, struct elevator_queue *); 236void elevator_exit(struct request_queue *, struct elevator_queue *);
236int elv_register_queue(struct request_queue *q); 237int elv_register_queue(struct request_queue *q);
237void elv_unregister_queue(struct request_queue *q); 238void elv_unregister_queue(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index d6480e70816e..fa828b5bfd4b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -199,6 +199,11 @@ static void elevator_release(struct kobject *kobj)
199 kfree(e); 199 kfree(e);
200} 200}
201 201
202/*
203 * Use the default elevator specified by config boot param for non-mq devices,
204 * or by config option. Don't try to load modules as we could be running off
205 * async and request_module() isn't allowed from async.
206 */
202int elevator_init(struct request_queue *q) 207int elevator_init(struct request_queue *q)
203{ 208{
204 struct elevator_type *e = NULL; 209 struct elevator_type *e = NULL;
@@ -212,46 +217,22 @@ int elevator_init(struct request_queue *q)
212 if (unlikely(q->elevator)) 217 if (unlikely(q->elevator))
213 goto out_unlock; 218 goto out_unlock;
214 219
215 /* 220 if (*chosen_elevator) {
216 * Use the default elevator specified by config boot param for
217 * non-mq devices, or by config option. Don't try to load modules
218 * as we could be running off async and request_module() isn't
219 * allowed from async.
220 */
221 if (!q->mq_ops && *chosen_elevator) {
222 e = elevator_get(q, chosen_elevator, false); 221 e = elevator_get(q, chosen_elevator, false);
223 if (!e) 222 if (!e)
224 printk(KERN_ERR "I/O scheduler %s not found\n", 223 printk(KERN_ERR "I/O scheduler %s not found\n",
225 chosen_elevator); 224 chosen_elevator);
226 } 225 }
227 226
227 if (!e)
228 e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
228 if (!e) { 229 if (!e) {
229 /* 230 printk(KERN_ERR
230 * For blk-mq devices, we default to using mq-deadline, 231 "Default I/O scheduler not found. Using noop.\n");
231 * if available, for single queue devices. If deadline 232 e = elevator_get(q, "noop", false);
232 * isn't available OR we have multiple queues, default
233 * to "none".
234 */
235 if (q->mq_ops) {
236 if (q->nr_hw_queues == 1)
237 e = elevator_get(q, "mq-deadline", false);
238 if (!e)
239 goto out_unlock;
240 } else
241 e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
242
243 if (!e) {
244 printk(KERN_ERR
245 "Default I/O scheduler not found. " \
246 "Using noop.\n");
247 e = elevator_get(q, "noop", false);
248 }
249 } 233 }
250 234
251 if (e->uses_mq) 235 err = e->ops.sq.elevator_init_fn(q, e);
252 err = blk_mq_init_sched(q, e);
253 else
254 err = e->ops.sq.elevator_init_fn(q, e);
255 if (err) 236 if (err)
256 elevator_put(e); 237 elevator_put(e);
257out_unlock: 238out_unlock:
@@ -993,6 +974,40 @@ out:
993} 974}
994 975
995/* 976/*
977 * For blk-mq devices, we default to using mq-deadline, if available, for single
978 * queue devices. If deadline isn't available OR we have multiple queues,
979 * default to "none".
980 */
981int elevator_init_mq(struct request_queue *q)
982{
983 struct elevator_type *e;
984 int err = 0;
985
986 if (q->nr_hw_queues != 1)
987 return 0;
988
989 /*
990 * q->sysfs_lock must be held to provide mutual exclusion between
991 * elevator_switch() and here.
992 */
993 mutex_lock(&q->sysfs_lock);
994 if (unlikely(q->elevator))
995 goto out_unlock;
996
997 e = elevator_get(q, "mq-deadline", false);
998 if (!e)
999 goto out_unlock;
1000
1001 err = blk_mq_init_sched(q, e);
1002 if (err)
1003 elevator_put(e);
1004out_unlock:
1005 mutex_unlock(&q->sysfs_lock);
1006 return err;
1007}
1008
1009
1010/*
996 * switch to new_e io scheduler. be careful not to introduce deadlocks - 1011 * switch to new_e io scheduler. be careful not to introduce deadlocks -
997 * we don't free the old io scheduler, before we have allocated what we 1012 * we don't free the old io scheduler, before we have allocated what we
998 * need for the new one. this way we have a chance of going back to the old 1013 * need for the new one. this way we have a chance of going back to the old