aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq-sched.c
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2019-06-04 09:08:02 -0400
committerJens Axboe <axboe@kernel.dk>2019-06-07 00:39:39 -0400
commitc3e2219216c92919a6bd1711f340f5faa98695e6 (patch)
tree392cb3a4e9f9de822cd0ef9bfcfca787adcca366 /block/blk-mq-sched.c
parentcf1db7fc8c2d31222701bd5c01b9cbaf89d8e7ce (diff)
block: free sched's request pool in blk_cleanup_queue
In theory, IO scheduler belongs to request queue, and the request pool of sched tags belongs to the request queue too. However, the current tags allocation interfaces are re-used for both driver tags and sched tags, and driver tags is definitely host wide, and doesn't belong to any request queue, same with its request pool. So we need tagset instance for freeing request of sched tags. Meantime, blk_mq_free_tag_set() often follows blk_cleanup_queue() in case of non-BLK_MQ_F_TAG_SHARED, this way requires that request pool of sched tags to be freed before calling blk_mq_free_tag_set(). Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue") moves blk_exit_queue into __blk_release_queue for simplying the fast path in generic_make_request(), then causes oops during freeing requests of sched tags in __blk_release_queue(). Fix the above issue by move freeing request pool of sched tags into blk_cleanup_queue(), this way is safe becasue queue has been frozen and no any in-queue requests at that time. Freeing sched tags has to be kept in queue's release handler becasue there might be un-completed dispatch activity which might refer to sched tags. Cc: Bart Van Assche <bvanassche@acm.org> Cc: Christoph Hellwig <hch@lst.de> Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue") Tested-by: Yi Zhang <yi.zhang@redhat.com> Reported-by: kernel test robot <rong.a.chen@intel.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--block/blk-mq-sched.c30
1 file changed, 27 insertions, 3 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
475 return ret; 475 return ret;
476} 476}
477 477
478/* called in queue's release handler, tagset has gone away */
478static void blk_mq_sched_tags_teardown(struct request_queue *q) 479static void blk_mq_sched_tags_teardown(struct request_queue *q)
479{ 480{
480 struct blk_mq_tag_set *set = q->tag_set;
481 struct blk_mq_hw_ctx *hctx; 481 struct blk_mq_hw_ctx *hctx;
482 int i; 482 int i;
483 483
484 queue_for_each_hw_ctx(q, hctx, i) 484 queue_for_each_hw_ctx(q, hctx, i) {
485 blk_mq_sched_free_tags(set, hctx, i); 485 if (hctx->sched_tags) {
486 blk_mq_free_rq_map(hctx->sched_tags);
487 hctx->sched_tags = NULL;
488 }
489 }
486} 490}
487 491
488int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) 492int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
523 ret = e->ops.init_hctx(hctx, i); 527 ret = e->ops.init_hctx(hctx, i);
524 if (ret) { 528 if (ret) {
525 eq = q->elevator; 529 eq = q->elevator;
530 blk_mq_sched_free_requests(q);
526 blk_mq_exit_sched(q, eq); 531 blk_mq_exit_sched(q, eq);
527 kobject_put(&eq->kobj); 532 kobject_put(&eq->kobj);
528 return ret; 533 return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
534 return 0; 539 return 0;
535 540
536err: 541err:
542 blk_mq_sched_free_requests(q);
537 blk_mq_sched_tags_teardown(q); 543 blk_mq_sched_tags_teardown(q);
538 q->elevator = NULL; 544 q->elevator = NULL;
539 return ret; 545 return ret;
540} 546}
541 547
548/*
549 * called in either blk_queue_cleanup or elevator_switch, tagset
550 * is required for freeing requests
551 */
552void blk_mq_sched_free_requests(struct request_queue *q)
553{
554 struct blk_mq_hw_ctx *hctx;
555 int i;
556
557 lockdep_assert_held(&q->sysfs_lock);
558 WARN_ON(!q->elevator);
559
560 queue_for_each_hw_ctx(q, hctx, i) {
561 if (hctx->sched_tags)
562 blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
563 }
564}
565
542void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) 566void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
543{ 567{
544 struct blk_mq_hw_ctx *hctx; 568 struct blk_mq_hw_ctx *hctx;