aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2018-01-06 03:27:40 -0500
committerJens Axboe <axboe@kernel.dk>2018-01-06 11:25:36 -0500
commitfb350e0ad99359768e1e80b4784692031ec340e4 (patch)
tree25d87e34b68ada63eb442c6269b031f037011520
parent7d4901a90d02500c8011472a060f9b2e60e6e605 (diff)
blk-mq: fix race between updating nr_hw_queues and switching io sched
In both elevator_switch_mq() and blk_mq_update_nr_hw_queues(), sched tags can be allocated, and q->nr_hw_queue is used, and a race is inevitable; for example: blk_mq_init_sched() may trigger use-after-free on hctx, which is freed in blk_mq_realloc_hw_ctxs() when nr_hw_queues is decreased. This patch fixes the race by holding q->sysfs_lock. Reviewed-by: Christoph Hellwig <hch@lst.de> Reported-by: Yi Zhang <yi.zhang@redhat.com> Tested-by: Yi Zhang <yi.zhang@redhat.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-mq.c4
1 file changed, 4 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dd21051fb251..111e1aa5562f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2407,6 +2407,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2407 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 2407 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2408 2408
2409 blk_mq_sysfs_unregister(q); 2409 blk_mq_sysfs_unregister(q);
2410
2411 /* protect against switching io scheduler */
2412 mutex_lock(&q->sysfs_lock);
2410 for (i = 0; i < set->nr_hw_queues; i++) { 2413 for (i = 0; i < set->nr_hw_queues; i++) {
2411 int node; 2414 int node;
2412 2415
@@ -2451,6 +2454,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2451 } 2454 }
2452 } 2455 }
2453 q->nr_hw_queues = i; 2456 q->nr_hw_queues = i;
2457 mutex_unlock(&q->sysfs_lock);
2454 blk_mq_sysfs_register(q); 2458 blk_mq_sysfs_register(q);
2455} 2459}
2456 2460