aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorMing Lei <ming.lei@canonical.com>2015-01-29 07:17:27 -0500
committerJens Axboe <axboe@fb.com>2015-01-29 11:30:51 -0500
commite09aae7edec1d20824c60a6f0ca4589f99ada17b (patch)
tree1cf7f6b2434f356f9dab4104bd5ecbfa85228799 /block
parent74170118b26e55b611de5210f47657118a03a0e1 (diff)
blk-mq: release mq's kobjects in blk_release_queue()
The kobject memory inside blk-mq hctx/ctx shouldn't have been freed before the kobject is released because driver core can access it freely before its release. We can't do that in all ctx/hctx/mq_kobj's release handler because it can be run before blk_cleanup_queue(). Given mq_kobj shouldn't have been introduced, this patch simply moves mq's release into blk_release_queue(). Reported-by: Sasha Levin <sasha.levin@oracle.com> Signed-off-by: Ming Lei <ming.lei@canonical.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq.c29
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-sysfs.c2
3 files changed, 26 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2f95747c287e..2390c5541e71 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
+	queue_for_each_hw_ctx(q, hctx, i)
 		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-	}
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -1869,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	/* hctx kobj stays in hctx */
+	queue_for_each_hw_ctx(q, hctx, i)
+		kfree(hctx);
+
+	kfree(q->queue_hw_ctx);
+
+	/* ctx kobj stays in queue_ctx */
+	free_percpu(q->queue_ctx);
+}
+
1872struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 1891struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1873{ 1892{
1874 struct blk_mq_hw_ctx **hctxs; 1893 struct blk_mq_hw_ctx **hctxs;
@@ -2002,12 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	free_percpu(q->queue_ctx);
-	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_ctx = NULL;
-	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
 	mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4f4f943c22c3..6a48c4c0d8a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 935ea2aa0730..faaf36ade7eb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
+	else
+		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
 