Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  |  28 ++++++++++------------------
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b2fd175e84d7..159187a28d66 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1955,16 +1955,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	}
 }
 
-static void blk_mq_free_hw_queues(struct request_queue *q,
-		struct blk_mq_tag_set *set)
-{
-	struct blk_mq_hw_ctx *hctx;
-	unsigned int i;
-
-	queue_for_each_hw_ctx(q, hctx, i)
-		free_cpumask_var(hctx->cpumask);
-}
-
 static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
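
The open-coded blk_mq_free_hw_queues() helper goes away: a hctx's cpumask (together with its ctxs array and the hctx itself, per the later hunks) is now freed only from the release handler of the hctx kobject, so the memory lives exactly as long as the last reference to it. The companion change sits in blk-mq-sysfs.c, which this diffstat filters out; what follows is a sketch of what that release callback plausibly looks like, not a quote from the patch:

/*
 * Sketch (assumed shape of the blk-mq-sysfs.c side): everything that
 * used to be freed by hand is torn down only when the final kobject
 * reference on the hctx is dropped.
 */
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						   kobj);

	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}
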
@@ -2045,7 +2035,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
 		struct blk_mq_hw_ctx *hctx;
 
-		memset(__ctx, 0, sizeof(*__ctx));
 		__ctx->cpu = i;
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
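
The memset() is not merely redundant but harmful now: q->queue_ctx comes from alloc_percpu(), which returns zeroed memory, and by the time blk_mq_init_cpu_queues() runs, blk_mq_sysfs_init() has already called kobject_init() on each per-cpu ctx, so zeroing __ctx here would wipe a live kobject (see the blk_mq_sysfs_init() sketch after the blk_mq_init_allocated_queue() hunk below).
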
@@ -2257,15 +2246,19 @@ void blk_mq_release(struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx)
 			continue;
-		kfree(hctx->ctxs);
-		kfree(hctx);
+		kobject_put(&hctx->kobj);
 	}
 
 	q->mq_map = NULL;
 
 	kfree(q->queue_hw_ctx);
 
-	/* ctx kobj stays in queue_ctx */
+	/*
+	 * release .mq_kobj and sw queue's kobject now because
+	 * both share lifetime with request queue.
+	 */
+	blk_mq_sysfs_deinit(q);
+
 	free_percpu(q->queue_ctx);
 }
 
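
blk_mq_release() now drops each hctx with kobject_put() instead of kfree()ing it directly, and the new blk_mq_sysfs_deinit() call is the teardown counterpart of blk_mq_sysfs_init(): it puts q->mq_kobj and the per-cpu software-queue kobjects, both of which share the request queue's lifetime. The helper's body lives in blk-mq-sysfs.c and is not shown in this diff; a minimal sketch of what it presumably does:

/*
 * Sketch (assumed): drop the references taken in blk_mq_sysfs_init().
 * The ctx kobjects are embedded in the per-cpu queue_ctx area, so
 * this must run before free_percpu(q->queue_ctx) above.
 */
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}
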
@@ -2330,10 +2323,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 			if (hctx->tags)
 				blk_mq_free_map_and_requests(set, j);
 			blk_mq_exit_hctx(q, set, hctx, j);
-			free_cpumask_var(hctx->cpumask);
 			kobject_put(&hctx->kobj);
-			kfree(hctx->ctxs);
-			kfree(hctx);
 			hctxs[j] = NULL;
 
 		}
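
The same discipline covers this error path in blk_mq_realloc_hw_ctxs(): after kobject_put(), the hctx may either be gone already or still referenced elsewhere (an open sysfs file, for instance), so the old hand-rolled free_cpumask_var()/kfree() calls risked a double free in the first case and a use-after-free in the second. The release handler is now the only place that frees.
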
@@ -2352,6 +2342,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_ctx)
 		goto err_exit;
 
+	/* init q->mq_kobj and sw queues' kobjects */
+	blk_mq_sysfs_init(q);
+
 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
 			GFP_KERNEL, set->numa_node);
 	if (!q->queue_hw_ctx)
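
On the init side, blk_mq_sysfs_init() now runs as soon as the per-cpu queue_ctx area exists, so q->mq_kobj and every software queue's kobject are initialized exactly once per request queue rather than on every sysfs register/unregister cycle. Again the body is in blk-mq-sysfs.c; here is a sketch under the assumption that it uses the blk_mq_ktype and blk_mq_ctx_ktype kobj_types defined in that file:

/*
 * Sketch (assumed): one-time kobject initialization for the mq kobject
 * and each software (per-cpu) queue. Registration with sysfs happens
 * later and may repeat; kobject_init() must happen exactly once.
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
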
@@ -2442,7 +2435,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
-	blk_mq_free_hw_queues(q, set);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */