Diffstat (limited to 'block/blk-mq.c'):

 block/blk-mq.c | 258 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 127 insertions(+), 131 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c79126e11030..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
-
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
 
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
+	rq->start_time = jiffies;
+	set_start_time_ns(rq);
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -215,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-				     gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
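
With the "reserved" flag gone from the exported prototype, a normal allocation passes only the queue, the data direction and a gfp mask; reserved tags still go through blk_mq_alloc_reserved_request(). A minimal caller-side sketch against the new prototype (the surrounding driver code and error value are hypothetical):

	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -EBUSY;			/* hypothetical error path */
	/* fill in rq, then insert or run it; freed here only for illustration */
	blk_mq_free_request(rq);
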
@@ -247,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	blk_rq_init(hctx->queue, rq);
 
@@ -294,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
 	bio_endio(bio, error);
 }
 
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
 {
 	struct bio *bio = rq->bio;
 	unsigned int bytes = 0;
@@ -305,7 +315,7 @@ void blk_mq_complete_request(struct request *rq, int error)
 		struct bio *next = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 		blk_mq_bio_endio(rq, bio, error);
 		bio = next;
 	}
@@ -319,87 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
 	else
 		blk_mq_free_request(rq);
 }
+EXPORT_SYMBOL(blk_mq_end_io);
 
-void __blk_mq_end_io(struct request *rq, int error)
-{
-	if (!blk_mark_rq_complete(rq))
-		blk_mq_complete_request(rq, error);
-}
-
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
-	struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-	struct llist_node *entry, *next;
-	struct request *rq;
-
-	entry = llist_del_all(list);
-
-	while (entry) {
-		next = entry->next;
-		rq = llist_entry(entry, struct request, ll_list);
-		__blk_mq_end_io(rq, rq->errors);
-		entry = next;
-	}
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
+static void __blk_mq_complete_request_remote(void *data)
 {
-	struct call_single_data *data = &rq->csd;
-
-	rq->errors = error;
-	rq->ll_list.next = NULL;
-
-	/*
-	 * If the list is non-empty, an existing IPI must already
-	 * be "in flight". If that is the case, we need not schedule
-	 * a new one.
-	 */
-	if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-		data->func = ipi_end_io;
-		data->flags = 0;
-		__smp_call_function_single(ctx->cpu, data, 0);
-	}
+	struct request *rq = data;
 
-	return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	return false;
+	rq->q->softirq_done_fn(rq);
 }
-#endif
 
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	int cpu;
 
-	if (!ctx->ipi_redirect)
-		return __blk_mq_end_io(rq, error);
+	if (!ctx->ipi_redirect) {
+		rq->q->softirq_done_fn(rq);
+		return;
+	}
 
 	cpu = get_cpu();
-
-	if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-	    !ipi_remote_cpu(ctx, cpu, rq, error))
-		__blk_mq_end_io(rq, error);
-
+	if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+		rq->csd.func = __blk_mq_complete_request_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		__smp_call_function_single(ctx->cpu, &rq->csd, 0);
+	} else {
+		rq->q->softirq_done_fn(rq);
+	}
 	put_cpu();
 }
-EXPORT_SYMBOL(blk_mq_end_io);
 
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:		the request being processed
+ *
+ * Description:
+ *	Ends all I/O on a request. It does not handle partial completions.
+ *	The actual completion happens out-of-order, through a IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+	if (unlikely(blk_should_fake_timeout(rq->q)))
+		return;
+	if (!blk_mark_rq_complete(rq))
+		__blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
 {
 	struct request_queue *q = rq->q;
 
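
The completion path is now split in two: blk_mq_end_io() does the actual bio and request teardown, while blk_mq_complete_request() is what a driver calls from interrupt context; the latter either bounces the work to the submitting CPU via IPI or runs q->softirq_done_fn directly. A driver-side sketch of the intended usage, assuming hypothetical mydrv_* helpers and the ->complete hook that blk_mq_init_queue() wires up further down in this patch:

	/* registered as reg->ops->complete, i.e. q->softirq_done_fn */
	static void mydrv_softirq_done(struct request *rq)
	{
		struct mydrv_cmd *cmd = rq->special;	/* hypothetical per-request data */

		blk_mq_end_io(rq, cmd->error ? -EIO : 0);
	}

	static irqreturn_t mydrv_irq(int irq, void *data)
	{
		struct request *rq = mydrv_fetch_completed(data);	/* hypothetical helper */

		if (rq)
			blk_mq_complete_request(rq);	/* may IPI rq->mq_ctx->cpu */
		return IRQ_HANDLED;
	}
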
@@ -412,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
  */
 	rq->deadline = jiffies + q->rq_timeout;
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	if (q->dma_drain_size && blk_rq_bytes(rq)) {
+		/*
+		 * Make sure space for the drain appears. We know we can do
+		 * this because max_hw_segments has been adjusted to be one
+		 * fewer than the device can handle.
+		 */
+		rq->nr_phys_segments++;
+	}
+
+	/*
+	 * Flag the last request in the series so that drivers know when IO
+	 * should be kicked off, if they don't do it on a per-request basis.
+	 *
+	 * Note: the flag isn't the only condition drivers should do kick off.
+	 * If drive is busy, the last request might not have the bit set.
+	 */
+	if (last)
+		rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
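
Moving the REQ_END marking into blk_mq_start_request(rq, last) keeps the "kick the hardware now" hint attached to request start. Roughly how a driver's ->queue_rq might honour it (the mydrv_* names are illustrative only):

	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
	{
		struct mydrv_hw_queue *hq = hctx->driver_data;

		mydrv_post_descriptor(hq, rq);		/* hypothetical submit */
		if (rq->cmd_flags & REQ_END)		/* last of this batch: kick hw */
			mydrv_ring_doorbell(hq);
		return BLK_MQ_RQ_QUEUE_OK;
	}
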
@@ -420,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
 
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	rq->cmd_flags &= ~REQ_END;
+
+	if (q->dma_drain_size && blk_rq_bytes(rq))
+		rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
@@ -587,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_start_request(rq);
 
-		/*
-		 * Last request in the series. Flag it as such, this
-		 * enables drivers to know when IO should be kicked off,
-		 * if they don't do it on a per-request basis.
-		 *
-		 * Note: the flag isn't the only condition drivers
-		 * should do kick off. If drive is busy, the last
-		 * request might not have the bit set.
-		 */
-		if (list_empty(&rq_list))
-			rq->cmd_flags |= REQ_END;
+		blk_mq_start_request(rq, list_empty(&rq_list));
 
 		ret = q->mq_ops->queue_rq(hctx, rq);
 		switch (ret) {
@@ -617,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 			break;
 		default:
 			pr_err("blk-mq: bad return on queue: %d\n", ret);
-			rq->errors = -EIO;
 		case BLK_MQ_RQ_QUEUE_ERROR:
+			rq->errors = -EIO;
 			blk_mq_end_io(rq, rq->errors);
 			break;
 		}
@@ -721,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
 }
 
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-				    struct request *rq)
+				    struct request *rq, bool at_head)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
 	trace_block_rq_insert(hctx->queue, rq);
 
-	list_add_tail(&rq->queuelist, &ctx->rq_list);
+	if (at_head)
+		list_add(&rq->queuelist, &ctx->rq_list);
+	else
+		list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
 	/*
@@ -737,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 }
 
 void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool run_queue)
+			   bool at_head, bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx, *current_ctx;
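
Callers that need a request executed ahead of already-queued work can now ask for head insertion. A hypothetical caller-side sketch:

	/* hypothetical: push an urgent request to the front and run the queue */
	static void mydrv_insert_urgent(struct request_queue *q, struct request *rq)
	{
		blk_mq_insert_request(q, rq, true, true);	/* at_head, run_queue */
	}
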
@@ -756,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
 		rq->mq_ctx = ctx;
 	}
 	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq);
+	__blk_mq_insert_request(hctx, rq, at_head);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -788,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
 
 	/* ctx->cpu might be offline */
 	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq);
+	__blk_mq_insert_request(hctx, rq, false);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -826,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 	spin_unlock(&ctx->lock);
 
@@ -916,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_bounce(q, &bio);
 
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		bio_endio(bio, -EIO);
+		return;
+	}
+
 	if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
 		return;
 
@@ -978,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		__blk_mq_free_request(hctx, ctx, rq);
 	else {
 		blk_mq_bio_to_request(rq, bio);
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 
 	spin_unlock(&ctx->lock);
@@ -1091,8 +1090,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
 	struct page *page;
 
 	while (!list_empty(&hctx->page_list)) {
-		page = list_first_entry(&hctx->page_list, struct page, list);
-		list_del_init(&page->list);
+		page = list_first_entry(&hctx->page_list, struct page, lru);
+		list_del_init(&page->lru);
 		__free_pages(page, page->private);
 	}
 
@@ -1156,7 +1155,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
 			break;
 
 		page->private = this_order;
-		list_add_tail(&page->list, &hctx->page_list);
+		list_add_tail(&page->lru, &hctx->page_list);
 
 		p = page_address(page);
 		entries_per_page = order_to_size(this_order) / rq_size;
@@ -1337,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 		reg->queue_depth = BLK_MQ_MAX_DEPTH;
 	}
 
-	/*
-	 * Set aside a tag for flush requests. It will only be used while
-	 * another flush request is in progress but outside the driver.
-	 *
-	 * TODO: only allocate if flushes are supported
-	 */
-	reg->queue_depth++;
-	reg->reserved_tags++;
-
 	if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
 		return ERR_PTR(-EINVAL);
 
@@ -1388,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	q->mq_ops = reg->ops;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+	q->sg_reserved_size = INT_MAX;
+
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
 	if (reg->timeout)
 		blk_queue_rq_timeout(q, reg->timeout);
 
+	if (reg->ops->complete)
+		blk_queue_softirq_done(q, reg->ops->complete);
+
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-	if (blk_mq_init_hw_queues(q, reg, driver_data))
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+				cache_line_size()), GFP_KERNEL);
+	if (!q->flush_rq)
 		goto err_hw;
 
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_flush_rq;
+
 	blk_mq_map_swqueue(q);
 
 	mutex_lock(&all_q_mutex);
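
blk_mq_init_queue() now wires reg->ops->complete up as the queue's softirq_done_fn and preallocates q->flush_rq sized for the driver's cmd_size. A sketch of the registration side under the blk_mq_reg/blk_mq_ops layout of this tree (the mydrv_* symbols are placeholders; unrelated hooks are omitted):

	static struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,
		.map_queue	= blk_mq_map_queue,
		.complete	= mydrv_softirq_done,	/* becomes q->softirq_done_fn */
		/* .alloc_hctx, .free_hctx, .timeout etc. omitted for brevity */
	};

	static struct blk_mq_reg mydrv_mq_reg = {
		.ops		= &mydrv_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.cmd_size	= sizeof(struct mydrv_cmd),	/* also sizes q->flush_rq */
		.numa_node	= NUMA_NO_NODE,
	};

	/* q = blk_mq_init_queue(&mydrv_mq_reg, mydrv_private_data); */
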
@@ -1406,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	mutex_unlock(&all_q_mutex);
 
 	return q;
+
+err_flush_rq:
+	kfree(q->flush_rq);
 err_hw:
 	kfree(q->mq_map);
 err_map:
@@ -1429,7 +1432,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		cancel_delayed_work_sync(&hctx->delayed_work);
 		kfree(hctx->ctx_map);
 		kfree(hctx->ctxs);
 		blk_mq_free_rq_map(hctx);
@@ -1451,7 +1453,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	list_del_init(&q->all_q_node);
 	mutex_unlock(&all_q_mutex);
 }
-EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1496,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		init_llist_head(&per_cpu(ipi_lists, i));
-
 	blk_mq_cpu_init();
 
 	/* Must be called after percpu_counter_hotcpu_callback() */