aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq.c
diff options
context:
space:
mode:
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--block/blk-mq.c18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index df8e1e09dd17..38f4a165640d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -119,16 +119,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
119 spin_unlock_irq(q->queue_lock); 119 spin_unlock_irq(q->queue_lock);
120 120
121 if (freeze) { 121 if (freeze) {
122 /* 122 percpu_ref_kill(&q->mq_usage_counter);
123 * XXX: Temporary kludge to work around SCSI blk-mq stall.
124 * SCSI synchronously creates and destroys many queues
125 * back-to-back during probe leading to lengthy stalls.
126 * This will be fixed by keeping ->mq_usage_counter in
127 * atomic mode until genhd registration, but, for now,
128 * let's work around using expedited synchronization.
129 */
130 __percpu_ref_kill_expedited(&q->mq_usage_counter);
131
132 blk_mq_run_queues(q, false); 123 blk_mq_run_queues(q, false);
133 } 124 }
134 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); 125 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
@@ -1804,7 +1795,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1804 if (!q) 1795 if (!q)
1805 goto err_hctxs; 1796 goto err_hctxs;
1806 1797
1807 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release)) 1798 /*
1799 * Init percpu_ref in atomic mode so that it's faster to shutdown.
1800 * See blk_register_queue() for details.
1801 */
1802 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1803 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1808 goto err_map; 1804 goto err_map;
1809 1805
1810 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1806 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);