author    Keith Busch <keith.busch@intel.com>    2018-09-25 12:36:20 -0400
committer Jens Axboe <axboe@kernel.dk>           2018-09-25 22:17:59 -0400
commit    530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b (patch)
tree      f25bd9aa92fc0042974192d81b705253ddd8c597
parent    b57e99b4b8b0ebdf9707424e7ddc0c392bdc5fe6 (diff)
blk-mq: Allow blocking queue tag iter callbacks
A recent commit runs tag iterator callbacks under the rcu read lock,
but existing callbacks do not satisfy the non-blocking requirement.
The commit intended to prevent an iterator from accessing a queue
that's being modified. This patch fixes the original issue by taking
a queue reference instead of reading it, which allows callbacks to
make blocking calls.

Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
Acked-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
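The core of the change is swapping an rcu read-side check for a real
reference count: percpu_ref_tryget() either takes a reference on
q_usage_counter or fails if the queue is frozen, and blk_queue_exit()
drops that reference when the iteration is done. The standalone C
sketch below mimics this pattern in userspace with a plain atomic;
the queue struct, queue_tryget(), and queue_put() are invented
stand-ins for illustration, not kernel APIs.

/*
 * Userspace sketch of the reference pattern the patch adopts; this is
 * an illustration, not kernel code. The atomic counter stands in for
 * q->q_usage_counter; queue_tryget()/queue_put() mirror
 * percpu_ref_tryget()/blk_queue_exit().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_int usage;		/* live refcount; 0 means frozen */
};

/* Like percpu_ref_tryget(): take a reference only if one is still live. */
static bool queue_tryget(struct queue *q)
{
	int old = atomic_load(&q->usage);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&q->usage, &old, old + 1))
			return true;
		/* CAS failure reloaded 'old'; loop re-checks for zero. */
	}
	return false;			/* queue frozen: caller must not iterate */
}

/* Like blk_queue_exit(): drop the reference taken above. */
static void queue_put(struct queue *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/* The callback may block (sleep, allocate, lock a mutex) safely. */
static void busy_iter(int tag)
{
	printf("visiting tag %d\n", tag);
}

static void tag_busy_iter(struct queue *q)
{
	if (!queue_tryget(q))		/* was: rcu_read_lock() + zero check */
		return;
	for (int tag = 0; tag < 4; tag++)
		busy_iter(tag);		/* no rcu read-side section held */
	queue_put(q);			/* was: rcu_read_unlock() */
}

int main(void)
{
	struct queue q = { .usage = 1 };	/* a live queue holds a base ref */

	tag_busy_iter(&q);
	return 0;
}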
-rw-r--r--    block/blk-mq-tag.c    13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..41317c50a446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
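What this buys callers is that iterator callbacks may now block. A
hypothetical callback is sketched below: the function signature matches
busy_iter_fn in this kernel version, but the callback itself, its name,
and the mutex passed through the priv cookie are invented for
illustration and are not part of this patch.

#include <linux/blk-mq.h>
#include <linux/mutex.h>

/*
 * Hypothetical busy_iter_fn, not from this patch. mutex_lock() may
 * sleep, which was illegal while blk_mq_queue_tag_busy_iter() ran
 * callbacks under rcu_read_lock(); with a queue reference held
 * instead, blocking here is legal.
 */
static void example_busy_iter(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      void *priv, bool reserved)
{
	struct mutex *lock = priv;	/* invented: cookie supplied by the caller */

	mutex_lock(lock);		/* may sleep */
	/* ... examine or act on the in-flight request 'rq' ... */
	mutex_unlock(lock);
}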