commit 569fd0ce96087283866ab8c438dac4bcf1738846 (patch)
author:    Jens Axboe <axboe@fb.com>  2015-04-17 10:28:50 -0400
committer: Jens Axboe <axboe@fb.com>  2015-04-17 10:31:12 -0400
tree:      f1fcf0648a33638655ca7142667a5e67f4ed073b
parent:    54e514b91b95d6441c12a7955addfb9f9d2afc65 (diff)
blk-mq: fix iteration of busy bitmap
Commit 889fa31f00b2 was a bit too eager in reducing the loop count,
so we ended up missing queues in some configurations. Ensure that
our division rounds up, so that's not the case.
Reported-by: Guenter Roeck <linux@roeck-us.net>
Fixes: 889fa31f00b2 ("blk-mq: reduce unnecessary software queue looping")
Signed-off-by: Jens Axboe <axboe@fb.com>
 block/blk-mq.c         | 6 +++---
 include/linux/blk-mq.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c82de08f3721..ade8a2d1b0aa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++)
+	for (i = 0; i < hctx->ctx_map.size; i++)
 		if (hctx->ctx_map.map[i].word)
 			return true;
 
@@ -730,7 +730,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++) {
+	for (i = 0; i < hctx->ctx_map.size; i++) {
 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
 		unsigned int off, bit;
 
@@ -1818,7 +1818,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * This is more accurate and more efficient than looping
 	 * over all possibly mapped software queues.
 	 */
-	map->map_size = hctx->nr_ctx / map->bits_per_word;
+	map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
 
 	/*
 	 * Initialize batch roundrobin counts
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8210e8797c12..2056a99b92f8 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
 };
 
 struct blk_mq_ctxmap {
-	unsigned int map_size;
+	unsigned int size;
 	unsigned int bits_per_word;
 	struct blk_align_bitmap *map;
 };