aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-04-17 15:09:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-17 15:09:51 -0400
commite076b7c1343e0810880e6b909445f06921f31067 (patch)
tree73f57305e66bd32208628fbcc2a009faea42ccce
parent0f5abd4020bfd2b1eec6975b91bd5068aa674a93 (diff)
parent569fd0ce96087283866ab8c438dac4bcf1738846 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block core fix from Jens Axboe: "A commit in the previous pull request introduced a regression. So far only observed on qemu-sparc64, but it's a general bug. Please pull this single fix to rectify that, thanks" [ And it turns out that it's been seen outside of that qemu-sparc64 case, and is easy to trigger with small number of CPUs and blk-mq enabled by default - Linus ] * 'for-linus' of git://git.kernel.dk/linux-block: blk-mq: fix iteration of busy bitmap
-rw-r--r--block/blk-mq.c6
-rw-r--r--include/linux/blk-mq.h2
2 files changed, 4 insertions, 4 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c82de08f3721..ade8a2d1b0aa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41{ 41{
42 unsigned int i; 42 unsigned int i;
43 43
44 for (i = 0; i < hctx->ctx_map.map_size; i++) 44 for (i = 0; i < hctx->ctx_map.size; i++)
45 if (hctx->ctx_map.map[i].word) 45 if (hctx->ctx_map.map[i].word)
46 return true; 46 return true;
47 47
@@ -730,7 +730,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
730 struct blk_mq_ctx *ctx; 730 struct blk_mq_ctx *ctx;
731 int i; 731 int i;
732 732
733 for (i = 0; i < hctx->ctx_map.map_size; i++) { 733 for (i = 0; i < hctx->ctx_map.size; i++) {
734 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; 734 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
735 unsigned int off, bit; 735 unsigned int off, bit;
736 736
@@ -1818,7 +1818,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1818 * This is more accurate and more efficient than looping 1818 * This is more accurate and more efficient than looping
1819 * over all possibly mapped software queues. 1819 * over all possibly mapped software queues.
1820 */ 1820 */
1821 map->map_size = hctx->nr_ctx / map->bits_per_word; 1821 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
1822 1822
1823 /* 1823 /*
1824 * Initialize batch roundrobin counts 1824 * Initialize batch roundrobin counts
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8210e8797c12..2056a99b92f8 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
13}; 13};
14 14
15struct blk_mq_ctxmap { 15struct blk_mq_ctxmap {
16 unsigned int map_size; 16 unsigned int size;
17 unsigned int bits_per_word; 17 unsigned int bits_per_word;
18 struct blk_align_bitmap *map; 18 struct blk_align_bitmap *map;
19}; 19};