author    Jens Axboe <axboe@kernel.dk>  2018-05-09 17:26:55 -0400
committer Jens Axboe <axboe@kernel.dk>  2018-05-10 13:27:41 -0400
commit    483b7bf2e40233657713279b6f98a9225ea0ff84
tree      7a44b55e40bf1ab35188980009eaa40661ff2616 /block/bfq-iosched.c
parent    a327553965dede92587e6ccbe7df98dba36edcea
bfq-iosched: update shallow depth to smallest one used
If our shallow depth is smaller than the wake batching of sbitmap, we can
introduce hangs. Ensure that sbitmap knows how low we'll go.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
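For context (this sketch is not part of the patch, and the my_sched_* names are hypothetical), the fix hinges on call ordering: a user that allocates tags with a shallow depth must announce the smallest depth it will ever use before the first shallow allocation, so sbitmap can clamp its wake batch below that depth. In kernel-style C:

#include <linux/sbitmap.h>

/*
 * Hypothetical scheduler setup. Without the
 * sbitmap_queue_min_shallow_depth() call, the wake batch could
 * exceed the shallow depth: too few tags would ever be in flight
 * for completions to accumulate a full batch of freed bits, and
 * sleeping allocators would never be woken.
 */
static void my_sched_depths_init(struct sbitmap_queue *bt,
				 unsigned int min_shallow)
{
	/* must run before any __sbitmap_queue_get_shallow() call */
	sbitmap_queue_min_shallow_depth(bt, min_shallow);
}

static int my_sched_get_tag(struct sbitmap_queue *bt, unsigned int depth)
{
	/* depth must never be smaller than the announced minimum */
	return __sbitmap_queue_get_shallow(bt, depth);
}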
Diffstat (limited to 'block/bfq-iosched.c')
 block/bfq-iosched.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 10294124d597..b622e73a326a 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5081,10 +5081,13 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 
 /*
  * See the comments on bfq_limit_depth for the purpose of
- * the depths set in the function.
+ * the depths set in the function. Return minimum shallow depth we'll use.
  */
-static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+static unsigned int bfq_update_depths(struct bfq_data *bfqd,
+				      struct sbitmap_queue *bt)
 {
+	unsigned int i, j, min_shallow = UINT_MAX;
+
 	/*
 	 * In-word depths if no bfq_queue is being weight-raised:
 	 * leaving 25% of tags only for sync reads.
@@ -5115,14 +5118,22 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
 	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+
+	for (i = 0; i < 2; i++)
+		for (j = 0; j < 2; j++)
+			min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
+
+	return min_shallow;
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int min_shallow;
 
-	bfq_update_depths(bfqd, &tags->bitmap_tags);
+	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
 	return 0;
 }
 
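As a rough sanity check of what min_shallow evaluates to (not part of the commit: the 50% and 75% non-weight-raised formulas are taken from the surrounding bfq code outside this hunk, and shift = 6 assumes sbitmap's usual 64-bit words), the arithmetic can be reproduced standalone:

#include <limits.h>
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int shift = 6;	/* 64 tags per sbitmap word */
	unsigned int depths[2][2], i, j, min_shallow = UINT_MAX;

	/* no queue weight-raised: 50% async, 75% sync writes */
	depths[0][0] = max_u((1U << shift) >> 1, 1U);
	depths[0][1] = max_u(((1U << shift) * 3) >> 2, 1U);
	/* some queue weight-raised: ~18% async, ~37% sync writes */
	depths[1][0] = max_u(((1U << shift) * 3) >> 4, 1U);
	depths[1][1] = max_u(((1U << shift) * 6) >> 4, 1U);

	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			if (depths[i][j] < min_shallow)
				min_shallow = depths[i][j];

	/* 32, 48, 12, 24 -> min shallow depth of 12 */
	printf("min shallow depth = %u\n", min_shallow);
	return 0;
}

With 64-bit words this gives depths of 32, 48, 12 and 24, i.e. a min_shallow of 12; for smaller tag sets (smaller sb.shift) the weight-raised async depth can fall below sbitmap's default wake batch of 8, which is exactly the hang this patch guards against.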