summary | refs | log | tree | commit | diff | stats
path: root/block/bfq-iosched.c
diff options
context:
space:
mode:
author    Jens Axboe <axboe@kernel.dk>  2018-05-09 17:25:22 -0400
committer Jens Axboe <axboe@kernel.dk>  2018-05-10 13:27:31 -0400
commit    bd7d4ef6a4c9b3611fa487a0065bf042c71ce620 (patch)
tree      f6e496de016eae8151f382415847d0e3aa93f35f /block/bfq-iosched.c
parent    f0635b8a416e3b99dc6fd9ac3ce534764869d0c8 (diff)
bfq-iosched: remove unused variable
bfqd->sb_shift was attempted used as a cache for the sbitmap queue shift,
but we don't need it, as it never changes. Kill it with fire.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--  block/bfq-iosched.c  16
1 file changed, 7 insertions, 9 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0cd8aa80c32d..10294124d597 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5085,26 +5085,24 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  */
 static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
 {
-	bfqd->sb_shift = bt->sb.shift;
-
 	/*
 	 * In-word depths if no bfq_queue is being weight-raised:
 	 * leaving 25% of tags only for sync reads.
 	 *
 	 * In next formulas, right-shift the value
-	 * (1U<<bfqd->sb_shift), instead of computing directly
-	 * (1U<<(bfqd->sb_shift - something)), to be robust against
-	 * any possible value of bfqd->sb_shift, without having to
+	 * (1U<<bt->sb.shift), instead of computing directly
+	 * (1U<<(bt->sb.shift - something)), to be robust against
+	 * any possible value of bt->sb.shift, without having to
	 * limit 'something'.
	 */
	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+	bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
	/*
	 * no more than 75% of tags for sync writes (25% extra tags
	 * w.r.t. async I/O, to prevent async I/O from starving sync
	 * writes)
	 */
-	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+	bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 
	/*
	 * In-word depths in case some bfq_queue is being weight-
@@ -5114,9 +5112,9 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
	 * shortage.
	 */
	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)