path: root/block/bfq-iosched.c
author    Jens Axboe <axboe@kernel.dk>    2018-05-09 15:27:21 -0400
committer Jens Axboe <axboe@kernel.dk>    2018-05-10 13:27:29 -0400
commit    f0635b8a416e3b99dc6fd9ac3ce534764869d0c8 (patch)
tree      6307664d20afcfc1609f8d5a8e0961e3e1f98b8f /block/bfq-iosched.c
parent    55141366de8cb281ee6730a810c7602dee403e92 (diff)
bfq: calculate shallow depths at init time
It doesn't change, so don't put it in the per-IO hot path.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--	block/bfq-iosched.c	97
1 file changed, 50 insertions(+), 47 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index db38e88a5670..0cd8aa80c32d 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -487,46 +487,6 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
 }
 
 /*
- * See the comments on bfq_limit_depth for the purpose of
- * the depths set in the function.
- */
-static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
-{
-	bfqd->sb_shift = bt->sb.shift;
-
-	/*
-	 * In-word depths if no bfq_queue is being weight-raised:
-	 * leaving 25% of tags only for sync reads.
-	 *
-	 * In next formulas, right-shift the value
-	 * (1U<<bfqd->sb_shift), instead of computing directly
-	 * (1U<<(bfqd->sb_shift - something)), to be robust against
-	 * any possible value of bfqd->sb_shift, without having to
-	 * limit 'something'.
-	 */
-	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
-	/*
-	 * no more than 75% of tags for sync writes (25% extra tags
-	 * w.r.t. async I/O, to prevent async I/O from starving sync
-	 * writes)
-	 */
-	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
-
-	/*
-	 * In-word depths in case some bfq_queue is being weight-
-	 * raised: leaving ~63% of tags for sync reads. This is the
-	 * highest percentage for which, in our tests, application
-	 * start-up times didn't suffer from any regression due to tag
-	 * shortage.
-	 */
-	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
-	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
-}
-
-/*
  * Async I/O can easily starve sync I/O (both sync reads and sync
  * writes), by consuming all tags. Similarly, storms of sync writes,
  * such as those that sync(2) may trigger, can starve sync reads.
@@ -535,18 +495,11 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
  */
 static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 {
-	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 	struct bfq_data *bfqd = data->q->elevator->elevator_data;
-	struct sbitmap_queue *bt;
 
 	if (op_is_sync(op) && !op_is_write(op))
 		return;
 
-	bt = &tags->bitmap_tags;
-
-	if (unlikely(bfqd->sb_shift != bt->sb.shift))
-		bfq_update_depths(bfqd, bt);
-
 	data->shallow_depth =
 		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
 
@@ -5126,6 +5079,55 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
 }
 
+/*
+ * See the comments on bfq_limit_depth for the purpose of
+ * the depths set in the function.
+ */
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+{
+	bfqd->sb_shift = bt->sb.shift;
+
+	/*
+	 * In-word depths if no bfq_queue is being weight-raised:
+	 * leaving 25% of tags only for sync reads.
+	 *
+	 * In next formulas, right-shift the value
+	 * (1U<<bfqd->sb_shift), instead of computing directly
+	 * (1U<<(bfqd->sb_shift - something)), to be robust against
+	 * any possible value of bfqd->sb_shift, without having to
+	 * limit 'something'.
+	 */
+	/* no more than 50% of tags for async I/O */
+	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+	/*
+	 * no more than 75% of tags for sync writes (25% extra tags
+	 * w.r.t. async I/O, to prevent async I/O from starving sync
+	 * writes)
+	 */
+	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+
+	/*
+	 * In-word depths in case some bfq_queue is being weight-
+	 * raised: leaving ~63% of tags for sync reads. This is the
+	 * highest percentage for which, in our tests, application
+	 * start-up times didn't suffer from any regression due to tag
+	 * shortage.
+	 */
+	/* no more than ~18% of tags for async I/O */
+	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+	/* no more than ~37% of tags for sync writes (~20% extra tags) */
+	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+
+	bfq_update_depths(bfqd, &tags->bitmap_tags);
+	return 0;
+}
+
 static void bfq_exit_queue(struct elevator_queue *e)
 {
 	struct bfq_data *bfqd = e->elevator_data;
@@ -5547,6 +5549,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.requests_merged = bfq_requests_merged,
 		.request_merged = bfq_request_merged,
 		.has_work = bfq_has_work,
+		.init_hctx = bfq_init_hctx,
 		.init_sched = bfq_init_queue,
 		.exit_sched = bfq_exit_queue,
 	},
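
For reference, below is a minimal standalone sketch (not part of the patch) of the four shallow depths that bfq_update_depths() now precomputes once per hctx at init time, using the same formulas as the patch. The sb_shift value of 6 (i.e. 64 tags per sbitmap word) and the max_u() helper are purely illustrative assumptions for a userspace build; on a real queue the shift comes from the scheduler tag bitmap.

/* worked example of the word_depths[][] arithmetic from bfq_update_depths() */
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;	/* stand-in for the kernel's max() */
}

int main(void)
{
	unsigned int sb_shift = 6;	/* assumed: 64 tags per sbitmap word */
	unsigned int word_depths[2][2];

	/* no bfq_queue weight-raised: 50% async, 75% sync writes */
	word_depths[0][0] = max_u((1U << sb_shift) >> 1, 1U);
	word_depths[0][1] = max_u(((1U << sb_shift) * 3) >> 2, 1U);

	/* some bfq_queue weight-raised: ~18% async, ~37% sync writes */
	word_depths[1][0] = max_u(((1U << sb_shift) * 3) >> 4, 1U);
	word_depths[1][1] = max_u(((1U << sb_shift) * 6) >> 4, 1U);

	printf("no wr: async %u, sync writes %u (of %u tags)\n",
	       word_depths[0][0], word_depths[0][1], 1U << sb_shift);
	printf("wr   : async %u, sync writes %u (of %u tags)\n",
	       word_depths[1][0], word_depths[1][1], 1U << sb_shift);
	return 0;
}

With these assumed numbers, a queue with no weight-raising leaves 32 of the 64 in-word tags to async I/O and 48 to sync writes, while active weight-raising tightens those to 12 and 24, reserving the remainder for sync reads.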