path: root/block/bfq-iosched.c
author     Paolo Valente <paolo.valente@linaro.org>  2018-01-13 06:05:17 -0500
committer  Jens Axboe <axboe@kernel.dk>              2018-01-18 10:21:35 -0500
commit     a52a69ea89dc12e6f4572f554940789c1ab23c7a (patch)
tree       0406e05cbef505c74494c817069e13fa7ad83e83 /block/bfq-iosched.c
parent     23d4ee19e789ae3dce3e04bd24e3d1537965475f (diff)
block, bfq: limit tags for writes and async I/O
Asynchronous I/O can easily starve synchronous I/O (both sync reads and
sync writes) by consuming all request tags. Similarly, storms of
synchronous writes, such as those that sync(2) may trigger, can starve
synchronous reads. In their turn, these two problems may also cause BFQ
to lose control over latency for interactive and soft real-time
applications. For example, on a PLEXTOR PX-256M5S SSD, LibreOffice
Writer takes 0.6 seconds to start if the device is idle, but it takes
more than 45 seconds (!) if there are sequential writes in the
background.

This commit addresses this issue by limiting the maximum percentage of
tags that asynchronous I/O requests and synchronous write requests can
consume. In particular, this commit grants a higher threshold to
synchronous writes, to prevent the latter from being starved by
asynchronous I/O.

According to the above test, LibreOffice Writer now starts in about
1.2 seconds on average, regardless of the background workload, and
apart from some rare outliers.

To check this improvement, run the comm_startup_lat benchmark from the
S suite [1], e.g.:

sudo ./comm_startup_lat.sh bfq 5 5 seq 10 "lowriter --terminate_after_init"

[1] https://github.com/Algodev-github/S

Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
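For a concrete feel of the thresholds above, the following standalone
sketch (hypothetical, not part of this patch) reproduces the per-word
depth arithmetic that the patch introduces in bfq_update_depths(),
assuming an sbitmap word of 64 tags (sb_shift = 6):

#include <stdio.h>

/* User-space demo of the depth formulas from bfq_update_depths();
 * 'shift' stands in for bfqd->sb_shift, max_u() for the kernel's
 * max() macro. */
static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int shift = 6;         /* assumed: 64 tags per sbitmap word */
        unsigned int depths[2][2];

        /* No weight-raised queues: 50% for async, 75% for sync writes */
        depths[0][0] = max_u((1U << shift) >> 1, 1U);
        depths[0][1] = max_u(((1U << shift) * 3) >> 2, 1U);
        /* Some queue weight-raised: ~18% for async, ~37% for sync writes */
        depths[1][0] = max_u(((1U << shift) * 3) >> 4, 1U);
        depths[1][1] = max_u(((1U << shift) * 6) >> 4, 1U);

        printf("async, no wr:      %u of %u tags\n", depths[0][0], 1U << shift);
        printf("sync write, no wr: %u of %u tags\n", depths[0][1], 1U << shift);
        printf("async, wr:         %u of %u tags\n", depths[1][0], 1U << shift);
        printf("sync write, wr:    %u of %u tags\n", depths[1][1], 1U << shift);
        return 0;
}

With sb_shift = 6 this yields 32 (50%) and 48 (75%) tags per word
without weight-raising, and 12 (~18%) and 24 (~37%) when some queue is
weight-raised, matching the comments in the patch below.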
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--  block/bfq-iosched.c | 77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f352b1677143..a7ab0cb50733 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -417,6 +417,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
 	}
 }
 
+/*
+ * See the comments on bfq_limit_depth for the purpose of
+ * the depths set in the function.
+ */
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+{
+	bfqd->sb_shift = bt->sb.shift;
+
+	/*
+	 * In-word depths if no bfq_queue is being weight-raised:
+	 * leaving 25% of tags only for sync reads.
+	 *
+	 * In next formulas, right-shift the value
+	 * (1U<<bfqd->sb_shift), instead of computing directly
+	 * (1U<<(bfqd->sb_shift - something)), to be robust against
+	 * any possible value of bfqd->sb_shift, without having to
+	 * limit 'something'.
+	 */
+	/* no more than 50% of tags for async I/O */
+	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+	/*
+	 * no more than 75% of tags for sync writes (25% extra tags
+	 * w.r.t. async I/O, to prevent async I/O from starving sync
+	 * writes)
+	 */
+	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+
+	/*
+	 * In-word depths in case some bfq_queue is being weight-
+	 * raised: leaving ~63% of tags for sync reads. This is the
+	 * highest percentage for which, in our tests, application
+	 * start-up times didn't suffer from any regression due to tag
+	 * shortage.
+	 */
+	/* no more than ~18% of tags for async I/O */
+	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+	/* no more than ~37% of tags for sync writes (~20% extra tags) */
+	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+}
+
+/*
+ * Async I/O can easily starve sync I/O (both sync reads and sync
+ * writes), by consuming all tags. Similarly, storms of sync writes,
+ * such as those that sync(2) may trigger, can starve sync reads.
+ * Limit depths of async I/O and sync writes so as to counter both
+ * problems.
+ */
+static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct bfq_data *bfqd = data->q->elevator->elevator_data;
+	struct sbitmap_queue *bt;
+
+	if (op_is_sync(op) && !op_is_write(op))
+		return;
+
+	if (data->flags & BLK_MQ_REQ_RESERVED) {
+		if (unlikely(!tags->nr_reserved_tags)) {
+			WARN_ON_ONCE(1);
+			return;
+		}
+		bt = &tags->breserved_tags;
+	} else
+		bt = &tags->bitmap_tags;
+
+	if (unlikely(bfqd->sb_shift != bt->sb.shift))
+		bfq_update_depths(bfqd, bt);
+
+	data->shallow_depth =
+		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+
+	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+			__func__, bfqd->wr_busy_queues, op_is_sync(op),
+			data->shallow_depth);
+}
+
 static struct bfq_queue *
 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
 		       sector_t sector, struct rb_node **ret_parent,
@@ -5285,6 +5361,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
+		.limit_depth		= bfq_limit_depth,
 		.prepare_request	= bfq_prepare_request,
 		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
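To see how the shallow depth set by bfq_limit_depth() plays out at
tag-allocation time, here is a hypothetical user-space model (all names
invented for this demo): a restricted allocator may only claim bit
indexes below its shallow depth within a 64-bit tag word, so the upper
bits stay available to unrestricted sync reads. The kernel-side
counterpart of this behavior is __sbitmap_queue_get_shallow() in
lib/sbitmap.c, which blk-mq uses when data->shallow_depth is set.

#include <stdint.h>
#include <stdio.h>

/* Demo allocator: find and set the first clear bit whose index is
 * below 'shallow_depth'; return the tag number, or -1 if the allowed
 * depth is exhausted. */
static int get_tag(uint64_t *word, unsigned int shallow_depth)
{
        for (unsigned int bit = 0; bit < shallow_depth; bit++) {
                if (!(*word & (1ULL << bit))) {
                        *word |= 1ULL << bit;
                        return (int)bit;
                }
        }
        return -1;
}

int main(void)
{
        uint64_t word = 0;
        unsigned int async_depth = 32;  /* word_depths[0][0] for sb_shift 6 */
        unsigned int full_depth = 64;   /* sync reads are not limited */
        int allocated = 0;

        /* Async requests stop getting tags after 32 allocations... */
        while (get_tag(&word, async_depth) >= 0)
                allocated++;
        printf("async tags allocated: %d\n", allocated);          /* 32 */

        /* ...while a sync read can still allocate from the rest. */
        printf("sync read tag: %d\n", get_tag(&word, full_depth)); /* 32 */
        return 0;
}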