author     Paolo Valente <paolo.valente@linaro.org>    2017-04-12 12:23:15 -0400
committer  Jens Axboe <axboe@fb.com>                   2017-04-19 10:30:26 -0400
commit     cfd69712a101f528caad1529e64834e31e5dff62 (patch)
tree       cfe6110f6cec2a2735035b0a7f0aea533a12f1e2 /block/bfq-iosched.c
parent     bcd5642607ab9195e22a1617d92fb82698d44448 (diff)
block, bfq: reduce latency during request-pool saturation
This patch introduces a heuristic that reduces latency when the
I/O-request pool is saturated. This goal is achieved by disabling
device idling, for non-weight-raised queues, when there are
weight-raised queues with pending or in-flight requests. In fact, as
explained in more detail in the comment on the function
bfq_bfqq_may_idle(), this reduces the rate at which processes
associated with non-weight-raised queues grab requests from the pool,
thereby increasing the probability that processes associated with
weight-raised queues get a request immediately (or at least soon) when
they need one. Along the same line, if there are weight-raised queues,
then this patch halves the service rate of async (write) requests for
non-weight-raised queues.

Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
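For orientation before reading the diff, here is a minimal, hedged C sketch of the two mechanisms just described. The helper names, parameters, and stand-alone form are illustrative stand-ins rather than the kernel's API; the logic mirrors the changes made below to bfq_serv_to_charge() and bfq_bfqq_may_idle(), with wr_busy_queues standing for the new counter of weight-raised busy queues.

#include <stdbool.h>        /* only needed for a user-space build of this sketch */

/*
 * Charging rule sketched from bfq_serv_to_charge(): async (write)
 * requests of non-weight-raised queues are charged twice as much
 * while any weight-raised queue is busy, which halves their service
 * rate and frees up the request pool sooner.
 */
static unsigned long charge_sketch(unsigned long sectors, bool sync_or_wr,
                                   int wr_busy_queues,
                                   unsigned long async_charge_factor)
{
        if (sync_or_wr)
                return sectors;                 /* charge the actual service */
        if (wr_busy_queues == 0)
                return sectors * async_charge_factor;
        return sectors * 2 * async_charge_factor;
}

/*
 * Idling rule sketched from bfq_bfqq_may_idle(): throughput-motivated
 * idling is suppressed as soon as any weight-raised queue is busy, so
 * non-weight-raised queues grab requests from the pool at a lower rate.
 */
static bool may_idle_sketch(bool queue_is_sync, bool idling_boosts_thr,
                            bool asymmetric_scenario, int wr_busy_queues)
{
        bool idling_boosts_thr_without_issues =
                idling_boosts_thr && wr_busy_queues == 0;

        return queue_is_sync &&
               (idling_boosts_thr_without_issues || asymmetric_scenario);
}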
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--  block/bfq-iosched.c | 66
1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 574a5f6a2370..deb1f21c535f 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -420,6 +420,8 @@ struct bfq_data {
          * queue in service, even if it is idling).
          */
         int busy_queues;
+        /* number of weight-raised busy @bfq_queues */
+        int wr_busy_queues;
         /* number of queued requests */
         int queued;
         /* number of requests dispatched and waiting for completion */
@@ -2490,6 +2492,9 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
         bfqd->busy_queues--;
 
+        if (bfqq->wr_coeff > 1)
+                bfqd->wr_busy_queues--;
+
         bfqg_stats_update_dequeue(bfqq_group(bfqq));
 
         bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
@@ -2506,6 +2511,9 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
         bfq_mark_bfqq_busy(bfqq);
         bfqd->busy_queues++;
+
+        if (bfqq->wr_coeff > 1)
+                bfqd->wr_busy_queues++;
 }
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -3779,7 +3787,16 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
         if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
                 return blk_rq_sectors(rq);
 
-        return blk_rq_sectors(rq) * bfq_async_charge_factor;
+        /*
+         * If there are no weight-raised queues, then amplify service
+         * by just the async charge factor; otherwise amplify service
+         * by twice the async charge factor, to further reduce latency
+         * for weight-raised queues.
+         */
+        if (bfqq->bfqd->wr_busy_queues == 0)
+                return blk_rq_sectors(rq) * bfq_async_charge_factor;
+
+        return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
 }
 
 /**
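As a rough worked example of the charging change above (assuming, purely for illustration, an async charge factor of 10): an 8-sector async write from a non-weight-raised queue is charged 8 * 10 = 80 sectors of budget while no weight-raised queue is busy, but 8 * 2 * 10 = 160 sectors otherwise, so the queue exhausts its budget twice as fast; this is what halves the async service rate mentioned in the commit message.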
@@ -4234,6 +4251,7 @@ static void bfq_add_request(struct request *rq)
                         bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                         bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
 
+                        bfqd->wr_busy_queues++;
                         bfqq->entity.prio_changed = 1;
                 }
                 if (prev != bfqq->next_rq)
@@ -4474,6 +4492,8 @@ end:
 /* Must be called with bfqq != NULL */
 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
 {
+        if (bfq_bfqq_busy(bfqq))
+                bfqq->bfqd->wr_busy_queues--;
         bfqq->wr_coeff = 1;
         bfqq->wr_cur_max_time = 0;
         bfqq->last_wr_start_finish = jiffies;
@@ -5497,7 +5517,8 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 {
         struct bfq_data *bfqd = bfqq->bfqd;
-        bool idling_boosts_thr, asymmetric_scenario;
+        bool idling_boosts_thr, idling_boosts_thr_without_issues,
+             asymmetric_scenario;
 
         if (bfqd->strict_guarantees)
                 return true;
@@ -5520,6 +5541,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
         idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
 
         /*
+         * The value of the next variable,
+         * idling_boosts_thr_without_issues, is equal to that of
+         * idling_boosts_thr, unless a special case holds. In this
+         * special case, described below, idling may cause problems to
+         * weight-raised queues.
+         *
+         * When the request pool is saturated (e.g., in the presence
+         * of write hogs), if the processes associated with
+         * non-weight-raised queues ask for requests at a lower rate,
+         * then processes associated with weight-raised queues have a
+         * higher probability to get a request from the pool
+         * immediately (or at least soon) when they need one. Thus
+         * they have a higher probability to actually get a fraction
+         * of the device throughput proportional to their high
+         * weight. This is especially true with NCQ-capable drives,
+         * which enqueue several requests in advance, and further
+         * reorder internally-queued requests.
+         *
+         * For this reason, we force to false the value of
+         * idling_boosts_thr_without_issues if there are weight-raised
+         * busy queues. In this case, and if bfqq is not weight-raised,
+         * this guarantees that the device is not idled for bfqq (if,
+         * instead, bfqq is weight-raised, then idling will be
+         * guaranteed by another variable, see below). Combined with
+         * the timestamping rules of BFQ (see [1] for details), this
+         * behavior causes bfqq, and hence any sync non-weight-raised
+         * queue, to get a lower number of requests served, and thus
+         * to ask for a lower number of requests from the request
+         * pool, before the busy weight-raised queues get served
+         * again. This often mitigates starvation problems in the
+         * presence of heavy write workloads and NCQ, thereby
+         * guaranteeing a higher application and system responsiveness
+         * in these hostile scenarios.
+         */
+        idling_boosts_thr_without_issues = idling_boosts_thr &&
+                bfqd->wr_busy_queues == 0;
+
+        /*
          * There is then a case where idling must be performed not for
          * throughput concerns, but to preserve service guarantees. To
          * introduce it, we can note that allowing the drive to
@@ -5593,7 +5652,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
          * is necessary to preserve service guarantees.
          */
         return bfq_bfqq_sync(bfqq) &&
-                (idling_boosts_thr || asymmetric_scenario);
+                (idling_boosts_thr_without_issues || asymmetric_scenario);
 }
 
 /*
@@ -6801,6 +6860,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
                                               * high-definition compressed
                                               * video.
                                               */
+        bfqd->wr_busy_queues = 0;
 
         /*
          * Begin by assuming, optimistically, that the device is a