summaryrefslogtreecommitdiffstats
path: root/block/bfq-iosched.c
diff options
context:
space:
mode:
authorPaolo Valente <paolo.valente@linaro.org>2018-08-16 12:51:17 -0400
committerJens Axboe <axboe@kernel.dk>2018-08-16 15:08:13 -0400
commitd5801088a7bd210dd8fd7add04745e35f0f6ea72 (patch)
treea91daecab3318589a0f19f84602fcbb5a779b642 /block/bfq-iosched.c
parente02a0aa26bf61b6e481a3d7453a150e692b0df80 (diff)
block, bfq: reduce write overcharge
When a sync request is dispatched, the queue that contains that request, and all the ancestor entities of that queue, are charged with the number of sectors of the request. In constrast, if the request is async, then the queue and its ancestor entities are charged with the number of sectors of the request, multiplied by an overcharge factor. This throttles the bandwidth for async I/O, w.r.t. to sync I/O, and it is done to counter the tendency of async writes to steal I/O throughput to reads. On the opposite end, the lower this parameter, the stabler I/O control, in the following respect. The lower this parameter is, the less the bandwidth enjoyed by a group decreases - when the group does writes, w.r.t. to when it does reads; - when other groups do reads, w.r.t. to when they do writes. The fixes "block, bfq: always update the budget of an entity when needed" and "block, bfq: readd missing reset of parent-entity service" improved I/O control in bfq to such an extent that it has been possible to revise this overcharge factor downwards. This commit introduces the resulting, new value. Signed-off-by: Paolo Valente <paolo.valente@linaro.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--block/bfq-iosched.c33
1 files changed, 19 insertions, 14 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 62efc1b97afb..653100fb719e 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
187static const int bfq_default_max_budget = 16 * 1024; 187static const int bfq_default_max_budget = 16 * 1024;
188 188
189/* 189/*
190 * Async to sync throughput distribution is controlled as follows: 190 * When a sync request is dispatched, the queue that contains that
191 * when an async request is served, the entity is charged the number 191 * request, and all the ancestor entities of that queue, are charged
192 * of sectors of the request, multiplied by the factor below 192 * with the number of sectors of the request. In contrast, if the
193 * request is async, then the queue and its ancestor entities are
194 * charged with the number of sectors of the request, multiplied by
195 * the factor below. This throttles the bandwidth for async I/O,
196 * w.r.t. to sync I/O, and it is done to counter the tendency of async
197 * writes to steal I/O throughput to reads.
198 *
199 * The current value of this parameter is the result of a tuning with
200 * several hardware and software configurations. We tried to find the
201 * lowest value for which writes do not cause noticeable problems to
202 * reads. In fact, the lower this parameter, the stabler I/O control,
203 * in the following respect. The lower this parameter is, the less
204 * the bandwidth enjoyed by a group decreases
205 * - when the group does writes, w.r.t. to when it does reads;
206 * - when other groups do reads, w.r.t. to when they do writes.
193 */ 207 */
194static const int bfq_async_charge_factor = 10; 208static const int bfq_async_charge_factor = 3;
195 209
196/* Default timeout values, in jiffies, approximating CFQ defaults. */ 210/* Default timeout values, in jiffies, approximating CFQ defaults. */
197const int bfq_timeout = HZ / 8; 211const int bfq_timeout = HZ / 8;
@@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
853 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) 867 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
854 return blk_rq_sectors(rq); 868 return blk_rq_sectors(rq);
855 869
856 /* 870 return blk_rq_sectors(rq) * bfq_async_charge_factor;
857 * If there are no weight-raised queues, then amplify service
858 * by just the async charge factor; otherwise amplify service
859 * by twice the async charge factor, to further reduce latency
860 * for weight-raised queues.
861 */
862 if (bfqq->bfqd->wr_busy_queues == 0)
863 return blk_rq_sectors(rq) * bfq_async_charge_factor;
864
865 return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
866} 871}
867 872
868/** 873/**