path: root/block/bfq-iosched.c
author	Paolo Valente <paolo.valente@linaro.org>	2019-06-25 01:12:46 -0400
committer	Jens Axboe <axboe@kernel.dk>	2019-06-25 11:07:34 -0400
commit	a3f9bce3697a5b4039ff7096db4a1ee897349276 (patch)
tree	24d9d3a9e58279913cd889b11590c29570728af8 /block/bfq-iosched.c
parent	24792ad01cb659c8b5899de2af6e8ca250f93df3 (diff)
block, bfq: bring forward seek&think time update
Until the base value for request service times gets finally computed for a bfq_queue, the inject limit for that queue does depend on the think-time state (short|long) of the queue. A timely update of the think time then guarantees a quicker activation or deactivation of the injection. Fortunately, the think time of a bfq_queue is updated in the same code path as the inject limit, but after the inject limit.

This commit moves the update of the think time before the update of the inject limit. For coherence, it moves the update of the seek time too.

Reported-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
Tested-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
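For illustration only, here is a minimal C sketch of the ordering dependency described above; the names (toy_queue, toy_update_think_time, and so on) are hypothetical and are not the actual bfq-iosched.c code. While the base service time is still unknown, the inject limit is derived from the think-time state, so that state has to be refreshed before the limit is recomputed.

/*
 * Hypothetical sketch, not real BFQ code: it only shows why the
 * think-time update must precede any logic that derives the inject
 * limit from the think-time state.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	bool base_service_time_known;	/* set once service times are sampled */
	bool has_short_ttime;		/* think-time state: short vs. long */
	unsigned int inject_limit;	/* how much injection is allowed */
};

static void toy_update_think_time(struct toy_queue *q, bool short_ttime)
{
	q->has_short_ttime = short_ttime;	/* the "timely update" */
}

static void toy_update_inject_limit(struct toy_queue *q)
{
	/*
	 * Until the base service time is known, the limit only tracks the
	 * think-time state, as the commit message says: no injection for a
	 * queue expected to issue I/O again soon, a little otherwise.
	 */
	if (!q->base_service_time_known)
		q->inject_limit = q->has_short_ttime ? 0 : 1;
}

static void toy_enqueue(struct toy_queue *q, bool short_ttime)
{
	/* Think time first, so the limit computation sees fresh state. */
	toy_update_think_time(q, short_ttime);
	toy_update_inject_limit(q);
}

int main(void)
{
	struct toy_queue q = { .base_service_time_known = false };

	toy_enqueue(&q, true);	/* short think time */
	printf("limit after short ttime: %u\n", q.inject_limit);

	toy_enqueue(&q, false);	/* long think time */
	printf("limit after long ttime: %u\n", q.inject_limit);

	return 0;
}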
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--	block/bfq-iosched.c	| 14
1 file changed, 4 insertions, 10 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 62442083b147..d5bc32371ace 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4979,19 +4979,9 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 			    struct request *rq)
 {
-	struct bfq_io_cq *bic = RQ_BIC(rq);
-
 	if (rq->cmd_flags & REQ_META)
 		bfqq->meta_pending++;
 
-	bfq_update_io_thinktime(bfqd, bfqq);
-	bfq_update_has_short_ttime(bfqd, bfqq, bic);
-	bfq_update_io_seektime(bfqd, bfqq, rq);
-
-	bfq_log_bfqq(bfqd, bfqq,
-		     "rq_enqueued: has_short_ttime=%d (seeky %d)",
-		     bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
-
 	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
@@ -5079,6 +5069,10 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 		bfqq = new_bfqq;
 	}
 
+	bfq_update_io_thinktime(bfqd, bfqq);
+	bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
+	bfq_update_io_seektime(bfqd, bfqq, rq);
+
 	waiting = bfqq && bfq_bfqq_wait_request(bfqq);
 	bfq_add_request(rq);
 	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);