author    Luca Miccio <lucmiccio@gmail.com>	2017-11-13 01:34:08 -0500
committer Jens Axboe <axboe@kernel.dk>	2017-11-14 22:13:33 -0500
commit    614822f81f606e0064acdae11d9ec1efd3db4190
tree      3c0ce01c5a00be7aac4dec067c2e198fa827ce5f
parent    68017e5d87a2477d40476f1a0a06f202ee79316b
block, bfq: add missing invocations of bfqg_stats_update_io_add/remove
bfqg_stats_update_io_add and bfqg_stats_update_io_remove are to be
invoked, respectively, when an I/O request enters and when an I/O
request exits the scheduler. Unfortunately, bfq does not fully comply
with this scheme, because it does not invoke these functions for
requests that are inserted into or extracted from its priority
dispatch list. This commit fixes this mistake.

Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
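The invariant being restored can be modeled outside the kernel. Below is a
minimal userspace C sketch, not kernel code: queue, request, stats_io_add and
stats_io_remove are illustrative stand-ins for the bfqg_stats_update_io_add/
remove hooks. The point is that every insertion path, including the priority
dispatch-list bypass that bfq previously missed, must be paired with a matching
removal on dispatch, or the in-flight accounting drifts:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-group stats kept by bfqg_stats_update_io_add/
 * remove: one in-flight counter that must return to zero once every
 * inserted request has been dispatched. */
static int io_in_flight;

static void stats_io_add(void)    { io_in_flight++; }
static void stats_io_remove(void) { io_in_flight--; }

/* A request enters either through the regular scheduler queues or
 * straight onto the priority dispatch list. */
static void insert_request(bool priority_dispatch)
{
	/* Account the request on *every* insertion path, not only the
	 * regular one: this pairing is what the patch restores. */
	stats_io_add();
	if (priority_dispatch) {
		/* e.g. list_add_tail(&rq->queuelist, &bfqd->dispatch) */
	} else {
		/* e.g. __bfq_insert_request(bfqd, rq) */
	}
}

static void dispatch_request(void)
{
	/* Likewise, account the exit whichever list the request was
	 * extracted from. */
	stats_io_remove();
}

int main(void)
{
	insert_request(false);	/* regular insertion */
	insert_request(true);	/* priority dispatch-list insertion */
	dispatch_request();
	dispatch_request();
	/* Without the matching add/remove calls, this would not hold. */
	assert(io_in_flight == 0);
	printf("in flight after all dispatches: %d\n", io_in_flight);
	return 0;
}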
Diffstat (limited to 'block/bfq-iosched.c')
 block/bfq-iosched.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 889a8549d97f..91703eba63f0 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1359,7 +1359,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
 			bfqq->ttime.last_end_request +
 			bfqd->bfq_slice_idle * 3;
 
-	bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
 
 	/*
 	 * bfqq deserves to be weight-raised if:
@@ -1633,7 +1632,6 @@ static void bfq_remove_request(struct request_queue *q,
 	if (rq->cmd_flags & REQ_META)
 		bfqq->meta_pending--;
 
-	bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
 }
 
 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
@@ -1746,6 +1744,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 		bfqq->next_rq = rq;
 
 	bfq_remove_request(q, next);
+	bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
 
 	spin_unlock_irq(&bfqq->bfqd->lock);
 end:
@@ -3700,6 +3699,9 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	spin_lock_irq(&bfqd->lock);
 
 	rq = __bfq_dispatch_request(hctx);
+	if (rq && RQ_BFQQ(rq))
+		bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
+					    rq->cmd_flags);
 	spin_unlock_irq(&bfqd->lock);
 
 	return rq;
@@ -4224,6 +4226,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
+	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4243,6 +4246,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
 		__bfq_insert_request(bfqd, rq);
+		/*
+		 * Update bfqq, because, if a queue merge has occurred
+		 * in __bfq_insert_request, then rq has been
+		 * redirected into a new queue.
+		 */
+		bfqq = RQ_BFQQ(rq);
 
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -4251,6 +4260,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
+	if (bfqq)
+		bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
+
 	spin_unlock_irq(&bfqd->lock);
 }
 
@@ -4428,8 +4440,11 @@ static void bfq_finish_request(struct request *rq)
 		 * lock is held.
 		 */
 
-		if (!RB_EMPTY_NODE(&rq->rb_node))
+		if (!RB_EMPTY_NODE(&rq->rb_node)) {
 			bfq_remove_request(rq->q, rq);
+			bfqg_stats_update_io_remove(bfqq_group(bfqq),
+						    rq->cmd_flags);
+		}
 		bfq_put_rq_priv_body(bfqq);
 	}
 
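The comment added in the bfq_insert_request hunk above documents a second
subtlety: __bfq_insert_request can merge bfqq into another queue, so a bfqq
pointer captured before the call may be stale by the time the stats are
charged. A minimal userspace C sketch of that pattern, with queue, request
and insert_may_merge as illustrative stand-ins rather than kernel APIs:

#include <assert.h>
#include <stddef.h>

struct queue { int id; };

struct request {
	struct queue *q;	/* queue the request currently belongs to */
};

/* Stand-in for __bfq_insert_request: a queue merge may redirect the
 * request into another queue as a side effect of insertion. */
static void insert_may_merge(struct request *rq, struct queue *merged)
{
	if (merged)
		rq->q = merged;	/* rq no longer belongs to its old queue */
}

int main(void)
{
	struct queue q1 = { .id = 1 }, q2 = { .id = 2 };
	struct request rq = { .q = &q1 };

	struct queue *stale = rq.q;	/* captured before insertion */
	insert_may_merge(&rq, &q2);	/* a queue merge occurred */
	struct queue *fresh = rq.q;	/* re-read after insertion, as the
					 * patch does with bfqq = RQ_BFQQ(rq) */

	assert(stale->id == 1 && fresh->id == 2);
	/* Charging the I/O to 'stale' would account it to the wrong
	 * queue's group; 'fresh' is the correct one. */
	return 0;
}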