author	Paolo Valente <paolo.valente@linaro.org>	2017-12-04 05:42:05 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-01-05 11:32:59 -0500
commit	9b25bd0368d562d1929059e8eb9de4102567b923 (patch)
tree	ec28af9f679cb63cceb189530c75e02fb7ece769 /block/bfq-iosched.c
parent	a34b024448eb71b0e51ad011fa1862236e366034 (diff)
block, bfq: remove batches of confusing ifdefs
Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
one reported in [1], plus a similar one in another function. This
commit removes both batches, in the way suggested in [1].

[1] https://www.spinics.net/lists/linux-block/msg20043.html

Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Luca Miccio <lucmiccio@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
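The pattern this commit applies is worth noting: the conditionally
compiled statistics code is hoisted out of the hot-path functions into
helpers (bfq_update_dispatch_stats() and bfq_update_insert_stats())
defined once under the config check, with empty static inline stubs in
the #else branch, so the callers carry no ifdefs and the stub calls
compile away to nothing. Below is a minimal standalone sketch of that
pattern; the DEBUG_STATS macro, update_stats() helper, and dispatch()
caller are illustrative names only, not kernel code:

	#include <stdio.h>

	/* Compile with -DDEBUG_STATS to enable the accounting path. */
	#ifdef DEBUG_STATS
	static void update_stats(const char *op, int nr_requests)
	{
		/* The one and only conditionally compiled block. */
		fprintf(stderr, "stats: %s, %d request(s) pending\n",
			op, nr_requests);
	}
	#else
	/* Empty stub: call sites in hot paths compile away entirely. */
	static inline void update_stats(const char *op, int nr_requests) {}
	#endif

	static int dispatch(int nr_requests)
	{
		/* The hot path stays free of ifdefs. */
		update_stats("dispatch", nr_requests);
		return nr_requests - 1;
	}

	int main(void)
	{
		int pending = 3;

		while (pending > 0)
			pending = dispatch(pending);
		return 0;
	}

Built without -DDEBUG_STATS, the compiler inlines the empty stub and
the call sites cost nothing, which is why the kernel version can call
the helpers unconditionally from bfq_dispatch_request() and
bfq_insert_request().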
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--	block/bfq-iosched.c	127
1 file changed, 72 insertions, 55 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index e33c5c4c9856..7bd789da7a29 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3743,35 +3743,16 @@ exit:
 	return rq;
 }
 
-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
-	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
-	struct request *rq;
 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	struct bfq_queue *in_serv_queue, *bfqq;
-	bool waiting_rq, idle_timer_disabled;
-#endif
-
-	spin_lock_irq(&bfqd->lock);
-
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	in_serv_queue = bfqd->in_service_queue;
-	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
-
-	rq = __bfq_dispatch_request(hctx);
-
-	idle_timer_disabled =
-	    waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
-
-#else
-	rq = __bfq_dispatch_request(hctx);
-#endif
-	spin_unlock_irq(&bfqd->lock);
+static void bfq_update_dispatch_stats(struct request_queue *q,
+				      struct request *rq,
+				      struct bfq_queue *in_serv_queue,
+				      bool idle_timer_disabled)
+{
+	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	bfqq = rq ? RQ_BFQQ(rq) : NULL;
 	if (!idle_timer_disabled && !bfqq)
-		return rq;
+		return;
 
 	/*
 	 * rq and bfqq are guaranteed to exist until this function
@@ -3786,7 +3767,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(hctx->queue->queue_lock);
+	spin_lock_irq(q->queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -3805,9 +3786,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(hctx->queue->queue_lock);
+	spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_dispatch_stats(struct request_queue *q,
+					     struct request *rq,
+					     struct bfq_queue *in_serv_queue,
+					     bool idle_timer_disabled) {}
 #endif
 
+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	struct request *rq;
+	struct bfq_queue *in_serv_queue;
+	bool waiting_rq, idle_timer_disabled;
+
+	spin_lock_irq(&bfqd->lock);
+
+	in_serv_queue = bfqd->in_service_queue;
+	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+	rq = __bfq_dispatch_request(hctx);
+
+	idle_timer_disabled =
+	    waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+	spin_unlock_irq(&bfqd->lock);
+
+	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+				  idle_timer_disabled);
+
 	return rq;
 }
 
@@ -4335,16 +4344,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	return idle_timer_disabled;
 }
 
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+static void bfq_update_insert_stats(struct request_queue *q,
+				    struct bfq_queue *bfqq,
+				    bool idle_timer_disabled,
+				    unsigned int cmd_flags)
+{
+	if (!bfqq)
+		return;
+
+	/*
+	 * bfqq still exists, because it can disappear only after
+	 * either it is merged with another queue, or the process it
+	 * is associated with exits. But both actions must be taken by
+	 * the same process currently executing this flow of
+	 * instructions.
+	 *
+	 * In addition, the following queue lock guarantees that
+	 * bfqq_group(bfqq) exists as well.
+	 */
+	spin_lock_irq(q->queue_lock);
+	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+	if (idle_timer_disabled)
+		bfqg_stats_update_idle_time(bfqq_group(bfqq));
+	spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_insert_stats(struct request_queue *q,
+					   struct bfq_queue *bfqq,
+					   bool idle_timer_disabled,
+					   unsigned int cmd_flags) {}
+#endif
+
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			       bool at_head)
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
-#endif
 
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4363,7 +4402,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
@@ -4371,9 +4409,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		 * redirected into a new queue.
 		 */
 		bfqq = RQ_BFQQ(rq);
-#else
-		__bfq_insert_request(bfqd, rq);
-#endif
 
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -4382,35 +4417,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	/*
 	 * Cache cmd_flags before releasing scheduler lock, because rq
 	 * may disappear afterwards (for example, because of a request
 	 * merge).
 	 */
 	cmd_flags = rq->cmd_flags;
-#endif
+
 	spin_unlock_irq(&bfqd->lock);
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	if (!bfqq)
-		return;
-	/*
-	 * bfqq still exists, because it can disappear only after
-	 * either it is merged with another queue, or the process it
-	 * is associated with exits. But both actions must be taken by
-	 * the same process currently executing this flow of
-	 * instruction.
-	 *
-	 * In addition, the following queue lock guarantees that
-	 * bfqq_group(bfqq) exists as well.
-	 */
-	spin_lock_irq(q->queue_lock);
-	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
-	if (idle_timer_disabled)
-		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
-#endif
+	bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
+				cmd_flags);
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,