diff options
author | Paolo Valente <paolo.valente@linaro.org> | 2018-06-25 15:55:37 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-07-09 11:07:52 -0400 |
commit | 277a4a9b56cde0f3d53ea8abc0e43ff636820007 (patch) | |
tree | 9ee40cd882086479f11a2ed501a581f9489e6f55 /block/bfq-iosched.c | |
parent | 9fae8dd59ff3d9c19570cbddf12e87d7bb66c8a2 (diff) |
block, bfq: give a better name to bfq_bfqq_may_idle
The actual goal of the function bfq_bfqq_may_idle is to tell whether
it is better to perform device idling (more precisely: I/O-dispatch
plugging) for the input bfq_queue, either to boost throughput or to
preserve service guarantees. This commit improves the name of the
function accordingly.
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r-- | block/bfq-iosched.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index d579cc8e0db6..41d9036b1822 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c | |||
@@ -634,7 +634,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) | |||
634 | * The following function returns true if every queue must receive the | 634 | * The following function returns true if every queue must receive the |
635 | * same share of the throughput (this condition is used when deciding | 635 | * same share of the throughput (this condition is used when deciding |
636 | * whether idling may be disabled, see the comments in the function | 636 | * whether idling may be disabled, see the comments in the function |
637 | * bfq_bfqq_may_idle()). | 637 | * bfq_better_to_idle()). |
638 | * | 638 | * |
639 | * Such a scenario occurs when: | 639 | * Such a scenario occurs when: |
640 | * 1) all active queues have the same weight, | 640 | * 1) all active queues have the same weight, |
@@ -3355,7 +3355,7 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) | |||
3355 | * issues taken into account are not trivial. We discuss these issues | 3355 | * issues taken into account are not trivial. We discuss these issues |
3356 | * individually while introducing the variables. | 3356 | * individually while introducing the variables. |
3357 | */ | 3357 | */ |
3358 | static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) | 3358 | static bool bfq_better_to_idle(struct bfq_queue *bfqq) |
3359 | { | 3359 | { |
3360 | struct bfq_data *bfqd = bfqq->bfqd; | 3360 | struct bfq_data *bfqd = bfqq->bfqd; |
3361 | bool rot_without_queueing = | 3361 | bool rot_without_queueing = |
@@ -3588,19 +3588,19 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) | |||
3588 | } | 3588 | } |
3589 | 3589 | ||
3590 | /* | 3590 | /* |
3591 | * If the in-service queue is empty but the function bfq_bfqq_may_idle | 3591 | * If the in-service queue is empty but the function bfq_better_to_idle |
3592 | * returns true, then: | 3592 | * returns true, then: |
3593 | * 1) the queue must remain in service and cannot be expired, and | 3593 | * 1) the queue must remain in service and cannot be expired, and |
3594 | * 2) the device must be idled to wait for the possible arrival of a new | 3594 | * 2) the device must be idled to wait for the possible arrival of a new |
3595 | * request for the queue. | 3595 | * request for the queue. |
3596 | * See the comments on the function bfq_bfqq_may_idle for the reasons | 3596 | * See the comments on the function bfq_better_to_idle for the reasons |
3597 | * why performing device idling is the best choice to boost the throughput | 3597 | * why performing device idling is the best choice to boost the throughput |
3598 | * and preserve service guarantees when bfq_bfqq_may_idle itself | 3598 | * and preserve service guarantees when bfq_better_to_idle itself |
3599 | * returns true. | 3599 | * returns true. |
3600 | */ | 3600 | */ |
3601 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) | 3601 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) |
3602 | { | 3602 | { |
3603 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); | 3603 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); |
3604 | } | 3604 | } |
3605 | 3605 | ||
3606 | /* | 3606 | /* |
@@ -3686,7 +3686,7 @@ check_queue: | |||
3686 | * may idle after their completion, then keep it anyway. | 3686 | * may idle after their completion, then keep it anyway. |
3687 | */ | 3687 | */ |
3688 | if (bfq_bfqq_wait_request(bfqq) || | 3688 | if (bfq_bfqq_wait_request(bfqq) || |
3689 | (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { | 3689 | (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { |
3690 | bfqq = NULL; | 3690 | bfqq = NULL; |
3691 | goto keep_queue; | 3691 | goto keep_queue; |
3692 | } | 3692 | } |
@@ -4734,7 +4734,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) | |||
4734 | BFQQE_BUDGET_TIMEOUT); | 4734 | BFQQE_BUDGET_TIMEOUT); |
4735 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && | 4735 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && |
4736 | (bfqq->dispatched == 0 || | 4736 | (bfqq->dispatched == 0 || |
4737 | !bfq_bfqq_may_idle(bfqq))) | 4737 | !bfq_better_to_idle(bfqq))) |
4738 | bfq_bfqq_expire(bfqd, bfqq, false, | 4738 | bfq_bfqq_expire(bfqd, bfqq, false, |
4739 | BFQQE_NO_MORE_REQUESTS); | 4739 | BFQQE_NO_MORE_REQUESTS); |
4740 | } | 4740 | } |