aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Valente <paolo.valente@linaro.org>2019-01-29 06:06:29 -0500
committerJens Axboe <axboe@kernel.dk>2019-01-31 14:50:23 -0500
commit73d58118498b14e4d2f2391105459b997b586ddc (patch)
tree71e72676425a3eaa6e6ebe582ce27994d1eafbf2
parent03e565e4204c6cf8687d995de5cafd0341503b4e (diff)
block, bfq: consider also ioprio classes in symmetry detection
In asymmetric scenarios, i.e., when some bfq_queue or bfq_group needs to be guaranteed a different bandwidth than other bfq_queues or bfq_groups, these service guarantees can be provided only by plugging I/O dispatch, completely or partially, when the queue in service remains temporarily empty. A case where asymmetry is particularly strong is when some active bfq_queues belong to a higher-priority class than some other active bfq_queues. Unfortunately, this important case is not considered at all in the code for detecting asymmetric scenarios. This commit adds the missing logic. Signed-off-by: Paolo Valente <paolo.valente@linaro.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/bfq-iosched.c86
-rw-r--r--block/bfq-iosched.h8
-rw-r--r--block/bfq-wf2q.c12
3 files changed, 59 insertions, 47 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a9275ed57726..6bfbfa65610b 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -624,26 +624,6 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
624} 624}
625 625
626/* 626/*
627 * Tell whether there are active queues with different weights or
628 * active groups.
629 */
630static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
631{
632 /*
633 * For queue weights to differ, queue_weights_tree must contain
634 * at least two nodes.
635 */
636 return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
637 (bfqd->queue_weights_tree.rb_node->rb_left ||
638 bfqd->queue_weights_tree.rb_node->rb_right)
639#ifdef CONFIG_BFQ_GROUP_IOSCHED
640 ) ||
641 (bfqd->num_groups_with_pending_reqs > 0
642#endif
643 );
644}
645
646/*
647 * The following function returns true if every queue must receive the 627 * The following function returns true if every queue must receive the
648 * same share of the throughput (this condition is used when deciding 628 * same share of the throughput (this condition is used when deciding
649 * whether idling may be disabled, see the comments in the function 629 * whether idling may be disabled, see the comments in the function
@@ -651,25 +631,48 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
651 * 631 *
652 * Such a scenario occurs when: 632 * Such a scenario occurs when:
653 * 1) all active queues have the same weight, 633 * 1) all active queues have the same weight,
654 * 2) all active groups at the same level in the groups tree have the same 634 * 2) all active queues belong to the same I/O-priority class,
655 * weight,
656 * 3) all active groups at the same level in the groups tree have the same 635 * 3) all active groups at the same level in the groups tree have the same
636 * weight,
637 * 4) all active groups at the same level in the groups tree have the same
657 * number of children. 638 * number of children.
658 * 639 *
659 * Unfortunately, keeping the necessary state for evaluating exactly 640 * Unfortunately, keeping the necessary state for evaluating exactly
660 * the last two symmetry sub-conditions above would be quite complex 641 * the last two symmetry sub-conditions above would be quite complex
661 * and time consuming. Therefore this function evaluates, instead, 642 * and time consuming. Therefore this function evaluates, instead,
662 * only the following stronger two sub-conditions, for which it is 643 * only the following stronger three sub-conditions, for which it is
663 * much easier to maintain the needed state: 644 * much easier to maintain the needed state:
664 * 1) all active queues have the same weight, 645 * 1) all active queues have the same weight,
665 * 2) there are no active groups. 646 * 2) all active queues belong to the same I/O-priority class,
647 * 3) there are no active groups.
666 * In particular, the last condition is always true if hierarchical 648 * In particular, the last condition is always true if hierarchical
667 * support or the cgroups interface are not enabled, thus no state 649 * support or the cgroups interface are not enabled, thus no state
668 * needs to be maintained in this case. 650 * needs to be maintained in this case.
669 */ 651 */
670static bool bfq_symmetric_scenario(struct bfq_data *bfqd) 652static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
671{ 653{
672 return !bfq_varied_queue_weights_or_active_groups(bfqd); 654 /*
655 * For queue weights to differ, queue_weights_tree must contain
656 * at least two nodes.
657 */
658 bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
659 (bfqd->queue_weights_tree.rb_node->rb_left ||
660 bfqd->queue_weights_tree.rb_node->rb_right);
661
662 bool multiple_classes_busy =
663 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
664 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
665 (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
666
 667 /*
 668 * The scenario is symmetric only if none of the above asymmetry
 669 * conditions holds and, when group scheduling is enabled, no
 670 * group has pending requests either.
 671 */
671 return !(varied_queue_weights || multiple_classes_busy
 672#ifdef CONFIG_BFQ_GROUP_IOSCHED
673 || bfqd->num_groups_with_pending_reqs > 0
674#endif
675 );
673} 676}
674 677
675/* 678/*
@@ -728,15 +731,14 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
728 /* 731 /*
729 * In the unlucky event of an allocation failure, we just 732 * In the unlucky event of an allocation failure, we just
730 * exit. This will cause the weight of queue to not be 733 * exit. This will cause the weight of queue to not be
731 * considered in bfq_varied_queue_weights_or_active_groups, 734 * considered in bfq_symmetric_scenario, which, in its turn,
732 * which, in its turn, causes the scenario to be deemed 735 * causes the scenario to be deemed wrongly symmetric in case
733 * wrongly symmetric in case bfqq's weight would have been 736 * bfqq's weight would have been the only weight making the
734 * the only weight making the scenario asymmetric. On the 737 * scenario asymmetric. On the bright side, no unbalance will
735 * bright side, no unbalance will however occur when bfqq 738 * however occur when bfqq becomes inactive again (the
736 * becomes inactive again (the invocation of this function 739 * invocation of this function is triggered by an activation
737 * is triggered by an activation of queue). In fact, 740 * of queue). In fact, bfq_weights_tree_remove does nothing
738 * bfq_weights_tree_remove does nothing if 741 * if !bfqq->weight_counter.
739 * !bfqq->weight_counter.
740 */ 742 */
741 if (unlikely(!bfqq->weight_counter)) 743 if (unlikely(!bfqq->weight_counter))
742 return; 744 return;
@@ -2227,7 +2229,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2227 return NULL; 2229 return NULL;
2228 2230
2229 /* If there is only one backlogged queue, don't search. */ 2231 /* If there is only one backlogged queue, don't search. */
2230 if (bfqd->busy_queues == 1) 2232 if (bfq_tot_busy_queues(bfqd) == 1)
2231 return NULL; 2233 return NULL;
2232 2234
2233 in_service_bfqq = bfqd->in_service_queue; 2235 in_service_bfqq = bfqd->in_service_queue;
@@ -3681,7 +3683,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
3681 * the requests already queued in the device have been served. 3683 * the requests already queued in the device have been served.
3682 */ 3684 */
3683 asymmetric_scenario = (bfqq->wr_coeff > 1 && 3685 asymmetric_scenario = (bfqq->wr_coeff > 1 &&
3684 bfqd->wr_busy_queues < bfqd->busy_queues) || 3686 bfqd->wr_busy_queues <
3687 bfq_tot_busy_queues(bfqd)) ||
3685 !bfq_symmetric_scenario(bfqd); 3688 !bfq_symmetric_scenario(bfqd);
3686 3689
3687 /* 3690 /*
@@ -3960,7 +3963,7 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3960 * belongs to CLASS_IDLE and other queues are waiting for 3963 * belongs to CLASS_IDLE and other queues are waiting for
3961 * service. 3964 * service.
3962 */ 3965 */
3963 if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq))) 3966 if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
3964 goto return_rq; 3967 goto return_rq;
3965 3968
3966 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); 3969 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
@@ -3978,7 +3981,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3978 * most a call to dispatch for nothing 3981 * most a call to dispatch for nothing
3979 */ 3982 */
3980 return !list_empty_careful(&bfqd->dispatch) || 3983 return !list_empty_careful(&bfqd->dispatch) ||
3981 bfqd->busy_queues > 0; 3984 bfq_tot_busy_queues(bfqd) > 0;
3982} 3985}
3983 3986
3984static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) 3987static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
@@ -4032,9 +4035,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
4032 goto start_rq; 4035 goto start_rq;
4033 } 4036 }
4034 4037
4035 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); 4038 bfq_log(bfqd, "dispatch requests: %d busy queues",
4039 bfq_tot_busy_queues(bfqd));
4036 4040
4037 if (bfqd->busy_queues == 0) 4041 if (bfq_tot_busy_queues(bfqd) == 0)
4038 goto exit; 4042 goto exit;
4039 4043
4040 /* 4044 /*
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 0b02bf302de0..30be669be465 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -501,10 +501,11 @@ struct bfq_data {
501 unsigned int num_groups_with_pending_reqs; 501 unsigned int num_groups_with_pending_reqs;
502 502
503 /* 503 /*
504 * Number of bfq_queues containing requests (including the 504 * Per-class (RT, BE, IDLE) number of bfq_queues containing
505 * queue in service, even if it is idling). 505 * requests (including the queue in service, even if it is
506 * idling).
506 */ 507 */
507 int busy_queues; 508 unsigned int busy_queues[3];
508 /* number of weight-raised busy @bfq_queues */ 509 /* number of weight-raised busy @bfq_queues */
509 int wr_busy_queues; 510 int wr_busy_queues;
510 /* number of queued requests */ 511 /* number of queued requests */
@@ -974,6 +975,7 @@ extern struct blkcg_policy blkcg_policy_bfq;
974 975
975struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq); 976struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
976struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); 977struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
978unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
977struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity); 979struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
978struct bfq_entity *bfq_entity_of(struct rb_node *node); 980struct bfq_entity *bfq_entity_of(struct rb_node *node);
979unsigned short bfq_ioprio_to_weight(int ioprio); 981unsigned short bfq_ioprio_to_weight(int ioprio);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 72adbbe975d5..ce37d709a34f 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -44,6 +44,12 @@ static unsigned int bfq_class_idx(struct bfq_entity *entity)
44 BFQ_DEFAULT_GRP_CLASS - 1; 44 BFQ_DEFAULT_GRP_CLASS - 1;
45} 45}
46 46
47unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
48{
49 return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
50 bfqd->busy_queues[2];
51}
52
47static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, 53static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
48 bool expiration); 54 bool expiration);
49 55
@@ -1513,7 +1519,7 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1513 struct bfq_sched_data *sd; 1519 struct bfq_sched_data *sd;
1514 struct bfq_queue *bfqq; 1520 struct bfq_queue *bfqq;
1515 1521
1516 if (bfqd->busy_queues == 0) 1522 if (bfq_tot_busy_queues(bfqd) == 0)
1517 return NULL; 1523 return NULL;
1518 1524
1519 /* 1525 /*
@@ -1665,7 +1671,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1665 1671
1666 bfq_clear_bfqq_busy(bfqq); 1672 bfq_clear_bfqq_busy(bfqq);
1667 1673
1668 bfqd->busy_queues--; 1674 bfqd->busy_queues[bfqq->ioprio_class - 1]--;
1669 1675
1670 if (!bfqq->dispatched) 1676 if (!bfqq->dispatched)
1671 bfq_weights_tree_remove(bfqd, bfqq); 1677 bfq_weights_tree_remove(bfqd, bfqq);
@@ -1688,7 +1694,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1688 bfq_activate_bfqq(bfqd, bfqq); 1694 bfq_activate_bfqq(bfqd, bfqq);
1689 1695
1690 bfq_mark_bfqq_busy(bfqq); 1696 bfq_mark_bfqq_busy(bfqq);
1691 bfqd->busy_queues++; 1697 bfqd->busy_queues[bfqq->ioprio_class - 1]++;
1692 1698
1693 if (!bfqq->dispatched) 1699 if (!bfqq->dispatched)
1694 if (bfqq->wr_coeff == 1) 1700 if (bfqq->wr_coeff == 1)