author    Tejun Heo <tj@kernel.org>  2013-05-14 16:52:35 -0400
committer Tejun Heo <tj@kernel.org>  2013-05-14 16:52:35 -0400
commit    0e9f4164ba915052918a77ecb2a59822dbfd661c (patch)
tree      3b9756ee1b216b9f8a9ed5e53623e4036971912b /block/blk-throttle.c
parent    651930bc1c2a2550fde93a8cfa1a201c363a0ca1 (diff)
blk-throttle: generalize update_disptime optimization in blk_throtl_bio()
When blk_throtl_bio() wants to queue a bio to a tg (throtl_grp), it avoids invoking tg_update_disptime() and throtl_schedule_next_dispatch() if the tg already has bios queued in that direction. As a new bio is appended after the existing ones, it can't change the tg's next dispatch time or the parent's dispatch schedule.

This optimization is currently open-coded in blk_throtl_bio(): whether the target biolist was occupied is recorded in a local variable and later used to skip the disptime update.

This patch generalizes it so that throtl_add_bio_tg() sets a new flag, THROTL_TG_WAS_EMPTY, if the biolist was empty before the new bio was added, and tg_update_disptime() clears the flag automatically. blk_throtl_bio() is updated to simply test the flag before updating disptime.

This patch doesn't make any functional difference now but will enable using the same optimization for recursive dispatch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
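The set-then-consume handshake this patch introduces is small enough to model in isolation. Below is a minimal, self-contained sketch; struct group, add_bio() and update_disptime() are simplified stand-ins for struct throtl_grp, throtl_add_bio_tg() and tg_update_disptime(), not the kernel's actual definitions:

/*
 * Minimal model of the THROTL_TG_WAS_EMPTY handshake.  The producer
 * notes emptiness at queueing time; the consumer clears the hint when
 * it recomputes the dispatch time.
 */
#include <stdio.h>

#define TG_WAS_EMPTY	(1 << 1)

struct group {
	unsigned int flags;
	int nr_queued;
};

/* modeled on throtl_add_bio_tg(): note emptiness before queueing */
static void add_bio(struct group *g)
{
	if (!g->nr_queued)
		g->flags |= TG_WAS_EMPTY;	/* dispatch time may change */
	g->nr_queued++;
}

/* modeled on tg_update_disptime(): recomputing consumes the hint */
static void update_disptime(struct group *g)
{
	/* ... recompute the group's dispatch time here ... */
	g->flags &= ~TG_WAS_EMPTY;
}

int main(void)
{
	struct group g = { 0, 0 };

	add_bio(&g);		/* list was empty: flag gets set */
	if (g.flags & TG_WAS_EMPTY)
		update_disptime(&g);

	add_bio(&g);		/* list occupied: flag stays clear */
	printf("queued=%d was_empty=%d\n", g.nr_queued,
	       !!(g.flags & TG_WAS_EMPTY));
	return 0;
}

Because the flag is set and cleared by the two helpers themselves, no caller needs its own bookkeeping, which is exactly what lets the local update_disptime variable below go away.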
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 154bd63719c5..ec9397f3eb0a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -46,6 +46,7 @@ struct throtl_service_queue {
 
 enum tg_state_flags {
 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
+	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
 };
 
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
@@ -712,6 +713,15 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
712 struct throtl_service_queue *sq = &tg->service_queue; 713 struct throtl_service_queue *sq = &tg->service_queue;
713 bool rw = bio_data_dir(bio); 714 bool rw = bio_data_dir(bio);
714 715
716 /*
717 * If @tg doesn't currently have any bios queued in the same
718 * direction, queueing @bio can change when @tg should be
719 * dispatched. Mark that @tg was empty. This is automatically
720 * cleaered on the next tg_update_disptime().
721 */
722 if (!sq->nr_queued[rw])
723 tg->flags |= THROTL_TG_WAS_EMPTY;
724
715 bio_list_add(&sq->bio_lists[rw], bio); 725 bio_list_add(&sq->bio_lists[rw], bio);
716 /* Take a bio reference on tg */ 726 /* Take a bio reference on tg */
717 blkg_get(tg_to_blkg(tg)); 727 blkg_get(tg_to_blkg(tg));
@@ -740,6 +750,9 @@ static void tg_update_disptime(struct throtl_grp *tg,
 	throtl_dequeue_tg(tg, parent_sq);
 	tg->disptime = disptime;
 	throtl_enqueue_tg(tg, parent_sq);
+
+	/* see throtl_add_bio_tg() */
+	tg->flags &= ~THROTL_TG_WAS_EMPTY;
 }
 
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
@@ -1061,7 +1074,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
 	struct throtl_service_queue *sq;
-	bool rw = bio_data_dir(bio), update_disptime = true;
+	bool rw = bio_data_dir(bio);
 	struct blkcg *blkcg;
 	bool throttled = false;
 
@@ -1097,16 +1110,10 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 
 	sq = &tg->service_queue;
 
-	if (sq->nr_queued[rw]) {
-		/*
-		 * There is already another bio queued in same dir. No
-		 * need to update dispatch time.
-		 */
-		update_disptime = false;
+	/* throtl is FIFO - if other bios are already queued, should queue */
+	if (sq->nr_queued[rw])
 		goto queue_bio;
 
-	}
-
 	/* Bio is with-in rate limit of group */
 	if (tg_may_dispatch(tg, bio, NULL)) {
 		throtl_charge_bio(tg, bio);
@@ -1138,7 +1145,8 @@ queue_bio:
 	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
 	throttled = true;
 
-	if (update_disptime) {
+	/* update @tg's dispatch time if @tg was empty before @bio */
+	if (tg->flags & THROTL_TG_WAS_EMPTY) {
 		tg_update_disptime(tg, &td->service_queue);
 		throtl_schedule_next_dispatch(td);
 	}
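On the commit message's last point: because throtl_add_bio_tg() now sets the flag itself rather than relying on its caller, any path that queues a bio can reuse the same test that blk_throtl_bio() uses above. A hypothetical sketch of such a reuse follows; propagate_bio_upward() is an illustrative name only and is not added by this patch:

/*
 * Hypothetical sketch only: how a recursive dispatch path could reuse
 * THROTL_TG_WAS_EMPTY after this patch.  Only throtl_add_bio_tg(),
 * tg_update_disptime() and the flag itself come from the patch.
 */
static void propagate_bio_upward(struct bio *bio, struct throtl_grp *tg,
				 struct throtl_service_queue *parent_sq)
{
	/* throtl_add_bio_tg() sets THROTL_TG_WAS_EMPTY when needed */
	throtl_add_bio_tg(bio, tg, parent_sq);

	/* same test as in blk_throtl_bio()'s queue_bio path */
	if (tg->flags & THROTL_TG_WAS_EMPTY)
		tg_update_disptime(tg, parent_sq);
}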