about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:36 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:36 -0400
commit	2a0f61e6ecd08d260054bde4b096ff207ce5350f (patch)
tree	7ff768d67f66aa79c806a56ff8a9d5a0ff59e6e4
parent	fda6f272c77a7acd798bb247fadc4791574e698b (diff)
blk-throttle: set REQ_THROTTLED from throtl_charge_bio() and gate stats update with it
With proper hierarchy support, a bio can be dispatched multiple times until it reaches the top-level service_queue and we don't want to update dispatch stats at each step. They are local stats and will be kept local. If recursive stats are necessary, they should be implemented separately and definitely not by updating counters recursively on each dispatch.

This patch moves REQ_THROTTLED setting to throtl_charge_bio() and gates stats update with it so that dispatch stats are updated only on the first time the bio is charged to a throtl_grp, which will always be the throtl_grp the bio was originally queued to.

This means that REQ_THROTTLED would be set even for bios which don't get throttled. As we don't want bios to leave blk-throtl with the flag set, move REQ_THROTTLED clearing to the end of blk_throtl_bio() and clear it if the bio is being issued directly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
-rw-r--r--	block/blk-throttle.c	| 30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2875ff66e1b9..420eaa150d11 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -757,7 +757,22 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
+	/*
+	 * REQ_THROTTLED is used to prevent the same bio to be throttled
+	 * more than once as a throttled bio will go through blk-throtl the
+	 * second time when it eventually gets issued.  Set it when a bio
+	 * is being charged to a tg.
+	 *
+	 * Dispatch stats aren't recursive and each @bio should only be
+	 * accounted by the @tg it was originally associated with.  Let's
+	 * update the stats when setting REQ_THROTTLED for the first time
+	 * which is guaranteed to be for the @bio's original tg.
+	 */
+	if (!(bio->bi_rw & REQ_THROTTLED)) {
+		bio->bi_rw |= REQ_THROTTLED;
+		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
+					     bio->bi_rw);
+	}
 }
 
 static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg)
@@ -821,7 +836,6 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 
 	throtl_charge_bio(tg, bio);
 	bio_list_add(&sq->parent_sq->bio_lists[rw], bio);
-	bio->bi_rw |= REQ_THROTTLED;
 
 	throtl_trim_slice(tg, rw);
 }
@@ -1128,10 +1142,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	struct blkcg *blkcg;
 	bool throttled = false;
 
-	if (bio->bi_rw & REQ_THROTTLED) {
-		bio->bi_rw &= ~REQ_THROTTLED;
+	/* see throtl_charge_bio() */
+	if (bio->bi_rw & REQ_THROTTLED)
 		goto out;
-	}
 
 	/*
 	 * A throtl_grp pointer retrieved under rcu can be used to access
@@ -1205,6 +1218,13 @@ out_unlock:
 out_unlock_rcu:
 	rcu_read_unlock();
 out:
+	/*
+	 * As multiple blk-throtls may stack in the same issue path, we
+	 * don't want bios to leave with the flag set.  Clear the flag if
+	 * being issued.
+	 */
+	if (!throttled)
+		bio->bi_rw &= ~REQ_THROTTLED;
 	return throttled;
 }
 