 block/blk-throttle.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2875ff66e1b9..420eaa150d11 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -757,7 +757,22 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
+	/*
+	 * REQ_THROTTLED is used to prevent the same bio from being
+	 * throttled more than once, as a throttled bio will go through
+	 * blk-throtl a second time when it is eventually issued.  Set it
+	 * when a bio is being charged to a tg.
+	 *
+	 * Dispatch stats aren't recursive and each @bio should only be
+	 * accounted by the @tg it was originally associated with.  Let's
+	 * update the stats when setting REQ_THROTTLED for the first time,
+	 * which is guaranteed to be for the @bio's original tg.
+	 */
+	if (!(bio->bi_rw & REQ_THROTTLED)) {
+		bio->bi_rw |= REQ_THROTTLED;
+		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
+					     bio->bi_rw);
+	}
 }
 
 static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg)
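The hunk above folds two decisions into one test: throtl_charge_bio() now sets REQ_THROTTLED itself, and the same flag check ensures dispatch stats are bumped only for the bio's original group even though the charge is repeated for every group the bio passes through. Below is a minimal userspace sketch of that pattern; the model_* names are invented stand-ins for struct bio, struct throtl_grp and REQ_THROTTLED, not kernel API:

#include <stdio.h>

#define MODEL_REQ_THROTTLED (1u << 0)	/* stand-in for REQ_THROTTLED */

struct model_bio {
	unsigned int flags;		/* stand-in for bio->bi_rw */
	unsigned int size;		/* stand-in for bio->bi_size */
};

struct model_tg {
	unsigned long bytes_disp;	/* charged on every pass */
	unsigned long stat_bytes;	/* accounted once per bio */
};

static void model_charge_bio(struct model_tg *tg, struct model_bio *bio)
{
	/* The charge itself is repeated for every tg the bio visits. */
	tg->bytes_disp += bio->size;

	/*
	 * Stats are updated only while the flag is still clear, i.e.
	 * exactly once, for the bio's original tg.
	 */
	if (!(bio->flags & MODEL_REQ_THROTTLED)) {
		bio->flags |= MODEL_REQ_THROTTLED;
		tg->stat_bytes += bio->size;
	}
}

int main(void)
{
	struct model_tg child = { 0, 0 }, parent = { 0, 0 };
	struct model_bio bio = { 0, 4096 };

	model_charge_bio(&child, &bio);		/* original tg: charge + stats */
	model_charge_bio(&parent, &bio);	/* later pass: charge only */

	printf("child:  disp=%lu stats=%lu\n", child.bytes_disp, child.stat_bytes);
	printf("parent: disp=%lu stats=%lu\n", parent.bytes_disp, parent.stat_bytes);
	return 0;
}

Running it prints disp=4096 stats=4096 for the child and disp=4096 stats=0 for the parent: every group sees the charge, but only the first accounts the bio.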
@@ -821,7 +836,6 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 
 	throtl_charge_bio(tg, bio);
 	bio_list_add(&sq->parent_sq->bio_lists[rw], bio);
-	bio->bi_rw |= REQ_THROTTLED;
 
 	throtl_trim_slice(tg, rw);
 }
@@ -1128,10 +1142,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	struct blkcg *blkcg;
 	bool throttled = false;
 
-	if (bio->bi_rw & REQ_THROTTLED) {
-		bio->bi_rw &= ~REQ_THROTTLED;
+	/* see throtl_charge_bio() */
+	if (bio->bi_rw & REQ_THROTTLED)
 		goto out;
-	}
 
 	/*
 	 * A throtl_grp pointer retrieved under rcu can be used to access
@@ -1205,6 +1218,13 @@ out_unlock:
 out_unlock_rcu:
 	rcu_read_unlock();
 out:
+	/*
+	 * As multiple blk-throtls may stack in the same issue path, we
+	 * don't want bios to leave with the flag set.  Clear the flag if
+	 * the bio is being issued.
+	 */
+	if (!throttled)
+		bio->bi_rw &= ~REQ_THROTTLED;
 	return throttled;
 }
 
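Taken together, the hunks give REQ_THROTTLED a clear lifecycle: set when the bio is first charged, tested at the top of blk_throtl_bio() so an already-dispatched bio goes straight through, and cleared whenever the bio actually leaves, so a second blk-throtl stacked in the same issue path never sees a stale flag. A hedged sketch of that control flow, reusing the model_* types from the sketch above (over_limit is an invented stand-in for the real tg_may_dispatch() decision):

static int model_blk_throtl_bio(struct model_bio *bio, int over_limit)
{
	int throttled = 0;

	/* Re-entry: a bio flagged on an earlier pass skips all checks. */
	if (bio->flags & MODEL_REQ_THROTTLED)
		goto out;

	if (over_limit) {
		/* Queue the bio; it gets charged (and flagged) on dispatch. */
		throttled = 1;
	}
out:
	/*
	 * The flag bit is shared by every throttler the bio may still
	 * pass through, so a bio that is actually being issued must not
	 * leave with it set.
	 */
	if (!throttled)
		bio->flags &= ~MODEL_REQ_THROTTLED;
	return throttled;
}

On the first pass an over-limit bio returns 1 and is queued; when it is later re-submitted with the flag set, the function falls through to out, clears the flag, and lets the bio be issued.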