about summary refs log tree commit diff stats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-05-14 16:52:38 -0400
committerTejun Heo <tj@kernel.org>2013-05-14 16:52:38 -0400
commit9e660acffcd1b5adc4ec1ffba0cbb584f86b8907 (patch)
tree96ac75e374c9a7069e07093a1c636aebf76d8a3d /block/blk-throttle.c
parent2a12f0dcdad1ba7c0e53bbff8e5f6d0ee7a29882 (diff)
blk-throttle: make blk_throtl_bio() ready for hierarchy
Currently, blk_throtl_bio() issues the passed in bio directly if it's within limits of its associated tg (throtl_grp). This behavior becomes incorrect with hierarchy support as the bio should be accounted to and throttled by the ancestor throtl_grps too. This patch makes the direct issue path of blk_throtl_bio() to loop until it reaches the top-level service_queue or gets throttled. If the former, the bio can be issued directly; otherwise, it gets queued at the first layer it was above limits. As tg->parent_sq is always the top-level service queue currently, this patch in itself doesn't make any behavior differences. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--block/blk-throttle.c27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8c6e13359781..52321a42cd78 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1239,12 +1239,16 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1239 1239
1240 sq = &tg->service_queue; 1240 sq = &tg->service_queue;
1241 1241
1242 /* throtl is FIFO - if other bios are already queued, should queue */ 1242 while (true) {
1243 if (sq->nr_queued[rw]) 1243 /* throtl is FIFO - if bios are already queued, should queue */
1244 goto queue_bio; 1244 if (sq->nr_queued[rw])
1245 break;
1245 1246
1246 /* Bio is with-in rate limit of group */ 1247 /* if above limits, break to queue */
1247 if (tg_may_dispatch(tg, bio, NULL)) { 1248 if (!tg_may_dispatch(tg, bio, NULL))
1249 break;
1250
1251 /* within limits, let's charge and dispatch directly */
1248 throtl_charge_bio(tg, bio); 1252 throtl_charge_bio(tg, bio);
1249 1253
1250 /* 1254 /*
@@ -1259,10 +1263,19 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1259 * So keep on trimming slice even if bio is not queued. 1263 * So keep on trimming slice even if bio is not queued.
1260 */ 1264 */
1261 throtl_trim_slice(tg, rw); 1265 throtl_trim_slice(tg, rw);
1262 goto out_unlock; 1266
1267 /*
1268 * @bio passed through this layer without being throttled.
1269 * Climb up the ladder. If we're already at the top, it
1270 * can be executed directly.
1271 */
1272 sq = sq->parent_sq;
1273 tg = sq_to_tg(sq);
1274 if (!tg)
1275 goto out_unlock;
1263 } 1276 }
1264 1277
1265queue_bio: 1278 /* out-of-limit, queue to @tg */
1266 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", 1279 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1267 rw == READ ? 'R' : 'W', 1280 rw == READ ? 'R' : 'W',
1268 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], 1281 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],