author     Tejun Heo <tj@kernel.org>    2013-05-14 16:52:37 -0400
committer  Tejun Heo <tj@kernel.org>    2013-05-14 16:52:37 -0400
commit     2a12f0dcdad1ba7c0e53bbff8e5f6d0ee7a29882 (patch)
tree       fd0cee6d116653a4619c941d3dc1a759edd45064 /block
parent     6e1a5704cbbd244a8db2d7d59215cf9a4c9a0d31 (diff)
blk-throttle: make blk_throtl_drain() ready for hierarchy
The current blk_throtl_drain() assumes that all active throtl_grps are
queued on throtl_data->service_queue, which won't be true once
hierarchy support is implemented.
This patch makes blk_throtl_drain() perform a post-order walk of the
blkg hierarchy, draining each associated throtl_grp, which guarantees
that all bios will eventually be pushed to the top-level service_queue
in throtl_data.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
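The key property behind the post-order walk is that draining a child tg pushes its pending bios into its parent's service_queue, so visiting children before parents leaves every bio at the top level. Below is a minimal, self-contained sketch of that ordering argument in plain C. It is not kernel code: "node", drain() and drain_post_order() are illustrative stand-ins for throtl_grp, tg_drain_bios() and the blkg_for_each_descendant_post() walk.

/*
 * Illustrative sketch only, not kernel code: draining a node moves its
 * pending items to its parent, so a children-first (post-order) visit
 * accumulates everything at the root.
 */
#include <stdio.h>

struct node {
        int pending;
        struct node *parent;
        struct node *child[2];
};

static void drain(struct node *n)
{
        if (n->parent) {
                n->parent->pending += n->pending;       /* items move up one level */
                n->pending = 0;
        }
}

static void drain_post_order(struct node *n)
{
        int i;

        for (i = 0; i < 2; i++)
                if (n->child[i])
                        drain_post_order(n->child[i]);  /* children first */
        drain(n);                                       /* then this node */
}

int main(void)
{
        struct node root = { 1, NULL, { NULL, NULL } };
        struct node a = { 2, &root, { NULL, NULL } };
        struct node b = { 3, &a, { NULL, NULL } };

        root.child[0] = &a;
        a.child[0] = &b;

        drain_post_order(&root);
        printf("pending at root: %d\n", root.pending);  /* 6: nothing is stranded */
        return 0;
}

With a parents-first walk, b's three pending items would end up stranded in a rather than at the root; the post-order ordering is what rules that out.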
Diffstat (limited to 'block')
-rw-r--r--  block/blk-throttle.c  51
1 file changed, 40 insertions(+), 11 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 918d22240856..8c6e13359781 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1299,6 +1299,28 @@ out:
 	return throttled;
 }
 
+/*
+ * Dispatch all bios from all children tg's queued on @parent_sq.  On
+ * return, @parent_sq is guaranteed to not have any active children tg's
+ * and all bios from previously active tg's are on @parent_sq->bio_lists[].
+ */
+static void tg_drain_bios(struct throtl_service_queue *parent_sq)
+{
+	struct throtl_grp *tg;
+
+	while ((tg = throtl_rb_first(parent_sq))) {
+		struct throtl_service_queue *sq = &tg->service_queue;
+		struct bio *bio;
+
+		throtl_dequeue_tg(tg);
+
+		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
+			tg_dispatch_one_bio(tg, bio_data_dir(bio));
+		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
+			tg_dispatch_one_bio(tg, bio_data_dir(bio));
+	}
+}
+
 /**
  * blk_throtl_drain - drain throttled bios
  * @q: request_queue to drain throttled bios for
@@ -1309,27 +1331,34 @@ void blk_throtl_drain(struct request_queue *q)
 	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct throtl_data *td = q->td;
-	struct throtl_service_queue *parent_sq = &td->service_queue;
-	struct throtl_grp *tg;
+	struct blkcg_gq *blkg;
+	struct cgroup *pos_cgrp;
 	struct bio *bio;
 	int rw;
 
 	queue_lockdep_assert_held(q);
+	rcu_read_lock();
 
-	while ((tg = throtl_rb_first(parent_sq))) {
-		struct throtl_service_queue *sq = &tg->service_queue;
+	/*
+	 * Drain each tg while doing post-order walk on the blkg tree, so
+	 * that all bios are propagated to td->service_queue.  It'd be
+	 * better to walk service_queue tree directly but blkg walk is
+	 * easier.
+	 */
+	blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
-		throtl_dequeue_tg(tg);
+	tg_drain_bios(&td_root_tg(td)->service_queue);
 
-		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
-			tg_dispatch_one_bio(tg, bio_data_dir(bio));
-		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
-			tg_dispatch_one_bio(tg, bio_data_dir(bio));
-	}
+	/* finally, transfer bios from top-level tg's into the td */
+	tg_drain_bios(&td->service_queue);
+
+	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
+	/* all bios now should be in td->service_queue, issue them */
 	for (rw = READ; rw <= WRITE; rw++)
-		while ((bio = bio_list_pop(&parent_sq->bio_lists[rw])))
+		while ((bio = bio_list_pop(&td->service_queue.bio_lists[rw])))
 			generic_make_request(bio);
 
 	spin_lock_irq(q->queue_lock);