author		Tejun Heo <tj@kernel.org>	2015-08-18 17:55:20 -0400
committer	Jens Axboe <axboe@fb.com>	2015-08-18 18:49:17 -0400
commit		ae11889636111199dbcf47283b4167f578b69472 (patch)
tree		02870926926c7391d41e4b40a8b5bcf82ce34aab /block/blk-throttle.c
parent		c9589f03e490956628ff91a1da133216dc796b63 (diff)
blkcg: consolidate blkg creation in blkcg_bio_issue_check()
blkg (blkcg_gq) is currently created by blkcg policies invoking
blkg_lookup_create(), which ends up repeating roughly the same code in
different policies. In theory, this can avoid the overhead of looking
up and/or creating blkg's when blkcg is enabled but no policy is in
use; however, the cost of blkg lookup / creation is very low,
especially if only the root blkcg is in use - which is highly likely
when no blkcg policy is in active use - as it boils down to a single,
very predictable conditional and the surrounding RCU protection.
This patch consolidates blkg creation into a new function,
blkcg_bio_issue_check(), which is called during bio issue from
generic_make_request_checks(). blkcg_bio_issue_check() is now the
only function which tries to create missing blkg's. The subsequent
policy and request_list operations just perform blkg_lookup() and, if
the blkg is missing, fall back to the root.
* blk_get_rl() no longer tries to create a blkg. It uses blkg_lookup()
  instead of blkg_lookup_create().
* blk_throtl_bio() is now called from blkcg_bio_issue_check() with the
  rcu read lock held and the blkg already looked up. Both
  throtl_lookup_tg() and throtl_lookup_create_tg() are dropped.
* cfq is similarly updated. cfq_lookup_create_cfqg() is replaced with
  cfq_lookup_cfqg(), which uses blkg_lookup() (see the sketch below).
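The cfq side of this change lives in block/cfq-iosched.c and is
outside the diff below (which is limited to block/blk-throttle.c). As
a sketch of the lookup-only pattern described above - the helper name
comes from the text, the body is illustrative rather than verbatim:

/*
 * Lookup-only replacement for cfq_lookup_create_cfqg(): never allocates.
 * A missing blkg yields NULL and the caller falls back to the root group.
 */
static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, cfqd->queue);
	if (likely(blkg))
		return blkg_to_cfqg(blkg);
	return NULL;
}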
This consolidates blkg handling and avoids unnecessary blkg creation
retries under memory pressure. In addition, this provides a common
bio entry point into blkcg where things like common accounting can be
performed.
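The new entry point itself lands in a header (block/blk-cgroup.h) and
so does not appear in the diff below. Going by the description above,
its shape is roughly the following sketch; treat the locking and
error-handling details as inferred from the commit message, not as
verbatim header code:

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	/* hold the RCU read lock across the lookup and the policy call */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		/* the one remaining place that creates missing blkg's */
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;	/* policies fall back to the root */
		spin_unlock_irq(q->queue_lock);
	}

	/* blkg may still be NULL; blk_throtl_bio() uses q->root_blkg then */
	throtl = blk_throtl_bio(q, blkg, bio);

	rcu_read_unlock();
	return !throtl;
}

generic_make_request_checks() then gates bio issue on the return
value, along the lines of:

	if (!blkcg_bio_issue_check(q, bio))
		goto end_io;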
v2: Build fixes for !CONFIG_CFQ_GROUP_IOSCHED and
!CONFIG_BLK_DEV_THROTTLING.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	72
1 file changed, 6 insertions(+), 66 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 900a777e01c2..29c22ed4b073 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -182,11 +182,6 @@ static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 	return pd_to_blkg(&tg->pd);
 }
 
-static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
-{
-	return blkg_to_tg(td->queue->root_blkg);
-}
-
 /**
  * sq_to_tg - return the throl_grp the specified service queue belongs to
  * @sq: the throtl_service_queue of interest
@@ -449,39 +444,6 @@ static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
 	}
 }
 
-static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
-					   struct blkcg *blkcg)
-{
-	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
-}
-
-static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
-						  struct blkcg *blkcg)
-{
-	struct request_queue *q = td->queue;
-	struct throtl_grp *tg = NULL;
-
-	/*
-	 * This is the common case when there are no blkcgs. Avoid lookup
-	 * in this case
-	 */
-	if (blkcg == &blkcg_root) {
-		tg = td_root_tg(td);
-	} else {
-		struct blkcg_gq *blkg;
-
-		blkg = blkg_lookup_create(blkcg, q);
-
-		/* if %NULL and @q is alive, fall back to root_tg */
-		if (!IS_ERR(blkg))
-			tg = blkg_to_tg(blkg);
-		else
-			tg = td_root_tg(td);
-	}
-
-	return tg;
-}
-
 static struct throtl_grp *
 throtl_rb_first(struct throtl_service_queue *parent_sq)
 {
@@ -1403,46 +1365,26 @@ static struct blkcg_policy blkcg_policy_throtl = {
 	.pd_reset_stats_fn = throtl_pd_reset_stats,
 };
 
-bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+		    struct bio *bio)
 {
-	struct throtl_data *td = q->td;
 	struct throtl_qnode *qn = NULL;
-	struct throtl_grp *tg;
+	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
 	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio);
-	struct blkcg *blkcg;
 	bool throttled = false;
 
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
 	/* see throtl_charge_bio() */
-	if (bio->bi_rw & REQ_THROTTLED)
+	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
-	/*
-	 * A throtl_grp pointer retrieved under rcu can be used to access
-	 * basic fields like stats and io rates. If a group has no rules,
-	 * just update the dispatch stats in lockless manner and return.
-	 */
-	rcu_read_lock();
-	blkcg = bio_blkcg(bio);
-	tg = throtl_lookup_tg(td, blkcg);
-	if (tg) {
-		if (!tg->has_rules[rw]) {
-			throtl_update_dispatch_stats(tg_to_blkg(tg),
-					bio->bi_iter.bi_size, bio->bi_rw);
-			goto out_unlock_rcu;
-		}
-	}
-
-	/*
-	 * Either group has not been allocated yet or it is not an unlimited
-	 * IO group
-	 */
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_bypass(q)))
 		goto out_unlock;
 
-	tg = throtl_lookup_create_tg(td, blkcg);
 	sq = &tg->service_queue;
 
 	while (true) {
@@ -1507,8 +1449,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
-out_unlock_rcu:
-	rcu_read_unlock();
 out:
 	/*
 	 * As multiple blk-throtls may stack in the same issue path, we