 block/blk-throttle.c | 49 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 42 insertions(+), 7 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 7477f332c8dc..27f006bb363b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -124,6 +124,9 @@ struct throtl_grp {
 
 	unsigned int flags;
 
+	/* are there any throtl rules between this group and td? */
+	bool has_rules[2];
+
 	/* bytes per second rate limits */
 	uint64_t bps[2];
 
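For readers outside the kernel tree: the two slots are indexed by data direction, using the kernel's READ (0) and WRITE (1) indices, and -1 means "no limit configured", as the checks below rely on. A minimal layout sketch (illustrative struct name, not the kernel's throtl_grp):

	#include <stdbool.h>
	#include <stdint.h>

	#define READ	0	/* matches the kernel's READ/WRITE indices */
	#define WRITE	1

	struct grp_sketch {
		bool	 has_rules[2];	/* [READ], [WRITE]: any limit here or above? */
		uint64_t bps[2];	/* bytes/sec per direction, -1 == unlimited */
		unsigned int iops[2];	/* ios/sec per direction, -1 == unlimited */
	};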
@@ -422,6 +425,30 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
+/*
+ * Set has_rules[] if @tg or any of its parents have limits configured.
+ * This doesn't require walking up to the top of the hierarchy as the
+ * parent's has_rules[] is guaranteed to be correct.
+ */
+static void tg_update_has_rules(struct throtl_grp *tg)
+{
+	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
+	int rw;
+
+	for (rw = READ; rw <= WRITE; rw++)
+		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
+				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
+}
+
+static void throtl_pd_online(struct blkcg_gq *blkg)
+{
+	/*
+	 * We don't want new groups to escape the limits of their ancestors.
+	 * Update has_rules[] after a new group is brought online.
+	 */
+	tg_update_has_rules(blkg_to_tg(blkg));
+}
+
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
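The key property of tg_update_has_rules() is that a group's flag depends only on its parent's flag plus its own limits, so recomputation is O(1) per group as long as parents are updated first. A minimal userspace sketch of the same recurrence (the toy struct node and names are illustrative, not blk-throttle code):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum { R = 0, W = 1 };		/* stand-ins for the kernel's READ/WRITE */

	struct node {
		struct node *parent;
		uint64_t bps[2];	/* -1 means "no limit", as in blk-throttle */
		uint64_t iops[2];
		bool has_rules[2];
	};

	/* same recurrence as tg_update_has_rules(): parent's flag OR own limits */
	static void update_has_rules(struct node *n)
	{
		for (int rw = R; rw <= W; rw++)
			n->has_rules[rw] = (n->parent && n->parent->has_rules[rw]) ||
					   n->bps[rw] != (uint64_t)-1 ||
					   n->iops[rw] != (uint64_t)-1;
	}

	int main(void)
	{
		/* root -> mid -> leaf; only "mid" has a write bps limit */
		struct node root = { .bps = { -1, -1 }, .iops = { -1, -1 } };
		struct node mid  = { .parent = &root,
				     .bps = { -1, 1024 * 1024 }, .iops = { -1, -1 } };
		struct node leaf = { .parent = &mid,
				     .bps = { -1, -1 }, .iops = { -1, -1 } };

		/* parents before children, mirroring pd_online/pre-order updates */
		update_has_rules(&root);
		update_has_rules(&mid);
		update_has_rules(&leaf);

		/* leaf has no limits of its own but inherits mid's write rule */
		printf("leaf: read=%d write=%d\n",
		       leaf.has_rules[R], leaf.has_rules[W]);
		return 0;
	}

This prints "leaf: read=0 write=1": the leaf inherits the throttling obligation without duplicating any limit configuration of its own.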
@@ -843,12 +870,6 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	return 0;
 }
 
-static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
-	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
-		return 1;
-	return 0;
-}
-
 /*
  * Returns whether one can dispatch a bio or not. Also returns approx number
  * of jiffies to wait before this bio is with-in IO rate and can be dispatched
@@ -1307,6 +1328,8 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
 	struct throtl_service_queue *sq;
+	struct blkcg_gq *blkg;
+	struct cgroup *pos_cgrp;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1330,6 +1353,17 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 		   tg->iops[READ], tg->iops[WRITE]);
 
 	/*
+	 * Update has_rules[] flags for the updated tg's subtree. A tg is
+	 * considered to have rules if either the tg itself or any of its
+	 * ancestors has rules. This identifies groups without any
+	 * restrictions in the whole hierarchy and allows them to bypass
+	 * blk-throttle.
+	 */
+	tg_update_has_rules(tg);
+	blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+		tg_update_has_rules(blkg_to_tg(blkg));
+
+	/*
 	 * We're already holding queue_lock and know @tg is valid. Let's
 	 * apply the new config directly.
 	 *
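The pre-order walk is what keeps the O(1) recurrence sound: blkg_for_each_descendant_pre() visits a parent before any of its children, so each recomputation sees an already-correct parent flag. It also handles the case where a limit is *cleared*, which must un-throttle descendants. A hedged sketch of that traversal (hypothetical struct tnode and child/sibling pointers, not the kernel's blkcg_gq iterator):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct tnode {
		struct tnode *parent;
		struct tnode *first_child;
		struct tnode *next_sibling;
		bool limited;		/* stand-in for "bps or iops configured" */
		bool has_rules;
	};

	/* recompute one node from its (already correct) parent */
	static void tnode_update(struct tnode *n)
	{
		n->has_rules = (n->parent && n->parent->has_rules) || n->limited;
	}

	/* pre-order: node first, then each child's subtree */
	static void update_subtree_pre(struct tnode *n)
	{
		tnode_update(n);
		for (struct tnode *c = n->first_child; c; c = c->next_sibling)
			update_subtree_pre(c);
	}

	int main(void)
	{
		struct tnode root = { .limited = false };
		struct tnode mid  = { .parent = &root, .limited = true };
		struct tnode leaf = { .parent = &mid };

		root.first_child = &mid;
		mid.first_child = &leaf;

		update_subtree_pre(&root);
		assert(!root.has_rules && mid.has_rules && leaf.has_rules);

		/* clearing mid's limit and re-walking un-throttles the leaf too */
		mid.limited = false;
		update_subtree_pre(&root);
		assert(!leaf.has_rules);
		return 0;
	}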
@@ -1415,6 +1449,7 @@ static struct blkcg_policy blkcg_policy_throtl = {
 	.cftypes		= throtl_files,
 
 	.pd_init_fn		= throtl_pd_init,
+	.pd_online_fn		= throtl_pd_online,
 	.pd_exit_fn		= throtl_pd_exit,
 	.pd_reset_stats_fn	= throtl_pd_reset_stats,
 };
@@ -1442,7 +1477,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	blkcg = bio_blkcg(bio);
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
-		if (!tg->has_rules[rw]) {
+		if (!tg->has_rules[rw]) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
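This last hunk is the hot-path payoff. The removed tg_no_rule_group() checked only the group's own limits, which is insufficient once ancestor limits must apply to descendants; the correct uncached alternative would be an ancestor walk on every submitted bio. A sketch of the trade-off (hypothetical struct gnode, not kernel code):

	#include <assert.h>
	#include <stdbool.h>

	struct gnode {		/* mirrors the toy nodes in the sketches above */
		struct gnode *parent;
		bool limited;
		bool has_rules;
	};

	/*
	 * Without the cache: correctness under hierarchy needs an ancestor
	 * walk on every bio, O(depth) in the hot path.
	 */
	static bool bypass_walk(struct gnode *g)
	{
		for (; g; g = g->parent)
			if (g->limited)
				return false;
		return true;
	}

	/* With the cache: a single flag test, as in blk_throtl_bio() above. */
	static bool bypass_cached(struct gnode *g)
	{
		return !g->has_rules;
	}

	int main(void)
	{
		struct gnode root = { .limited = true, .has_rules = true };
		struct gnode leaf = { .parent = &root, .has_rules = true };

		/* both checks agree; only their per-bio cost differs */
		assert(bypass_walk(&leaf) == bypass_cached(&leaf));
		return 0;
	}

Caching the OR of ancestor state in has_rules[] moves the O(depth) work from per-bio submission to the rare configuration and online events handled by the hunks above.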