author		Tejun Heo <tj@kernel.org>	2013-01-09 11:05:13 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-09 11:05:13 -0500
commit		0b39920b5f9f3ad37dd259bfa2e9cbca33475b28 (patch)
tree		84f3c07ae535dbf02276ff3645b4fbfe95419d9b /block
parent		689665af4489f779bc82e7869509c9ac11b5a903 (diff)
cfq-iosched: collect stats from dead cfqgs
To support hierarchical stats, it's necessary to remember stats from
dead children. Add cfqg->dead_stats and make a dying cfqg transfer
its stats to the parent's dead-stats.
The transfer happens from ->pd_offline_fn() and it is possible that
there are some residual IOs completing afterwards. Currently, we lose
these stats. Given that cgroup removal isn't a very high frequency
operation and the amount of residual IO at offline is likely to be
nil or small, this shouldn't be a big deal and the complexity needed
to handle residual IOs - another callback and rather elaborate
synchronization to reach and lock the matching q - doesn't seem
justified.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
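For reference, below is a minimal userspace model of the pattern this patch implements: when a group goes away, it folds both its live stats and any stats it inherited from its own dead children into its parent's dead_stats. The types and helpers here (group_stats, stats_merge, group_offline) are simplified stand-ins invented for this sketch; they are not the kernel's cfqg_stats/blkg_stat API.

/*
 * Illustrative userspace model only -- these types and helpers are made up
 * for this sketch and are not the kernel's cfqg_stats/blkg_stat API.
 */
#include <stdio.h>

struct group_stats {
	unsigned long long service_time;
	unsigned long long wait_time;
};

struct group {
	struct group *parent;
	struct group_stats stats;	/* stats for this group */
	struct group_stats dead_stats;	/* stats pushed from dead children */
};

/* @to += @from */
static void stats_merge(struct group_stats *to, const struct group_stats *from)
{
	to->service_time += from->service_time;
	to->wait_time += from->wait_time;
}

static void stats_reset(struct group_stats *stats)
{
	stats->service_time = 0;
	stats->wait_time = 0;
}

/* offline: push live stats and inherited dead stats up to the parent */
static void group_offline(struct group *grp)
{
	if (!grp->parent)
		return;
	stats_merge(&grp->parent->dead_stats, &grp->stats);
	stats_merge(&grp->parent->dead_stats, &grp->dead_stats);
	stats_reset(&grp->stats);
	stats_reset(&grp->dead_stats);
}

int main(void)
{
	struct group root = { .parent = NULL };
	struct group child = { .parent = &root };

	child.stats.service_time = 100;
	child.stats.wait_time = 40;

	group_offline(&child);	/* child is removed; root keeps its usage */

	printf("root dead_stats: service_time=%llu wait_time=%llu\n",
	       root.dead_stats.service_time, root.dead_stats.wait_time);
	return 0;
}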
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	57
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f8b34bbbd372..4d75b7944574 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -289,7 +289,8 @@ struct cfq_group {
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 	struct cfq_ttime ttime;
-	struct cfqg_stats stats;
+	struct cfqg_stats stats;	/* stats for this cfqg */
+	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
 };
 
 struct cfq_io_cq {
@@ -709,6 +710,47 @@ static void cfqg_stats_reset(struct cfqg_stats *stats)
 #endif
 }
 
+/* @to += @from */
+static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+{
+	/* queued stats shouldn't be cleared */
+	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
+	blkg_rwstat_merge(&to->serviced, &from->serviced);
+	blkg_rwstat_merge(&to->merged, &from->merged);
+	blkg_rwstat_merge(&to->service_time, &from->service_time);
+	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
+	blkg_stat_merge(&to->time, &from->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
+	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+	blkg_stat_merge(&to->dequeue, &from->dequeue);
+	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
+	blkg_stat_merge(&to->idle_time, &from->idle_time);
+	blkg_stat_merge(&to->empty_time, &from->empty_time);
+#endif
+}
+
+/*
+ * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * recursive stats can still account for the amount used by this cfqg after
+ * it's gone.
+ */
+static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
+{
+	struct cfq_group *parent = cfqg_parent(cfqg);
+
+	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
+
+	if (unlikely(!parent))
+		return;
+
+	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
+	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+	cfqg_stats_reset(&cfqg->stats);
+	cfqg_stats_reset(&cfqg->dead_stats);
+}
+
 #else	/* CONFIG_CFQ_GROUP_IOSCHED */
 
 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
@@ -1475,11 +1517,23 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
 	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
 }
 
+static void cfq_pd_offline(struct blkcg_gq *blkg)
+{
+	/*
+	 * @blkg is going offline and will be ignored by
+	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
+	 * that they don't get lost.  If IOs complete after this point, the
+	 * stats for them will be lost.  Oh well...
+	 */
+	cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+}
+
 static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfqg_stats_reset(&cfqg->stats);
+	cfqg_stats_reset(&cfqg->dead_stats);
 }
 
 /*
@@ -4408,6 +4462,7 @@ static struct blkcg_policy blkcg_policy_cfq = {
 	.cftypes		= cfq_blkcg_files,
 
 	.pd_init_fn		= cfq_pd_init,
+	.pd_offline_fn		= cfq_pd_offline,
 	.pd_reset_stats_fn	= cfq_pd_reset_stats,
 };
 #endif
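Not part of this patch: once dead_stats exists, a hierarchical readout would add each group's preserved dead_stats on top of the live stats gathered from its subtree. The sketch below illustrates that summation over a toy tree; the names (node, subtree_time) are invented for illustration and are not the kernel's recursive blkg_[rw]stat helpers referenced in the comment above.

/*
 * Illustrative only -- node/subtree_time are invented names, not the
 * kernel's recursive blkg stat helpers.
 */
#include <stdio.h>

struct node {
	struct node *children[4];
	int nr_children;
	unsigned long long time;	/* live stats owned by this node */
	unsigned long long dead_time;	/* preserved from offlined children */
};

/* hierarchical total = own live stats + preserved dead stats + live subtree */
static unsigned long long subtree_time(const struct node *n)
{
	unsigned long long sum = n->time + n->dead_time;
	int i;

	for (i = 0; i < n->nr_children; i++)
		sum += subtree_time(n->children[i]);
	return sum;
}

int main(void)
{
	struct node child = { .time = 5 };
	struct node root = { .children = { &child }, .nr_children = 1,
			     .time = 10, .dead_time = 25 };

	/* 10 (root live) + 25 (from a dead child) + 5 (live child) = 40 */
	printf("root subtree time = %llu\n", subtree_time(&root));
	return 0;
}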