summaryrefslogtreecommitdiffstats
path: root/block/cfq-iosched.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2015-08-18 17:55:21 -0400
committerJens Axboe <axboe@fb.com>2015-08-18 18:49:17 -0400
commite6269c44546755094979ab53609e6e203a68c8ff (patch)
treefc083e47b6ea842e5d260084542a2ed5f76a86ea /block/cfq-iosched.c
parentae11889636111199dbcf47283b4167f578b69472 (diff)
blkcg: add blkg_[rw]stat->aux_cnt and replace cfq_group->dead_stats with it
cgroup stats are local to each cgroup and don't propagate to ancestors by default. When recursive stats are necessary, the sum is calculated over all the descendants. This initially was for backward compatibility to support both group-local and recursive stats but this mode of operation makes general sense as stat update is much hotter than reporting those stats. This however ends up losing recursive stats when a child is removed. To work around this, cfq-iosched adds its stats to its parent cfq_group->dead_stats which is summed up together when calculating recursive stats. It's planned that the core stats will be moved to blkcg_gq, so we want to move the mechanism for keeping track of the stats of dead children from cfq to blkcg core. This patch adds blkg_[rw]stat->aux_cnt which are atomic64_t's keeping track of auxiliary counts which are excluded when reading local counts but included for recursive. blkg_[rw]stat_merge() which were used by cfq to implement dead_stats are replaced by blkg_[rw]stat_add_aux(), and cfq now forwards stats of a dead cgroup to the aux counts of parent->stats instead of separate ->dead_stats. This will also help making blkg_[rw]stats per-cpu. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--block/cfq-iosched.c67
1 files changed, 18 insertions, 49 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0994f3b523a8..b272cfff7364 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -304,7 +304,6 @@ struct cfq_group {
304 int dispatched; 304 int dispatched;
305 struct cfq_ttime ttime; 305 struct cfq_ttime ttime;
306 struct cfqg_stats stats; /* stats for this cfqg */ 306 struct cfqg_stats stats; /* stats for this cfqg */
307 struct cfqg_stats dead_stats; /* stats pushed from dead children */
308 307
309 /* async queue for each priority case */ 308 /* async queue for each priority case */
310 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; 309 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
@@ -736,28 +735,28 @@ static void cfqg_stats_reset(struct cfqg_stats *stats)
736} 735}
737 736
738/* @to += @from */ 737/* @to += @from */
739static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from) 738static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
740{ 739{
741 /* queued stats shouldn't be cleared */ 740 /* queued stats shouldn't be cleared */
742 blkg_rwstat_merge(&to->service_bytes, &from->service_bytes); 741 blkg_rwstat_add_aux(&to->service_bytes, &from->service_bytes);
743 blkg_rwstat_merge(&to->serviced, &from->serviced); 742 blkg_rwstat_add_aux(&to->serviced, &from->serviced);
744 blkg_rwstat_merge(&to->merged, &from->merged); 743 blkg_rwstat_add_aux(&to->merged, &from->merged);
745 blkg_rwstat_merge(&to->service_time, &from->service_time); 744 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
746 blkg_rwstat_merge(&to->wait_time, &from->wait_time); 745 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
747 blkg_stat_merge(&from->time, &from->time); 746 blkg_stat_add_aux(&from->time, &from->time);
748#ifdef CONFIG_DEBUG_BLK_CGROUP 747#ifdef CONFIG_DEBUG_BLK_CGROUP
749 blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time); 748 blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
750 blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum); 749 blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
751 blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples); 750 blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
752 blkg_stat_merge(&to->dequeue, &from->dequeue); 751 blkg_stat_add_aux(&to->dequeue, &from->dequeue);
753 blkg_stat_merge(&to->group_wait_time, &from->group_wait_time); 752 blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
754 blkg_stat_merge(&to->idle_time, &from->idle_time); 753 blkg_stat_add_aux(&to->idle_time, &from->idle_time);
755 blkg_stat_merge(&to->empty_time, &from->empty_time); 754 blkg_stat_add_aux(&to->empty_time, &from->empty_time);
756#endif 755#endif
757} 756}
758 757
759/* 758/*
760 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors' 759 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
761 * recursive stats can still account for the amount used by this cfqg after 760 * recursive stats can still account for the amount used by this cfqg after
762 * it's gone. 761 * it's gone.
763 */ 762 */
@@ -770,10 +769,8 @@ static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
770 if (unlikely(!parent)) 769 if (unlikely(!parent))
771 return; 770 return;
772 771
773 cfqg_stats_merge(&parent->dead_stats, &cfqg->stats); 772 cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
774 cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
775 cfqg_stats_reset(&cfqg->stats); 773 cfqg_stats_reset(&cfqg->stats);
776 cfqg_stats_reset(&cfqg->dead_stats);
777} 774}
778 775
779#else /* CONFIG_CFQ_GROUP_IOSCHED */ 776#else /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -1606,7 +1603,6 @@ static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1606 1603
1607 cfq_init_cfqg_base(cfqg); 1604 cfq_init_cfqg_base(cfqg);
1608 cfqg_stats_init(&cfqg->stats); 1605 cfqg_stats_init(&cfqg->stats);
1609 cfqg_stats_init(&cfqg->dead_stats);
1610 1606
1611 return &cfqg->pd; 1607 return &cfqg->pd;
1612} 1608}
@@ -1649,38 +1645,11 @@ static void cfq_pd_free(struct blkg_policy_data *pd)
1649 return kfree(pd); 1645 return kfree(pd);
1650} 1646}
1651 1647
1652/* offset delta from cfqg->stats to cfqg->dead_stats */
1653static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1654 offsetof(struct cfq_group, stats);
1655
1656/* to be used by recursive prfill, sums live and dead stats recursively */
1657static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1658{
1659 u64 sum = 0;
1660
1661 sum += blkg_stat_recursive_sum(pd, off);
1662 sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1663 return sum;
1664}
1665
1666/* to be used by recursive prfill, sums live and dead rwstats recursively */
1667static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1668 int off)
1669{
1670 struct blkg_rwstat a, b;
1671
1672 a = blkg_rwstat_recursive_sum(pd, off);
1673 b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1674 blkg_rwstat_merge(&a, &b);
1675 return a;
1676}
1677
1678static void cfq_pd_reset_stats(struct blkg_policy_data *pd) 1648static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1679{ 1649{
1680 struct cfq_group *cfqg = pd_to_cfqg(pd); 1650 struct cfq_group *cfqg = pd_to_cfqg(pd);
1681 1651
1682 cfqg_stats_reset(&cfqg->stats); 1652 cfqg_stats_reset(&cfqg->stats);
1683 cfqg_stats_reset(&cfqg->dead_stats);
1684} 1653}
1685 1654
1686static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, 1655static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
@@ -1883,7 +1852,7 @@ static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1883static u64 cfqg_prfill_stat_recursive(struct seq_file *sf, 1852static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1884 struct blkg_policy_data *pd, int off) 1853 struct blkg_policy_data *pd, int off)
1885{ 1854{
1886 u64 sum = cfqg_stat_pd_recursive_sum(pd, off); 1855 u64 sum = blkg_stat_recursive_sum(pd, off);
1887 1856
1888 return __blkg_prfill_u64(sf, pd, sum); 1857 return __blkg_prfill_u64(sf, pd, sum);
1889} 1858}
@@ -1891,7 +1860,7 @@ static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1891static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf, 1860static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1892 struct blkg_policy_data *pd, int off) 1861 struct blkg_policy_data *pd, int off)
1893{ 1862{
1894 struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off); 1863 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);
1895 1864
1896 return __blkg_prfill_rwstat(sf, pd, &sum); 1865 return __blkg_prfill_rwstat(sf, pd, &sum);
1897} 1866}