diff options
author | Tejun Heo <tj@kernel.org> | 2015-08-18 17:55:25 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-08-18 18:49:18 -0400 |
commit | 702747cabe737fe9b358739443d539f10cc7c715 (patch) | |
tree | ff92a88a4465a3aedd9a7491ecc4e6c618368b85 | |
parent | 77ea733884eb5520f22c36def1309fe2ab61633e (diff) |
blkcg: remove cfqg_stats->sectors
cfqg_stats->sectors is a blkg_stat which keeps track of the total
number of sectors serviced; however, this can be trivially calculated
from blkcg_gq->stat_bytes. The only thing necessary is adding up
READs and WRITEs and then dividing by sector size.
Remove cfqg_stats->sectors and make cfq print "sectors" and
"sectors_recursive" from stat_bytes.
While this is a bit more code, it removes duplicate stat allocations
and updates and ensures that the reported stats stay in tune with each
other.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | block/cfq-iosched.c | 55 |
1 file changed, 36 insertions, 19 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a948d4df3fc3..395476ab14fe 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -185,8 +185,6 @@ struct cfqg_stats { | |||
185 | struct blkg_rwstat wait_time; | 185 | struct blkg_rwstat wait_time; |
186 | /* number of IOs queued up */ | 186 | /* number of IOs queued up */ |
187 | struct blkg_rwstat queued; | 187 | struct blkg_rwstat queued; |
188 | /* total sectors transferred */ | ||
189 | struct blkg_stat sectors; | ||
190 | /* total disk time and nr sectors dispatched by this group */ | 188 | /* total disk time and nr sectors dispatched by this group */ |
191 | struct blkg_stat time; | 189 | struct blkg_stat time; |
192 | #ifdef CONFIG_DEBUG_BLK_CGROUP | 190 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
@@ -688,12 +686,6 @@ static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) | |||
688 | blkg_rwstat_add(&cfqg->stats.merged, rw, 1); | 686 | blkg_rwstat_add(&cfqg->stats.merged, rw, 1); |
689 | } | 687 | } |
690 | 688 | ||
691 | static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, | ||
692 | uint64_t bytes, int rw) | ||
693 | { | ||
694 | blkg_stat_add(&cfqg->stats.sectors, bytes >> 9); | ||
695 | } | ||
696 | |||
697 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, | 689 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, |
698 | uint64_t start_time, uint64_t io_start_time, int rw) | 690 | uint64_t start_time, uint64_t io_start_time, int rw) |
699 | { | 691 | { |
@@ -782,8 +774,6 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, | |||
782 | unsigned long time, unsigned long unaccounted_time) { } | 774 | unsigned long time, unsigned long unaccounted_time) { } |
783 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } | 775 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } |
784 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } | 776 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } |
785 | static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, | ||
786 | uint64_t bytes, int rw) { } | ||
787 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, | 777 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, |
788 | uint64_t start_time, uint64_t io_start_time, int rw) { } | 778 | uint64_t start_time, uint64_t io_start_time, int rw) { } |
789 | 779 | ||
@@ -1538,8 +1528,6 @@ static void cfqg_stats_exit(struct cfqg_stats *stats) | |||
1538 | blkg_rwstat_exit(&stats->service_time); | 1528 | blkg_rwstat_exit(&stats->service_time); |
1539 | blkg_rwstat_exit(&stats->wait_time); | 1529 | blkg_rwstat_exit(&stats->wait_time); |
1540 | blkg_rwstat_exit(&stats->queued); | 1530 | blkg_rwstat_exit(&stats->queued); |
1541 | |||
1542 | blkg_stat_exit(&stats->sectors); | ||
1543 | blkg_stat_exit(&stats->time); | 1531 | blkg_stat_exit(&stats->time); |
1544 | #ifdef CONFIG_DEBUG_BLK_CGROUP | 1532 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
1545 | blkg_stat_exit(&stats->unaccounted_time); | 1533 | blkg_stat_exit(&stats->unaccounted_time); |
@@ -1558,8 +1546,6 @@ static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp) | |||
1558 | blkg_rwstat_init(&stats->service_time, gfp) || | 1546 | blkg_rwstat_init(&stats->service_time, gfp) || |
1559 | blkg_rwstat_init(&stats->wait_time, gfp) || | 1547 | blkg_rwstat_init(&stats->wait_time, gfp) || |
1560 | blkg_rwstat_init(&stats->queued, gfp) || | 1548 | blkg_rwstat_init(&stats->queued, gfp) || |
1561 | |||
1562 | blkg_stat_init(&stats->sectors, gfp) || | ||
1563 | blkg_stat_init(&stats->time, gfp)) | 1549 | blkg_stat_init(&stats->time, gfp)) |
1564 | goto err; | 1550 | goto err; |
1565 | 1551 | ||
@@ -1901,6 +1887,40 @@ static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v) | |||
1901 | return 0; | 1887 | return 0; |
1902 | } | 1888 | } |
1903 | 1889 | ||
1890 | static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, | ||
1891 | int off) | ||
1892 | { | ||
1893 | u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); | ||
1894 | |||
1895 | return __blkg_prfill_u64(sf, pd, sum >> 9); | ||
1896 | } | ||
1897 | |||
1898 | static int cfqg_print_stat_sectors(struct seq_file *sf, void *v) | ||
1899 | { | ||
1900 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
1901 | cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false); | ||
1902 | return 0; | ||
1903 | } | ||
1904 | |||
1905 | static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf, | ||
1906 | struct blkg_policy_data *pd, int off) | ||
1907 | { | ||
1908 | struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, | ||
1909 | offsetof(struct blkcg_gq, stat_bytes)); | ||
1910 | u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + | ||
1911 | atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); | ||
1912 | |||
1913 | return __blkg_prfill_u64(sf, pd, sum >> 9); | ||
1914 | } | ||
1915 | |||
1916 | static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) | ||
1917 | { | ||
1918 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), | ||
1919 | cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0, | ||
1920 | false); | ||
1921 | return 0; | ||
1922 | } | ||
1923 | |||
1904 | #ifdef CONFIG_DEBUG_BLK_CGROUP | 1924 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
1905 | static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, | 1925 | static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, |
1906 | struct blkg_policy_data *pd, int off) | 1926 | struct blkg_policy_data *pd, int off) |
@@ -1975,8 +1995,7 @@ static struct cftype cfq_blkcg_files[] = { | |||
1975 | }, | 1995 | }, |
1976 | { | 1996 | { |
1977 | .name = "sectors", | 1997 | .name = "sectors", |
1978 | .private = offsetof(struct cfq_group, stats.sectors), | 1998 | .seq_show = cfqg_print_stat_sectors, |
1979 | .seq_show = cfqg_print_stat, | ||
1980 | }, | 1999 | }, |
1981 | { | 2000 | { |
1982 | .name = "io_service_bytes", | 2001 | .name = "io_service_bytes", |
@@ -2017,8 +2036,7 @@ static struct cftype cfq_blkcg_files[] = { | |||
2017 | }, | 2036 | }, |
2018 | { | 2037 | { |
2019 | .name = "sectors_recursive", | 2038 | .name = "sectors_recursive", |
2020 | .private = offsetof(struct cfq_group, stats.sectors), | 2039 | .seq_show = cfqg_print_stat_sectors_recursive, |
2021 | .seq_show = cfqg_print_stat_recursive, | ||
2022 | }, | 2040 | }, |
2023 | { | 2041 | { |
2024 | .name = "io_service_bytes_recursive", | 2042 | .name = "io_service_bytes_recursive", |
@@ -2888,7 +2906,6 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) | |||
2888 | 2906 | ||
2889 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; | 2907 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; |
2890 | cfqq->nr_sectors += blk_rq_sectors(rq); | 2908 | cfqq->nr_sectors += blk_rq_sectors(rq); |
2891 | cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); | ||
2892 | } | 2909 | } |
2893 | 2910 | ||
2894 | /* | 2911 | /* |