author     Tejun Heo <tj@kernel.org>   2012-04-01 17:38:44 -0400
committer  Tejun Heo <tj@kernel.org>   2012-04-01 17:38:44 -0400
commit     41b38b6d540f951c49315d8573e6f6195a6e736d (patch)
tree       e667cec0bba6e7678810ff0087c96547a21a45c7 /block/blk-throttle.c
parent     629ed0b10209ffc4e1d439e5508d52d5e3a090b8 (diff)
blkcg: cfq doesn't need per-cpu dispatch stats
blkio_group_stats_cpu is used to count dispatch stats using per-cpu
counters. This is used by both blk-throtl and cfq-iosched but the
sharing is rather silly.
* cfq-iosched doesn't need per-cpu dispatch stats. cfq always updates
those stats while holding queue_lock.
* blk-throtl needs per-cpu dispatch stats but only service_bytes and
serviced. It doesn't make use of sectors.
This patch makes cfq add and use global stats for service_bytes,
serviced and sectors, removes per-cpu sectors counter and moves
per-cpu stat printing code to blk-throttle.c.
Signed-off-by: Tejun Heo <tj@kernel.org>
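
As a rough illustration of the per-cpu scheme that blk-throtl keeps, here is a minimal userspace sketch of the aggregation performed by the blkg_prfill_cpu_rwstat() helper added in the diff below: each CPU bumps only its own read/write counters on the dispatch path, and a reader sums every per-CPU copy when the stats are printed. The struct rwstat type, NCPUS, and rwstat_sum() are hypothetical stand-ins chosen for the sketch, not kernel API.

/*
 * Hypothetical userspace model of per-cpu rwstat aggregation: each
 * "CPU" owns one counter set and updates only that set, so the update
 * path needs no shared lock; a reader sums all copies at print time,
 * which is what blkg_prfill_cpu_rwstat() does via per_cpu_ptr() below.
 */
#include <stdio.h>
#include <stdint.h>

#define NCPUS		4	/* assumed CPU count for the sketch */
#define RWSTAT_NR	2	/* 0 = READ, 1 = WRITE */

struct rwstat {
	uint64_t cnt[RWSTAT_NR];
};

/* one counter set per CPU, e.g. bytes dispatched (service_bytes) */
static struct rwstat service_bytes_cpu[NCPUS];

/* sum every per-CPU copy into one total for display */
static struct rwstat rwstat_sum(const struct rwstat *percpu, int ncpus)
{
	struct rwstat sum = { 0 };
	int cpu, i;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (i = 0; i < RWSTAT_NR; i++)
			sum.cnt[i] += percpu[cpu].cnt[i];
	return sum;
}

int main(void)
{
	/* pretend CPU0 issued a 4K write and CPU2 a 512-byte read */
	service_bytes_cpu[0].cnt[1] += 4096;
	service_bytes_cpu[2].cnt[0] += 512;

	struct rwstat total = rwstat_sum(service_bytes_cpu, NCPUS);

	printf("Read %llu Write %llu\n",
	       (unsigned long long)total.cnt[0],
	       (unsigned long long)total.cnt[1]);
	return 0;
}

In the real code the per-CPU copies sit behind per_cpu_ptr()/for_each_possible_cpu(), and the cfq side can drop them entirely because its updates already run under queue_lock.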
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--   block/blk-throttle.c   31
1 files changed, 30 insertions, 1 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5d647edc02a1..cb259bc46f43 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -582,7 +582,6 @@ static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
 
 	stats_cpu = this_cpu_ptr(pd->stats_cpu);
 
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
 	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
 
@@ -843,6 +842,36 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 	throtl_schedule_delayed_work(td, 0);
 }
 
+static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
+				  struct blkg_policy_data *pd, int off)
+{
+	struct blkg_rwstat rwstat = { }, tmp;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkio_group_stats_cpu *sc =
+			per_cpu_ptr(pd->stats_cpu, cpu);
+
+		tmp = blkg_rwstat_read((void *)sc + off);
+		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+			rwstat.cnt[i] += tmp.cnt[i];
+	}
+
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+
+/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
+static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+				  struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
+			  BLKCG_STAT_POL(cft->private),
+			  BLKCG_STAT_OFF(cft->private), true);
+	return 0;
+}
+
 static u64 blkg_prfill_conf_u64(struct seq_file *sf,
 				struct blkg_policy_data *pd, int off)
 {