diff options
author | Tejun Heo <tj@kernel.org> | 2012-04-01 17:38:44 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2012-04-01 17:38:44 -0400 |
commit | 155fead9b6347ead90e0b0396cb108a6ba6126c6 (patch) | |
tree | 7e14bb87d942561aa5f44ac8a2d09d6e546c6ccb /block/blk-cgroup.c | |
parent | 9ade5ea4ce57d3596eaee6a57cd212a483674058 (diff) |
blkcg: move blkio_group_stats to cfq-iosched.c
blkio_group_stats contains only fields used by cfq and has no reason
to be defined in blkcg core.
* Move blkio_group_stats to cfq-iosched.c and rename it to cfqg_stats.
* blkg_policy_data->stats is replaced with cfq_group->stats.
blkg_prfill_[rw]stat() are updated to use offset against pd->pdata
instead.
* All related macros / functions are renamed so that they have cfqg_
prefix and the unnecessary @pol arguments are dropped.
* All stat functions now take cfq_group * instead of blkio_group *.
* lockdep assertion on queue lock dropped. Elevator runs under queue
lock by default. There isn't much to be gained by adding lockdep
assertions at stat function level.
* cfqg_stats_reset() implemented for blkio_reset_group_stats_fn method
so that cfqg->stats can be reset.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r-- | block/blk-cgroup.c | 23 |
1 file changed, 2 insertions, 21 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 2e6fb7d91805..cfdda44f4a0b 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -417,25 +417,6 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) | |||
417 | struct blkio_policy_type *pol; | 417 | struct blkio_policy_type *pol; |
418 | 418 | ||
419 | list_for_each_entry(pol, &blkio_list, list) { | 419 | list_for_each_entry(pol, &blkio_list, list) { |
420 | struct blkg_policy_data *pd = blkg->pd[pol->plid]; | ||
421 | struct blkio_group_stats *stats = &pd->stats; | ||
422 | |||
423 | /* queued stats shouldn't be cleared */ | ||
424 | blkg_rwstat_reset(&stats->service_bytes); | ||
425 | blkg_rwstat_reset(&stats->serviced); | ||
426 | blkg_rwstat_reset(&stats->merged); | ||
427 | blkg_rwstat_reset(&stats->service_time); | ||
428 | blkg_rwstat_reset(&stats->wait_time); | ||
429 | blkg_stat_reset(&stats->time); | ||
430 | #ifdef CONFIG_DEBUG_BLK_CGROUP | ||
431 | blkg_stat_reset(&stats->unaccounted_time); | ||
432 | blkg_stat_reset(&stats->avg_queue_size_sum); | ||
433 | blkg_stat_reset(&stats->avg_queue_size_samples); | ||
434 | blkg_stat_reset(&stats->dequeue); | ||
435 | blkg_stat_reset(&stats->group_wait_time); | ||
436 | blkg_stat_reset(&stats->idle_time); | ||
437 | blkg_stat_reset(&stats->empty_time); | ||
438 | #endif | ||
439 | blkio_reset_stats_cpu(blkg, pol->plid); | 420 | blkio_reset_stats_cpu(blkg, pol->plid); |
440 | 421 | ||
441 | if (pol->ops.blkio_reset_group_stats_fn) | 422 | if (pol->ops.blkio_reset_group_stats_fn) |
@@ -549,13 +530,13 @@ static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, | |||
549 | int off) | 530 | int off) |
550 | { | 531 | { |
551 | return __blkg_prfill_u64(sf, pd, | 532 | return __blkg_prfill_u64(sf, pd, |
552 | blkg_stat_read((void *)&pd->stats + off)); | 533 | blkg_stat_read((void *)pd->pdata + off)); |
553 | } | 534 | } |
554 | 535 | ||
555 | static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | 536 | static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, |
556 | int off) | 537 | int off) |
557 | { | 538 | { |
558 | struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off); | 539 | struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->pdata + off); |
559 | 540 | ||
560 | return __blkg_prfill_rwstat(sf, pd, &rwstat); | 541 | return __blkg_prfill_rwstat(sf, pd, &rwstat); |
561 | } | 542 | } |