aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-cgroup.h
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-03-08 13:53:58 -0500
committerJens Axboe <axboe@kernel.dk>2012-03-20 07:45:37 -0400
commit997a026c80c3cc05f82e589aced1f0011c17d376 (patch)
tree905fe49970f8549663e1e70e77dd04811fd14c9c /block/blk-cgroup.h
parent5fe224d2d5fbf8f020b30d0ba69fed7856923752 (diff)
blkcg: simplify stat reset
blkiocg_reset_stats() implements stat reset for blkio.reset_stats cgroupfs file. This feature is very unconventional and something which shouldn't have been merged. It's only useful when there's only one user or tool looking at the stats. As soon as multiple users and/or tools are involved, it becomes useless as resetting disrupts other usages. There are very good reasons why all other stats expect readers to read values at the start and end of a period and subtract to determine delta over the period. The implementation is rather complex - some fields shouldn't be cleared and it saves some fields, resets whole and restores for some reason. Reset of percpu stats is also racy. The comment points to 64bit store atomicity for the reason but even without that stores for zero can simply race with other CPUs doing RMW and get clobbered. Simplify reset by * Clear selectively instead of resetting and restoring. * Grouping debug stat fields to be reset and using memset() over them. * Not caring about stats_lock. * Using memset() to reset percpu stats. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.h')
-rw-r--r--block/blk-cgroup.h14
1 file changed, 12 insertions, 2 deletions
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6c8e3e345426..1fa3c5e8d87f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -131,21 +131,31 @@ struct blkio_group_stats {
131 131
132 /* Total time spent waiting for it to be assigned a timeslice. */ 132 /* Total time spent waiting for it to be assigned a timeslice. */
133 uint64_t group_wait_time; 133 uint64_t group_wait_time;
134 uint64_t start_group_wait_time;
135 134
136 /* Time spent idling for this blkio_group */ 135 /* Time spent idling for this blkio_group */
137 uint64_t idle_time; 136 uint64_t idle_time;
138 uint64_t start_idle_time;
139 /* 137 /*
140 * Total time when we have requests queued and do not contain the 138 * Total time when we have requests queued and do not contain the
141 * current active queue. 139 * current active queue.
142 */ 140 */
143 uint64_t empty_time; 141 uint64_t empty_time;
142
143 /* fields after this shouldn't be cleared on stat reset */
144 uint64_t start_group_wait_time;
145 uint64_t start_idle_time;
144 uint64_t start_empty_time; 146 uint64_t start_empty_time;
145 uint16_t flags; 147 uint16_t flags;
146#endif 148#endif
147}; 149};
148 150
151#ifdef CONFIG_DEBUG_BLK_CGROUP
152#define BLKG_STATS_DEBUG_CLEAR_START \
153 offsetof(struct blkio_group_stats, unaccounted_time)
154#define BLKG_STATS_DEBUG_CLEAR_SIZE \
155 (offsetof(struct blkio_group_stats, start_group_wait_time) - \
156 BLKG_STATS_DEBUG_CLEAR_START)
157#endif
158
149/* Per cpu blkio group stats */ 159/* Per cpu blkio group stats */
150struct blkio_group_stats_cpu { 160struct blkio_group_stats_cpu {
151 uint64_t sectors; 161 uint64_t sectors;