author      Divyesh Shah <dpshah@google.com>        2010-04-09 00:14:23 -0400
committer   Jens Axboe <jens.axboe@oracle.com>      2010-04-09 02:36:07 -0400
commit      812d402648f4fc1ab1091b2172a46fc1b367c724 (patch)
tree        9dc52d5bfdbc170559169a0157ed3295d551e9cf /block/blk-cgroup.c
parent      84c124da9ff50bd71fab9c939ee5b7cd8bef2bd9 (diff)
blkio: Add io_merged stat
This counts both the number of bios merged into requests belonging to this
cgroup and the number of requests merged together.
In the past, we've observed different merging behavior across upstream kernels,
some of it by design and some of it due to actual bugs. This stat helps a great
deal in debugging such problems when applications report decreased throughput
with a new kernel version.
This required adding an extra elevator function to capture bios being merged,
as I did not want to pollute the elevator code with blkiocg knowledge; the
accounting invocation therefore needed to come from CFQ. (A sketch of that
invocation path follows the sign-offs.)
Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
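
The elevator and CFQ hunks are outside this diffstat-limited view, so the
following is only a rough sketch of the invocation path the message describes;
the names elv_bio_merged, elevator_bio_merged_fn, cfq_bio_merged, RQ_CFQQ, and
the cfqq->cfqg->blkg chain are assumptions for illustration, not taken from
this diff:

/*
 * Sketch only: these names are assumptions, since the elevator and
 * cfq-iosched.c changes are not shown in the block/blk-cgroup.c diff below.
 *
 * elevator.c side: a new elevator op lets the scheduler observe a bio
 * being merged into an existing request, without the elevator core
 * learning anything about blkio cgroups.
 */
void elv_bio_merged(struct request_queue *q, struct request *rq,
		    struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

/*
 * cfq-iosched.c side: CFQ implements the op and forwards to the new
 * accounting function added by this patch, passing the bio's direction
 * and sync flag so the event lands in the right sub-stat bucket.
 */
static void cfq_bio_merged(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	blkiocg_update_io_merged_stats(&cfqq->cfqg->blkg, bio_data_dir(bio),
				       cfq_bio_sync(bio));
}

This split keeps the cgroup accounting out of the generic elevator code: only
CFQ, which already knows about blkio groups, calls into blk-cgroup.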
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r-- | block/blk-cgroup.c | 17
1 file changed, 17 insertions, 0 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 6797df508821..d23b538858ce 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -127,6 +127,18 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
 
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+					bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
+			sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
+
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 		struct blkio_group *blkg, void *key, dev_t dev)
 {
@@ -363,6 +375,7 @@ SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
 SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
 SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
 SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
 #endif
@@ -408,6 +421,10 @@ struct cftype blkio_files[] = {
 		.read_map = blkiocg_io_wait_time_read,
 	},
 	{
+		.name = "io_merged",
+		.read_map = blkiocg_io_merged_read,
+	},
+	{
 		.name = "reset_stats",
 		.write_u64 = blkiocg_reset_stats,
 	},
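
For reference, the new blkiocg_update_io_merged_stats() leans on the
blkio_add_stat() helper introduced by the parent commit (84c124da9ff5). A
minimal sketch of what that helper plausibly does, assuming each stat_arr
entry is an array indexed by a per-direction sub-type enum (the body below is
an assumption, not part of this diff):

/*
 * Assumed helper layout, not shown in this diff: one event increments
 * the directional counter, the sync/async counter, and the total.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;	/* write bio */
	else
		stat[BLKIO_STAT_READ] += add;	/* read bio */
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
	stat[BLKIO_STAT_TOTAL] += add;
}

With the cftype entry above, each cgroup then exposes an io_merged file;
like the other per-group stat files, its lines presumably read as
"major:minor <Read|Write|Sync|Async|Total> <count>".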