author	Divyesh Shah <dpshah@google.com>	2010-04-01 18:01:41 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2010-04-02 02:44:37 -0400
commit	9195291e5f05e01d67f9a09c756b8aca8f009089 (patch)
tree	e46a151fe39be2bc23d0683bedb2cbefe916fb5b /block/blk-cgroup.c
parent	303a3acb2362f16c7e7f4c53b40c2f4b396dc8d5 (diff)
blkio: Increment the blkio cgroup stats for real now
We also add start_time_ns and io_start_time_ns fields to struct request to record the time when a request is created and the time when it is dispatched to the device. We use nanosecond-resolution fields here because ms and jiffies are not fine-grained enough for non-rotational media.

Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
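For context, a minimal sketch of the struct request side of this change (this diff is limited to blk-cgroup.c, so the request fields themselves do not appear below; the placement of the fields and the helper names in this sketch are assumptions for illustration, only the field names come from the commit message):

/*
 * Illustrative sketch only -- not part of this diff.  The commit message
 * states that start_time_ns and io_start_time_ns are added to struct
 * request; the stamping helpers below are assumed names.
 */
struct request {
	/* ... existing fields ... */
	uint64_t start_time_ns;		/* set when the request is created */
	uint64_t io_start_time_ns;	/* set when it is dispatched to the device */
};

/* Assumed stamping helpers, using the same ns clock the completion path reads. */
static inline void rq_set_start_time_ns(struct request *rq)
{
	rq->start_time_ns = sched_clock();
}

static inline void rq_set_io_start_time_ns(struct request *rq)
{
	rq->io_start_time_ns = sched_clock();
}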
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	60
1 file changed, 58 insertions(+), 2 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ad6843f2e0ab..9af7257f429c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,6 +15,7 @@
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/blkdev.h>
 #include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(blkio_list_lock);
@@ -55,6 +56,26 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
+{
+	if (flags & REQ_RW)
+		stat[IO_WRITE] += add;
+	else
+		stat[IO_READ] += add;
+	/*
+	 * Everywhere in the block layer, an IO is treated as sync if it is a
+	 * read or a SYNC write. We follow the same norm.
+	 */
+	if (!(flags & REQ_RW) || flags & REQ_RW_SYNC)
+		stat[IO_SYNC] += add;
+	else
+		stat[IO_ASYNC] += add;
+}
+
 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 {
 	unsigned long flags;
@@ -65,6 +86,41 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
+void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->sectors += blk_rq_sectors(rq);
+	io_add_stat(stats->io_serviced, 1, rq->cmd_flags);
+	io_add_stat(stats->io_service_bytes, blk_rq_sectors(rq) << 9,
+			rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+
+void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+	unsigned long long now = sched_clock();
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	if (time_after64(now, rq->io_start_time_ns))
+		io_add_stat(stats->io_service_time, now - rq->io_start_time_ns,
+				rq->cmd_flags);
+	if (time_after64(rq->io_start_time_ns, rq->start_time_ns))
+		io_add_stat(stats->io_wait_time,
+				rq->io_start_time_ns - rq->start_time_ns,
+				rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_completion_stats);
+
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 			struct blkio_group *blkg, void *key, dev_t dev)
 {
@@ -325,12 +381,12 @@ SHOW_FUNCTION_PER_GROUP(dequeue, get_stat, get_dequeue_stat, 0);
 #undef SHOW_FUNCTION_PER_GROUP
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue)
 {
 	blkg->stats.dequeue += dequeue;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #endif
 
 struct cftype blkio_files[] = {
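The two new hooks are intended to be driven from the I/O scheduler's dispatch and completion paths. A hedged usage sketch follows, assuming the caller has already looked up the blkio_group for the cgroup the request belongs to; the wrapper names are made up for illustration, and only the two blkiocg_* calls come from this patch:

/* Usage sketch (illustrative wrapper names, not part of this diff). */
static void example_on_dispatch(struct blkio_group *blkg, struct request *rq)
{
	/* bumps sectors, io_serviced and io_service_bytes for rq's direction */
	blkiocg_update_request_dispatch_stats(blkg, rq);
}

static void example_on_completion(struct blkio_group *blkg, struct request *rq)
{
	/*
	 * io_service_time += sched_clock() - rq->io_start_time_ns
	 * io_wait_time    += rq->io_start_time_ns - rq->start_time_ns
	 * (both guarded by time_after64() inside the helper)
	 */
	blkiocg_update_request_completion_stats(blkg, rq);
}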