author     Divyesh Shah <dpshah@google.com>    2010-04-01 18:01:41 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2010-04-02 02:44:37 -0400
commit     9195291e5f05e01d67f9a09c756b8aca8f009089 (patch)
tree       e46a151fe39be2bc23d0683bedb2cbefe916fb5b /block
parent     303a3acb2362f16c7e7f4c53b40c2f4b396dc8d5 (diff)
blkio: Increment the blkio cgroup stats for real now
We also add start_time_ns and io_start_time_ns fields to struct request
here to record the time when a request is created and when it is
dispatched to the device. We use ns units here, as ms and jiffies are
not very useful for non-rotational media.
Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
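The set_start_time_ns() and set_io_start_time_ns() helpers used in the blk-core.c hunks below live in include/linux/blkdev.h, which falls outside this block/-limited diffstat. A rough sketch of what they presumably look like, assuming the usual CONFIG_BLK_CGROUP gating:

#ifdef CONFIG_BLK_CGROUP
/* Assumed shape of the helpers; the real definitions are not in this diff. */
static inline void set_start_time_ns(struct request *req)
{
	req->start_time_ns = sched_clock();
}

static inline void set_io_start_time_ns(struct request *req)
{
	req->io_start_time_ns = sched_clock();
}
#else
/* With blk-cgroup disabled, the stamping compiles away to nothing. */
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
#endif

sched_clock() is the natural choice here because blkiocg_update_request_completion_stats() below compares the stamps against sched_clock() at completion time.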
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c  | 60
-rw-r--r--  block/blk-cgroup.h  | 14
-rw-r--r--  block/blk-core.c    |  6
-rw-r--r--  block/cfq-iosched.c |  4
4 files changed, 76 insertions(+), 8 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ad6843f2e0ab..9af7257f429c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,6 +15,7 @@
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/blkdev.h>
 #include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(blkio_list_lock);
@@ -55,6 +56,26 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
+{
+	if (flags & REQ_RW)
+		stat[IO_WRITE] += add;
+	else
+		stat[IO_READ] += add;
+	/*
+	 * Everywhere in the block layer, an IO is treated as sync if it is a
+	 * read or a SYNC write. We follow the same norm.
+	 */
+	if (!(flags & REQ_RW) || flags & REQ_RW_SYNC)
+		stat[IO_SYNC] += add;
+	else
+		stat[IO_ASYNC] += add;
+}
+
 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 {
 	unsigned long flags;
@@ -65,6 +86,41 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
+void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->sectors += blk_rq_sectors(rq);
+	io_add_stat(stats->io_serviced, 1, rq->cmd_flags);
+	io_add_stat(stats->io_service_bytes, blk_rq_sectors(rq) << 9,
+			rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+
+void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
+				struct request *rq)
+{
+	struct blkio_group_stats *stats;
+	unsigned long flags;
+	unsigned long long now = sched_clock();
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	if (time_after64(now, rq->io_start_time_ns))
+		io_add_stat(stats->io_service_time, now - rq->io_start_time_ns,
+				rq->cmd_flags);
+	if (time_after64(rq->io_start_time_ns, rq->start_time_ns))
+		io_add_stat(stats->io_wait_time,
+				rq->io_start_time_ns - rq->start_time_ns,
+				rq->cmd_flags);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_completion_stats);
+
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 			struct blkio_group *blkg, void *key, dev_t dev)
 {
@@ -325,12 +381,12 @@ SHOW_FUNCTION_PER_GROUP(dequeue, get_stat, get_dequeue_stat, 0);
 #undef SHOW_FUNCTION_PER_GROUP
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue)
 {
 	blkg->stats.dequeue += dequeue;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #endif
 
 struct cftype blkio_files[] = {
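For reference, io_add_stat() above buckets each addition twice: by direction (REQ_RW set means write, clear means read) and by synchronicity, where a read or a write carrying REQ_RW_SYNC counts as sync. A small userspace sketch of the same classification, using placeholder flag bits and mirror enum names rather than the real kernel definitions:

/* Illustration only: mirrors io_add_stat()'s bucketing outside the kernel. */
#include <stdint.h>
#include <stdio.h>

#define REQ_RW		(1u << 0)	/* "is a write"  (placeholder bit value) */
#define REQ_RW_SYNC	(1u << 1)	/* "is sync"     (placeholder bit value) */

enum { IO_READ, IO_WRITE, IO_SYNC, IO_ASYNC, IO_STAT_NR };

static void io_add(uint64_t *stat, uint64_t add, unsigned int flags)
{
	if (flags & REQ_RW)
		stat[IO_WRITE] += add;
	else
		stat[IO_READ] += add;
	/* A read, or a SYNC write, counts as sync; everything else is async. */
	if (!(flags & REQ_RW) || (flags & REQ_RW_SYNC))
		stat[IO_SYNC] += add;
	else
		stat[IO_ASYNC] += add;
}

int main(void)
{
	uint64_t stat[IO_STAT_NR] = { 0 };

	io_add(stat, 1, 0);			/* read        -> IO_READ,  IO_SYNC  */
	io_add(stat, 1, REQ_RW);		/* async write -> IO_WRITE, IO_ASYNC */
	io_add(stat, 1, REQ_RW | REQ_RW_SYNC);	/* sync write  -> IO_WRITE, IO_SYNC  */

	/* Prints: read=1 write=2 sync=2 async=1 */
	printf("read=%llu write=%llu sync=%llu async=%llu\n",
	       (unsigned long long)stat[IO_READ], (unsigned long long)stat[IO_WRITE],
	       (unsigned long long)stat[IO_SYNC], (unsigned long long)stat[IO_ASYNC]);
	return 0;
}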
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 5c5e5294b506..80010ef64ab0 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -112,12 +112,12 @@ static inline char *blkg_path(struct blkio_group *blkg)
 {
 	return blkg->path;
 }
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue);
 #else
 static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-static inline void blkiocg_update_blkio_group_dequeue_stats(
-		struct blkio_group *blkg, unsigned long dequeue) {}
+static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+						unsigned long dequeue) {}
 #endif
 
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
@@ -130,6 +130,10 @@ extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 			void *key);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
 					unsigned long time);
+void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
+					struct request *rq);
+void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
+					struct request *rq);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
@@ -147,5 +151,9 @@ static inline struct blkio_group *
 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
 						unsigned long time) {}
+static inline void blkiocg_update_request_dispatch_stats(
+		struct blkio_group *blkg, struct request *rq) {}
+static inline void blkiocg_update_request_completion_stats(
+		struct blkio_group *blkg, struct request *rq) {}
 #endif
 #endif /* _BLK_CGROUP_H */
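The empty static inline stubs in the #else branches above are what let cfq-iosched.c call the accounting hooks unconditionally: when blk-cgroup support is not configured, the calls compile away. A minimal standalone sketch of that pattern (the names here are illustrative, not kernel APIs):

#include <stdio.h>

#ifdef ENABLE_ACCOUNTING			/* stands in for CONFIG_BLK_CGROUP */
static inline void account_dispatch(int nr) { printf("dispatched %d\n", nr); }
#else
static inline void account_dispatch(int nr) { (void)nr; /* optimised out */ }
#endif

int main(void)
{
	account_dispatch(1);	/* the caller never needs its own #ifdef */
	return 0;
}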
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fe174dc74d1..1d94f15d7f0d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,6 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->tag = -1;
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
+	set_start_time_ns(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -1855,8 +1856,10 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]++;
+		set_io_start_time_ns(rq);
+	}
 }
 
 /**
@@ -2517,4 +2520,3 @@ int __init blk_dev_init(void)
 
 	return 0;
 }
-
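Taken together, the two stamps added to blk-core.c bracket the queueing phase: start_time_ns is recorded in blk_rq_init() when the request is set up, and io_start_time_ns in blk_dequeue_request() when it is handed to the driver. The completion hook in blk-cgroup.c then derives io_wait_time and io_service_time from them. A quick sketch with made-up nanosecond values to show the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical timeline for one request, in nanoseconds. */
	uint64_t start_time_ns    = 1000000;	/* blk_rq_init(): request created        */
	uint64_t io_start_time_ns = 1250000;	/* blk_dequeue_request(): sent to driver */
	uint64_t now              = 1900000;	/* sched_clock() at completion           */

	/* Same arithmetic as blkiocg_update_request_completion_stats() above. */
	uint64_t io_wait_time    = io_start_time_ns - start_time_ns;	/* 250000 ns queued    */
	uint64_t io_service_time = now - io_start_time_ns;		/* 650000 ns on device */

	printf("wait=%llu ns, service=%llu ns\n",
	       (unsigned long long)io_wait_time,
	       (unsigned long long)io_service_time);
	return 0;
}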
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c5161bbf2fe9..42028e7128a7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -855,7 +855,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
+	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -1865,6 +1865,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	blkiocg_update_request_dispatch_stats(&cfqq->cfqg->blkg, rq);
 }
 
 /*
@@ -3285,6 +3286,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
+	blkiocg_update_request_completion_stats(&cfqq->cfqg->blkg, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 