 Documentation/cgroups/blkio-controller.txt | 11 ++++
 block/blk-cgroup.c                         | 98 ++++++++++++++++-
 block/blk-cgroup.h                         | 20 +++-
 block/cfq-iosched.c                        | 11 ++++
 4 files changed, 134 insertions(+), 6 deletions(-)
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 810e30171a54..6e52e7c512a4 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -139,6 +139,17 @@ Details of cgroup files
 	  cgroup. This is further divided by the type of operation - read or
 	  write, sync or async.
 
+- blkio.io_queued
+	- Total number of requests queued up at any given instant for this
+	  cgroup. This is further divided by the type of operation - read or
+	  write, sync or async.
+
+- blkio.avg_queue_size
+	- Debugging aid only enabled if CONFIG_DEBUG_CFQ_IOSCHED=y.
+	  The average queue size for this cgroup over the entire time of this
+	  cgroup's existence. Queue size samples are taken each time one of the
+	  queues of this cgroup gets a timeslice.
+
 - blkio.dequeue
 	- Debugging aid only enabled if CONFIG_DEBUG_CFQ_IOSCHED=y. This
 	  gives the statistics about how many a times a group was dequeued
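A quick way to exercise the new interface from userspace is simply to read the files. The sketch below is illustrative only: the /cgroup/blkio mount point and the test1 cgroup are assumptions, and the per-device line format noted in the comment follows the convention of the other blkio stat files.

/*
 * Minimal sketch for inspecting the new counters from userspace. The
 * mount point (/cgroup/blkio) and cgroup name (test1) are hypothetical;
 * each per-device line should look like
 * "<major>:<minor> <Read|Write|Sync|Async|Total> <count>".
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/cgroup/blkio/test1/blkio.io_queued", "r");

	if (!f) {
		perror("blkio.io_queued");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}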
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d23b538858ce..1e0c4970b35d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -81,6 +81,71 @@ static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
 		stat[BLKIO_STAT_ASYNC] += add;
 }
 
+/*
+ * Decrements the appropriate stat variable if non-zero depending on the
+ * request type. Panics on value being zero.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+{
+	if (direction) {
+		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
+		stat[BLKIO_STAT_WRITE]--;
+	} else {
+		BUG_ON(stat[BLKIO_STAT_READ] == 0);
+		stat[BLKIO_STAT_READ]--;
+	}
+	if (sync) {
+		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
+		stat[BLKIO_STAT_SYNC]--;
+	} else {
+		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
+		stat[BLKIO_STAT_ASYNC]--;
+	}
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
+{
+	unsigned long flags;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+	stats->avg_queue_size_sum +=
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
+	stats->avg_queue_size_samples++;
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
+#endif
+
+void blkiocg_update_request_add_stats(struct blkio_group *blkg,
+			struct blkio_group *curr_blkg, bool direction,
+			bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+			sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);
+
+void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
+						bool direction, bool sync)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+					direction, sync);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);
+
 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 {
 	unsigned long flags;
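Note the design choice in blkio_check_and_dec_stat(): it BUG()s on underflow rather than clamping at zero, because a QUEUED counter can only underflow if a remove was not preceded by a matching add, i.e. the scheduler instrumentation itself is unbalanced. A stand-alone sketch of the same pattern, with assert() standing in for BUG_ON() (names illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum { STAT_READ, STAT_WRITE, STAT_SYNC, STAT_ASYNC, STAT_TOTAL };

/* Decrement the direction and sync buckets, trapping on underflow. */
static void check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	int dir = direction ? STAT_WRITE : STAT_READ;
	int syn = sync ? STAT_SYNC : STAT_ASYNC;

	assert(stat[dir] > 0);	/* the kernel BUG()s here instead */
	stat[dir]--;
	assert(stat[syn] > 0);
	stat[syn]--;
}

int main(void)
{
	uint64_t stat[STAT_TOTAL] = { [STAT_READ] = 1, [STAT_SYNC] = 1 };

	check_and_dec_stat(stat, false, true);	/* balanced: ok */
	return 0;
}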
@@ -253,14 +318,18 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	struct blkio_cgroup *blkcg;
 	struct blkio_group *blkg;
 	struct hlist_node *n;
-	struct blkio_group_stats *stats;
+	uint64_t queued[BLKIO_STAT_TOTAL];
+	int i;
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
 	spin_lock_irq(&blkcg->lock);
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 		spin_lock(&blkg->stats_lock);
-		stats = &blkg->stats;
-		memset(stats, 0, sizeof(struct blkio_group_stats));
+		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+			queued[i] = blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i];
+		memset(&blkg->stats, 0, sizeof(struct blkio_group_stats));
+		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+			blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
 		spin_unlock(&blkg->stats_lock);
 	}
 	spin_unlock_irq(&blkcg->lock);
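The save/restore dance above is deliberate: the QUEUED counters describe requests that are still sitting in the scheduler rather than accumulated history, so zeroing them on reset would make the next blkiocg_update_request_remove_stats() call underflow and trip the BUG_ON() in blkio_check_and_dec_stat(). A stand-alone sketch of the pattern (structure and names illustrative):

#include <stdint.h>
#include <string.h>

#define NR_BUCKETS 5	/* read/write/sync/async/total */

struct group_stats {
	uint64_t serviced;		/* cumulative - safe to zero */
	uint64_t queued[NR_BUCKETS];	/* live state - must survive reset */
};

static void reset_stats(struct group_stats *st)
{
	uint64_t saved[NR_BUCKETS];

	memcpy(saved, st->queued, sizeof(saved));	/* save live counters */
	memset(st, 0, sizeof(*st));			/* clear everything */
	memcpy(st->queued, saved, sizeof(saved));	/* put them back */
}

int main(void)
{
	struct group_stats st = { .serviced = 42, .queued = { 3, 1 } };

	reset_stats(&st);
	return st.serviced == 0 && st.queued[0] == 3 ? 0 : 1;
}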
@@ -323,6 +392,15 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.sectors, cb, dev);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
+		uint64_t sum = blkg->stats.avg_queue_size_sum;
+		uint64_t samples = blkg->stats.avg_queue_size_samples;
+		if (samples)
+			do_div(sum, samples);
+		else
+			sum = 0;
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+	}
 	if (type == BLKIO_STAT_DEQUEUE)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.dequeue, cb, dev);
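A subtlety worth flagging in the hunk above: do_div() divides its 64-bit dividend in place and returns the remainder, which is why sum itself holds the average handed to blkio_fill_stat(). For example, a sum of 37 over 10 samples reports 3 (integer truncation). A userspace approximation of that contract (hypothetical helper, for illustration):

#include <assert.h>
#include <stdint.h>

/* Divide *dividend in place and return the remainder - the shape of the
 * kernel do_div() contract (the real one is a macro over an lvalue). */
static uint32_t div_in_place(uint64_t *dividend, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*dividend % divisor);

	*dividend /= divisor;
	return rem;
}

int main(void)
{
	uint64_t sum = 37;

	assert(div_in_place(&sum, 10) == 7 && sum == 3);
	return 0;
}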
@@ -376,8 +454,10 @@ SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
 SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
 SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
 SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
+SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
+SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
 #endif
 #undef SHOW_FUNCTION_PER_GROUP
 
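The two new SHOW_FUNCTION_PER_GROUP() lines are what generate the blkiocg_io_queued_read and blkiocg_avg_queue_size_read handlers that the cftype table below wires up; the macro pastes the stat name into a per-stat read function. A self-contained reduction of the idea (simplified signatures, not the kernel's actual macro body):

#include <stdio.h>

enum stat_type { STAT_QUEUED, STAT_AVG_QUEUE_SIZE };

static int read_stat(enum stat_type type) { return (int)type; }

#define SHOW_FUNCTION_PER_GROUP(name, stat_type)	\
static int blkiocg_##name##_read(void)			\
{							\
	return read_stat(stat_type);			\
}

SHOW_FUNCTION_PER_GROUP(io_queued, STAT_QUEUED)
SHOW_FUNCTION_PER_GROUP(avg_queue_size, STAT_AVG_QUEUE_SIZE)

int main(void)
{
	printf("%d %d\n", blkiocg_io_queued_read(),
	       blkiocg_avg_queue_size_read());
	return 0;
}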
@@ -425,14 +505,22 @@ struct cftype blkio_files[] = {
 		.read_map = blkiocg_io_merged_read,
 	},
 	{
+		.name = "io_queued",
+		.read_map = blkiocg_io_queued_read,
+	},
+	{
 		.name = "reset_stats",
 		.write_u64 = blkiocg_reset_stats,
 	},
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	{
+		.name = "avg_queue_size",
+		.read_map = blkiocg_avg_queue_size_read,
+	},
+	{
 		.name = "dequeue",
 		.read_map = blkiocg_dequeue_read,
 	},
 #endif
 };
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 470a29db6bec..bea7f3b9a88e 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -36,10 +36,13 @@ enum stat_type {
 	BLKIO_STAT_WAIT_TIME,
 	/* Number of IOs merged */
 	BLKIO_STAT_MERGED,
+	/* Number of IOs queued up */
+	BLKIO_STAT_QUEUED,
 	/* All the single valued stats go below this */
 	BLKIO_STAT_TIME,
 	BLKIO_STAT_SECTORS,
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+	BLKIO_STAT_AVG_QUEUE_SIZE,
 	BLKIO_STAT_DEQUEUE
 #endif
 };
@@ -63,8 +66,12 @@ struct blkio_group_stats {
 	/* total disk time and nr sectors dispatched by this group */
 	uint64_t time;
 	uint64_t sectors;
-	uint64_t stat_arr[BLKIO_STAT_MERGED + 1][BLKIO_STAT_TOTAL];
+	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+	/* Sum of number of IOs queued across all samples */
+	uint64_t avg_queue_size_sum;
+	/* Count of samples taken for average */
+	uint64_t avg_queue_size_samples;
 	/* How many times this group has been removed from service tree */
 	unsigned long dequeue;
 #endif
@@ -127,10 +134,13 @@ static inline char *blkg_path(struct blkio_group *blkg)
 {
 	return blkg->path;
 }
+void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg);
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 				unsigned long dequeue);
 #else
 static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+static inline void blkiocg_update_set_active_queue_stats(
+						struct blkio_group *blkg) {}
 static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 						unsigned long dequeue) {}
 #endif
@@ -152,6 +162,10 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
 	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
 					bool sync);
+void blkiocg_update_request_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
+					bool direction, bool sync);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
@@ -175,5 +189,9 @@ static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
 		bool sync) {}
 static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
 					bool direction, bool sync) {}
+static inline void blkiocg_update_request_add_stats(struct blkio_group *blkg,
+		struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
+					bool direction, bool sync) {}
 #endif
 #endif /* _BLK_CGROUP_H */
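As elsewhere in this header, each new update function gets an empty static-inline stub in the #else branch, so callers such as cfq-iosched.c compile unchanged with the option off and the calls simply vanish. The convention in miniature (names hypothetical):

struct foo;	/* opaque to the stub */

#ifdef CONFIG_FOO_STATS
void foo_update_stats(struct foo *f);			/* real version */
#else
static inline void foo_update_stats(struct foo *f) {}	/* compiles away */
#endif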
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4eb1906cf6c6..8e0b86a9111a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1380,7 +1380,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
+	blkiocg_update_request_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq),
+						rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
+	blkiocg_update_request_add_stats(
+			&cfqq->cfqg->blkg, &cfqq->cfqd->serving_group->blkg,
+			rq_data_dir(rq), rq_is_sync(rq));
 }
 
 static struct request *
@@ -1436,6 +1441,8 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
+	blkiocg_update_request_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq),
+						rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1527,6 +1534,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
+		blkiocg_update_set_active_queue_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -3213,6 +3221,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
 
+	blkiocg_update_request_add_stats(&cfqq->cfqg->blkg,
+			&cfqd->serving_group->blkg, rq_data_dir(rq),
+			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
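Taken together, the CFQ hooks keep the QUEUED counters balanced: cfq_insert_request() (and the re-add in cfq_reposition_rq_rb()) count a request in, cfq_remove_request() (and the delete in cfq_reposition_rq_rb()) count it out, and __cfq_set_active_queue() samples the current depth for blkio.avg_queue_size. A toy model of that invariant (illustrative only, not the kernel's locking or data structures):

#include <assert.h>
#include <stdint.h>

static uint64_t queued, queue_sum, samples;

static void insert_request(void)   { queued++; }
static void remove_request(void)   { assert(queued > 0); queued--; }
static void set_active_queue(void) { queue_sum += queued; samples++; }

int main(void)
{
	insert_request();
	insert_request();
	set_active_queue();		/* sample depth 2 */
	remove_request();
	set_active_queue();		/* sample depth 1 */
	remove_request();
	assert(queued == 0);		/* every add matched by a remove */
	/* avg_queue_size would report (2 + 1) / 2 = 1 after truncation */
	return 0;
}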