 block/blk-cgroup.c   | 135
 block/blk-cgroup.h   |  27
 block/blk-throttle.c |   9
 block/cfq-iosched.c  |  18
 4 files changed, 147 insertions(+), 42 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b0592bca6970..34bfcefdd924 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -392,20 +392,22 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
+/*
+ * should be called under rcu read lock or queue lock to make sure blkg pointer
+ * is valid.
+ */
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 				uint64_t bytes, bool direction, bool sync)
 {
-	struct blkio_group_stats *stats;
-	unsigned long flags;
+	struct blkio_group_stats_cpu *stats_cpu;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-	stats->sectors += bytes >> 9;
-	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
-			sync);
-	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
-			direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+
+	stats_cpu->sectors += bytes >> 9;
+	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
+			1, direction, sync);
+	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
+			bytes, direction, sync);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
 
@@ -440,6 +442,20 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
+/*
+ * This function allocates the per cpu stats for blkio_group. Should be called
+ * from sleepable context as alloc_per_cpu() requires that.
+ */
+int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+{
+	/* Allocate memory for per cpu stats */
+	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
+	if (!blkg->stats_cpu)
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 		struct blkio_group *blkg, void *key, dev_t dev,
 		enum blkio_policy_id plid)
@@ -600,6 +616,53 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
 	return val;
 }
 
+
+static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
+			enum stat_type_cpu type, enum stat_sub_type sub_type)
+{
+	int cpu;
+	struct blkio_group_stats_cpu *stats_cpu;
+	uint64_t val = 0;
+
+	for_each_possible_cpu(cpu) {
+		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
+
+		if (type == BLKIO_STAT_CPU_SECTORS)
+			val += stats_cpu->sectors;
+		else
+			val += stats_cpu->stat_arr_cpu[type][sub_type];
+	}
+
+	return val;
+}
+
+static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
+		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
+{
+	uint64_t disk_total, val;
+	char key_str[MAX_KEY_LEN];
+	enum stat_sub_type sub_type;
+
+	if (type == BLKIO_STAT_CPU_SECTORS) {
+		val = blkio_read_stat_cpu(blkg, type, 0);
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
+	}
+
+	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+			sub_type++) {
+		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+		val = blkio_read_stat_cpu(blkg, type, sub_type);
+		cb->fill(cb, key_str, val);
+	}
+
+	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
+			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
+
+	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+	cb->fill(cb, key_str, disk_total);
+	return disk_total;
+}
+
 /* This should be called with blkg->stats_lock held */
 static uint64_t blkio_get_stat(struct blkio_group *blkg,
 		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
@@ -611,9 +674,6 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 	if (type == BLKIO_STAT_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.time, cb, dev);
-	if (type == BLKIO_STAT_SECTORS)
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.sectors, cb, dev);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
@@ -1077,8 +1137,8 @@ static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
 }
 
 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
-		struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
-		bool show_total)
+		struct cftype *cft, struct cgroup_map_cb *cb,
+		enum stat_type type, bool show_total, bool pcpu)
 {
 	struct blkio_group *blkg;
 	struct hlist_node *n;
@@ -1089,10 +1149,15 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
 		if (blkg->dev) {
 			if (!cftype_blkg_same_policy(cft, blkg))
 				continue;
-			spin_lock_irq(&blkg->stats_lock);
-			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
-						type);
-			spin_unlock_irq(&blkg->stats_lock);
+			if (pcpu)
+				cgroup_total += blkio_get_stat_cpu(blkg, cb,
+						blkg->dev, type);
+			else {
+				spin_lock_irq(&blkg->stats_lock);
+				cgroup_total += blkio_get_stat(blkg, cb,
+						blkg->dev, type);
+				spin_unlock_irq(&blkg->stats_lock);
+			}
 		}
 	}
 	if (show_total)
@@ -1116,47 +1181,47 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
 		switch(name) {
 		case BLKIO_PROP_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_TIME, 0);
+						BLKIO_STAT_TIME, 0, 0);
 		case BLKIO_PROP_sectors:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SECTORS, 0);
+						BLKIO_STAT_CPU_SECTORS, 0, 1);
 		case BLKIO_PROP_io_service_bytes:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICE_BYTES, 1);
+						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
 		case BLKIO_PROP_io_serviced:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICED, 1);
+						BLKIO_STAT_CPU_SERVICED, 1, 1);
 		case BLKIO_PROP_io_service_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICE_TIME, 1);
+						BLKIO_STAT_SERVICE_TIME, 1, 0);
 		case BLKIO_PROP_io_wait_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_WAIT_TIME, 1);
+						BLKIO_STAT_WAIT_TIME, 1, 0);
 		case BLKIO_PROP_io_merged:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_MERGED, 1);
+						BLKIO_STAT_MERGED, 1, 0);
 		case BLKIO_PROP_io_queued:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_QUEUED, 1);
+						BLKIO_STAT_QUEUED, 1, 0);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 		case BLKIO_PROP_unaccounted_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_UNACCOUNTED_TIME, 0);
+						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
 		case BLKIO_PROP_dequeue:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_DEQUEUE, 0);
+						BLKIO_STAT_DEQUEUE, 0, 0);
 		case BLKIO_PROP_avg_queue_size:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
+						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
 		case BLKIO_PROP_group_wait_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_GROUP_WAIT_TIME, 0);
+						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
 		case BLKIO_PROP_idle_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_IDLE_TIME, 0);
+						BLKIO_STAT_IDLE_TIME, 0, 0);
 		case BLKIO_PROP_empty_time:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_EMPTY_TIME, 0);
+						BLKIO_STAT_EMPTY_TIME, 0, 0);
 #endif
 		default:
 			BUG();
@@ -1166,10 +1231,10 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
 		switch(name){
 		case BLKIO_THROTL_io_service_bytes:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICE_BYTES, 1);
+						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
 		case BLKIO_THROTL_io_serviced:
 			return blkio_read_blkg_stats(blkcg, cft, cb,
-						BLKIO_STAT_SERVICED, 1);
+						BLKIO_STAT_CPU_SERVICED, 1, 1);
 		default:
 			BUG();
 		}
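The blk-cgroup.c changes above all serve one switch: dispatch accounting moves from a spinlock-protected counter set in struct blkio_group_stats to a per-CPU structure that the dispatch path updates locklessly and the cgroup file read path sums across CPUs. A minimal sketch of that pattern follows; the names disp_stats, stats_alloc, stats_account, stats_read_bytes and stats_free are illustrative, not part of the patch, and only the percpu helpers it calls (alloc_percpu(), this_cpu_ptr(), per_cpu_ptr(), for_each_possible_cpu(), free_percpu()) are the real kernel APIs the patch itself relies on.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical per-CPU counter block, mirroring struct blkio_group_stats_cpu. */
struct disp_stats {
	uint64_t sectors;
	uint64_t bytes;
};

static struct disp_stats __percpu *stats;

/* Must run in sleepable context, like blkio_alloc_blkg_stats(). */
static int stats_alloc(void)
{
	stats = alloc_percpu(struct disp_stats);
	return stats ? 0 : -ENOMEM;
}

/*
 * Update path: touch only this CPU's copy; no spinlock and no IRQ flags.
 * As the comment added above blkiocg_update_dispatch_stats() notes, the
 * caller still needs the RCU read lock or the queue lock so the owning
 * object (and therefore the percpu pointer) stays valid.
 */
static void stats_account(uint64_t nr_bytes)
{
	struct disp_stats *s = this_cpu_ptr(stats);

	s->sectors += nr_bytes >> 9;
	s->bytes += nr_bytes;
}

/* Read path: sum every possible CPU's copy, as blkio_read_stat_cpu() does. */
static uint64_t stats_read_bytes(void)
{
	uint64_t total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->bytes;
	return total;
}

static void stats_free(void)
{
	free_percpu(stats);
}

The read side deliberately walks every possible CPU, not just the online ones, so counts accumulated on a CPU that has since gone offline are not lost.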
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 63f1ef4450d7..fd730a24b491 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -36,10 +36,6 @@ enum stat_type {
 	 * request completion for IOs doen by this cgroup. This may not be
 	 * accurate when NCQ is turned on. */
 	BLKIO_STAT_SERVICE_TIME = 0,
-	/* Total bytes transferred */
-	BLKIO_STAT_SERVICE_BYTES,
-	/* Total IOs serviced, post merge */
-	BLKIO_STAT_SERVICED,
 	/* Total time spent waiting in scheduler queue in ns */
 	BLKIO_STAT_WAIT_TIME,
 	/* Number of IOs merged */
@@ -48,7 +44,6 @@ enum stat_type {
 	BLKIO_STAT_QUEUED,
 	/* All the single valued stats go below this */
 	BLKIO_STAT_TIME,
-	BLKIO_STAT_SECTORS,
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Time not charged to this cgroup */
 	BLKIO_STAT_UNACCOUNTED_TIME,
@@ -60,6 +55,16 @@ enum stat_type {
 #endif
 };
 
+/* Per cpu stats */
+enum stat_type_cpu {
+	BLKIO_STAT_CPU_SECTORS,
+	/* Total bytes transferred */
+	BLKIO_STAT_CPU_SERVICE_BYTES,
+	/* Total IOs serviced, post merge */
+	BLKIO_STAT_CPU_SERVICED,
+	BLKIO_STAT_CPU_NR
+};
+
 enum stat_sub_type {
 	BLKIO_STAT_READ = 0,
 	BLKIO_STAT_WRITE,
@@ -116,7 +121,6 @@ struct blkio_cgroup {
 struct blkio_group_stats {
 	/* total disk time and nr sectors dispatched by this group */
 	uint64_t time;
-	uint64_t sectors;
 	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Time not charged to this cgroup */
@@ -146,6 +150,12 @@ struct blkio_group_stats {
 #endif
 };
 
+/* Per cpu blkio group stats */
+struct blkio_group_stats_cpu {
+	uint64_t sectors;
+	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
+};
+
 struct blkio_group {
 	/* An rcu protected unique identifier for the group */
 	void *key;
@@ -161,6 +171,8 @@ struct blkio_group {
 	/* Need to serialize the stats in the case of reset/update */
 	spinlock_t stats_lock;
 	struct blkio_group_stats stats;
+	/* Per cpu stats pointer */
+	struct blkio_group_stats_cpu __percpu *stats_cpu;
 };
 
 struct blkio_policy_node {
@@ -296,6 +308,7 @@ extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 	struct blkio_group *blkg, void *key, dev_t dev,
 	enum blkio_policy_id plid);
+extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 						void *key);
@@ -323,6 +336,8 @@ static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 		struct blkio_group *blkg, void *key, dev_t dev,
 		enum blkio_policy_id plid) {}
 
+static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
+
 static inline int
 blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
 
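The header now splits the stats in two: blkio_group_stats keeps the lock-protected, single-copy counters, while blkio_group_stats_cpu holds the per-CPU sectors count plus a small two-dimensional array indexed first by enum stat_type_cpu and then by enum stat_sub_type. A sketch of how one serviced request might be bucketed into that array; the helper name account_one is made up, and it assumes the sub-type enum continues with BLKIO_STAT_SYNC and BLKIO_STAT_ASYNC before BLKIO_STAT_TOTAL, which is how blkio_add_stat() appears to be used by its callers in blk-cgroup.c.

#include "blk-cgroup.h"

/* Illustrative only: account one serviced request of 'bytes' bytes. */
static void account_one(struct blkio_group_stats_cpu *sc, uint64_t bytes,
			bool direction, bool sync)
{
	enum stat_sub_type dir = direction ? BLKIO_STAT_WRITE : BLKIO_STAT_READ;
	enum stat_sub_type syn = sync ? BLKIO_STAT_SYNC : BLKIO_STAT_ASYNC;

	/* one request, counted by direction and by sync-ness */
	sc->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED][dir] += 1;
	sc->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED][syn] += 1;
	sc->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES][dir] += bytes;
	sc->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES][syn] += bytes;
}

The Total line users see in blkio.io_service_bytes is not stored here; blkio_get_stat_cpu() derives it at read time as READ plus WRITE.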
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 90ad40735f73..c29a5a8cc18c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -158,6 +158,7 @@ static void throtl_free_tg(struct rcu_head *head)
 	struct throtl_grp *tg;
 
 	tg = container_of(head, struct throtl_grp, rcu_head);
+	free_percpu(tg->blkg.stats_cpu);
 	kfree(tg);
 }
 
@@ -249,11 +250,19 @@ static void throtl_init_add_tg_lists(struct throtl_data *td,
 static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL;
+	int ret;
 
 	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
 	if (!tg)
 		return NULL;
 
+	ret = blkio_alloc_blkg_stats(&tg->blkg);
+
+	if (ret) {
+		kfree(tg);
+		return NULL;
+	}
+
 	throtl_init_group(tg);
 	return tg;
 }
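With the stats now backed by a separate percpu allocation, blk-throttle has to pair it with two lifetime rules, which the hunks above implement: undo the kzalloc if blkio_alloc_blkg_stats() fails, and release the percpu area only in the RCU callback that frees the group, since updaters may still be dereferencing blkg.stats_cpu under the RCU read lock. A compressed sketch of that pairing, with a made-up struct grp standing in for throtl_grp:

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/percpu.h>
#include "blk-cgroup.h"

struct grp {					/* stand-in for struct throtl_grp */
	struct blkio_group blkg;
	struct rcu_head rcu_head;
};

static struct grp *grp_alloc(int node)
{
	struct grp *g = kzalloc_node(sizeof(*g), GFP_ATOMIC, node);

	if (!g)
		return NULL;
	if (blkio_alloc_blkg_stats(&g->blkg)) {
		kfree(g);			/* roll back the partial init */
		return NULL;
	}
	return g;
}

static void grp_free_rcu(struct rcu_head *head)
{
	struct grp *g = container_of(head, struct grp, rcu_head);

	free_percpu(g->blkg.stats_cpu);		/* pairs with alloc_percpu() */
	kfree(g);
}

static void grp_release(struct grp *g)
{
	/* defer the actual free past a grace period, as throtl_free_tg() does */
	call_rcu(&g->rcu_head, grp_free_rcu);
}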
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 606020fe93f3..d646b279c8bb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1051,7 +1051,7 @@ static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
 static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 {
 	struct cfq_group *cfqg = NULL;
-	int i, j;
+	int i, j, ret;
 	struct cfq_rb_root *st;
 
 	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
@@ -1069,6 +1069,13 @@ static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
 	cfqg->ref = 1;
+
+	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
+	if (ret) {
+		kfree(cfqg);
+		return NULL;
+	}
+
 	return cfqg;
 }
 
1074 | 1081 | ||
@@ -1183,6 +1190,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg) | |||
1183 | return; | 1190 | return; |
1184 | for_each_cfqg_st(cfqg, i, j, st) | 1191 | for_each_cfqg_st(cfqg, i, j, st) |
1185 | BUG_ON(!RB_EMPTY_ROOT(&st->rb)); | 1192 | BUG_ON(!RB_EMPTY_ROOT(&st->rb)); |
1193 | free_percpu(cfqg->blkg.stats_cpu); | ||
1186 | kfree(cfqg); | 1194 | kfree(cfqg); |
1187 | } | 1195 | } |
1188 | 1196 | ||
@@ -3995,7 +4003,15 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * throtl_data goes away.
 	 */
 	cfqg->ref = 2;
+
+	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
+		kfree(cfqg);
+		kfree(cfqd);
+		return NULL;
+	}
+
 	rcu_read_lock();
+
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
 	rcu_read_unlock();
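One caveat worth keeping in mind about the new read side: blkio_read_stat_cpu() sums raw uint64_t counters while other CPUs may still be updating them, which is harmless on 64-bit but can observe torn values on 32-bit SMP, where a 64-bit store is not a single atomic access. The patch itself does not guard against that; the sketch below is only an illustration of the usual way such a window is closed with the kernel's u64_stats_sync helpers, assuming the writer has per-CPU exclusivity (for example, preemption disabled).

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct pcpu_stat {
	uint64_t bytes;
	struct u64_stats_sync syncp;	/* compiles away on 64-bit kernels */
};

/* Writer: per-CPU, serialized against itself (e.g. preemption disabled). */
static void stat_add(struct pcpu_stat *s, uint64_t bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry if a writer was mid-update, so no torn 64-bit value is seen. */
static uint64_t stat_snapshot(struct pcpu_stat *s)
{
	unsigned int start;
	uint64_t val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}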