path: root/block/blk-cgroup.c
author     Tejun Heo <tj@kernel.org>  2012-03-08 13:54:00 -0500
committer  Jens Axboe <axboe@kernel.dk>  2012-03-20 07:45:37 -0400
commit     edf1b879e308d37b5b7c414476ab39f79650a253
tree       fec2895e806eb357ba9e9ed960f8c247042d5383 /block/blk-cgroup.c
parent     c4c76a05382c7d05e0b911daa58a827399e9ba1a
blkcg: remove blkio_group->stats_lock
With recent plug merge updates, all non-percpu stat updates happen under
queue_lock making stats_lock unnecessary to synchronize stat updates.

The only synchronization necessary is stat reading, which can be done using
u64_stats_sync instead. This patch removes blkio_group->stats_lock and adds
blkio_group_stats->syncp for reader synchronization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
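For reference, the pattern the message describes looks roughly like the sketch
below: writers, already serialized by queue_lock, bracket their updates with
u64_stats_update_begin()/u64_stats_update_end(), while readers loop with
u64_stats_fetch_begin()/u64_stats_fetch_retry() instead of taking a spinlock.
This is a minimal illustrative sketch, not code from the patch; the demo_*
names are made up.

/*
 * Illustrative sketch only (hypothetical demo_* names): a stat word is
 * written under the queue_lock the caller already holds, and read
 * locklessly via the u64_stats seqcount.
 */
#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct demo_stats {
	struct u64_stats_sync syncp;
	u64 serviced_bytes;
};

/* Writer side: caller holds queue_lock, so updates never race each other. */
static void demo_add_bytes(struct demo_stats *st, u64 add)
{
	u64_stats_update_begin(&st->syncp);
	st->serviced_bytes += add;
	u64_stats_update_end(&st->syncp);
}

/* Reader side: no lock; retry if the read raced with an update. */
static u64 demo_read_bytes(struct demo_stats *st)
{
	unsigned int start;
	u64 v;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		v = st->serviced_bytes;
	} while (u64_stats_fetch_retry(&st->syncp, start));

	return v;
}

On 64-bit kernels the syncp helpers compile away to nothing, so the writer
fast path pays no extra cost; only 32-bit readers need the retry loop to see
a consistent 64-bit value.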
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  209
1 file changed, 102 insertions, 107 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 80887bc3a049..b15a51711bca 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -156,7 +156,7 @@ static inline void blkio_update_group_iops(struct blkio_group *blkg,
 
 /*
  * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
+ * This should be called with queue_lock held.
  */
 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
 			   bool sync)
@@ -174,7 +174,7 @@ static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
 /*
  * Decrements the appropriate stat variable if non-zero depending on the
  * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
+ * This should be called with the queue_lock held.
  */
 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
 {
@@ -195,7 +195,7 @@ static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
 }
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
+/* This should be called with the queue_lock held. */
 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
 					    struct blkio_policy_type *pol,
 					    struct blkio_group *curr_blkg)
@@ -210,7 +210,7 @@ static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
 	blkio_mark_blkg_waiting(&pd->stats);
 }
 
-/* This should be called with the blkg->stats_lock held. */
+/* This should be called with the queue_lock held. */
 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
 {
 	unsigned long long now;
@@ -224,7 +224,7 @@ static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
 	blkio_clear_blkg_waiting(stats);
 }
 
-/* This should be called with the blkg->stats_lock held. */
+/* This should be called with the queue_lock held. */
 static void blkio_end_empty_time(struct blkio_group_stats *stats)
 {
 	unsigned long long now;
@@ -241,84 +241,74 @@ static void blkio_end_empty_time(struct blkio_group_stats *stats)
 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
 					struct blkio_policy_type *pol)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	BUG_ON(blkio_blkg_idling(&pd->stats));
-	pd->stats.start_idle_time = sched_clock();
-	blkio_mark_blkg_idling(&pd->stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	lockdep_assert_held(blkg->q->queue_lock);
+	BUG_ON(blkio_blkg_idling(stats));
+
+	stats->start_idle_time = sched_clock();
+	blkio_mark_blkg_idling(stats);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
 
 void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
 				    struct blkio_policy_type *pol)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
-	unsigned long long now;
-	struct blkio_group_stats *stats;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &pd->stats;
 	if (blkio_blkg_idling(stats)) {
-		now = sched_clock();
-		if (time_after64(now, stats->start_idle_time))
+		unsigned long long now = sched_clock();
+
+		if (time_after64(now, stats->start_idle_time)) {
+			u64_stats_update_begin(&stats->syncp);
 			stats->idle_time += now - stats->start_idle_time;
+			u64_stats_update_end(&stats->syncp);
+		}
 		blkio_clear_blkg_idling(stats);
 	}
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
 
 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
 					 struct blkio_policy_type *pol)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
-	struct blkio_group_stats *stats;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &pd->stats;
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	u64_stats_update_begin(&stats->syncp);
 	stats->avg_queue_size_sum +=
 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
 	stats->avg_queue_size_samples++;
 	blkio_update_group_wait_time(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	u64_stats_update_end(&stats->syncp);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
 void blkiocg_set_start_empty_time(struct blkio_group *blkg,
 				  struct blkio_policy_type *pol)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
-	struct blkio_group_stats *stats;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &pd->stats;
+	lockdep_assert_held(blkg->q->queue_lock);
 
 	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
 		return;
-	}
 
 	/*
 	 * group is already marked empty. This can happen if cfqq got new
 	 * request in parent group and moved to this group while being added
 	 * to service tree. Just ignore the event and move on.
 	 */
-	if(blkio_blkg_empty(stats)) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	if (blkio_blkg_empty(stats))
 		return;
-	}
 
 	stats->start_empty_time = sched_clock();
 	blkio_mark_blkg_empty(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
 
@@ -328,6 +318,8 @@ void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 {
 	struct blkg_policy_data *pd = blkg->pd[pol->plid];
 
+	lockdep_assert_held(blkg->q->queue_lock);
+
 	pd->stats.dequeue += dequeue;
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
@@ -343,15 +335,16 @@ void blkiocg_update_io_add_stats(struct blkio_group *blkg,
 					struct blkio_group *curr_blkg, bool direction,
 					bool sync)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	u64_stats_update_begin(&stats->syncp);
+	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
+	blkio_end_empty_time(stats);
+	u64_stats_update_end(&stats->syncp);
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
-			sync);
-	blkio_end_empty_time(&pd->stats);
 	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
 
@@ -359,13 +352,14 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 				    struct blkio_policy_type *pol,
 				    bool direction, bool sync)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
-					direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	u64_stats_update_begin(&stats->syncp);
+	blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
+				 sync);
+	u64_stats_update_end(&stats->syncp);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 
@@ -374,15 +368,16 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg,
 				   unsigned long time,
 				   unsigned long unaccounted_time)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	pd->stats.time += time;
+	u64_stats_update_begin(&stats->syncp);
+	stats->time += time;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-	pd->stats.unaccounted_time += unaccounted_time;
+	stats->unaccounted_time += unaccounted_time;
 #endif
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	u64_stats_update_end(&stats->syncp);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
@@ -428,20 +423,19 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
 					uint64_t io_start_time, bool direction,
 					bool sync)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	struct blkio_group_stats *stats;
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 	unsigned long long now = sched_clock();
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &pd->stats;
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	u64_stats_update_begin(&stats->syncp);
 	if (time_after64(now, io_start_time))
 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
 				now - io_start_time, direction, sync);
 	if (time_after64(io_start_time, start_time))
 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
 				io_start_time - start_time, direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	u64_stats_update_end(&stats->syncp);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
 
@@ -450,14 +444,13 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
 				    struct blkio_policy_type *pol,
 				    bool direction, bool sync)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	struct blkio_group_stats *stats;
-	unsigned long flags;
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &pd->stats;
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	u64_stats_update_begin(&stats->syncp);
 	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+	u64_stats_update_end(&stats->syncp);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
@@ -558,7 +551,6 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 	if (!blkg)
 		return NULL;
 
-	spin_lock_init(&blkg->stats_lock);
 	blkg->q = q;
 	INIT_LIST_HEAD(&blkg->q_node);
 	INIT_LIST_HEAD(&blkg->alloc_node);
@@ -929,7 +921,6 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
 	return disk_total;
 }
 
-/* This should be called with blkg->stats_lock held */
 static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
 			       struct cgroup_map_cb *cb, const char *dname,
 			       enum stat_type type)
@@ -937,42 +928,46 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
 	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
 	uint64_t v = 0, disk_total = 0;
 	char key_str[MAX_KEY_LEN];
+	unsigned int sync_start;
 	int st;
 
 	if (type >= BLKIO_STAT_ARR_NR) {
-		switch (type) {
-		case BLKIO_STAT_TIME:
-			v = stats->time;
-			break;
+		do {
+			sync_start = u64_stats_fetch_begin(&stats->syncp);
+			switch (type) {
+			case BLKIO_STAT_TIME:
+				v = stats->time;
+				break;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-		case BLKIO_STAT_UNACCOUNTED_TIME:
-			v = stats->unaccounted_time;
-			break;
-		case BLKIO_STAT_AVG_QUEUE_SIZE: {
-			uint64_t samples = stats->avg_queue_size_samples;
+			case BLKIO_STAT_UNACCOUNTED_TIME:
+				v = stats->unaccounted_time;
+				break;
+			case BLKIO_STAT_AVG_QUEUE_SIZE: {
+				uint64_t samples = stats->avg_queue_size_samples;
 
-			if (samples) {
-				v = stats->avg_queue_size_sum;
-				do_div(v, samples);
-			}
-			break;
-		}
-		case BLKIO_STAT_IDLE_TIME:
-			v = stats->idle_time;
-			break;
-		case BLKIO_STAT_EMPTY_TIME:
-			v = stats->empty_time;
-			break;
-		case BLKIO_STAT_DEQUEUE:
-			v = stats->dequeue;
-			break;
-		case BLKIO_STAT_GROUP_WAIT_TIME:
-			v = stats->group_wait_time;
-			break;
+				if (samples) {
+					v = stats->avg_queue_size_sum;
+					do_div(v, samples);
+				}
+				break;
+			}
+			case BLKIO_STAT_IDLE_TIME:
+				v = stats->idle_time;
+				break;
+			case BLKIO_STAT_EMPTY_TIME:
+				v = stats->empty_time;
+				break;
+			case BLKIO_STAT_DEQUEUE:
+				v = stats->dequeue;
+				break;
+			case BLKIO_STAT_GROUP_WAIT_TIME:
+				v = stats->group_wait_time;
+				break;
 #endif
-		default:
-			WARN_ON_ONCE(1);
-		}
+			default:
+				WARN_ON_ONCE(1);
+			}
+		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));
 
 		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
 		cb->fill(cb, key_str, v);
@@ -980,7 +975,10 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
 	}
 
 	for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
-		v = stats->stat_arr[type][st];
+		do {
+			sync_start = u64_stats_fetch_begin(&stats->syncp);
+			v = stats->stat_arr[type][st];
+		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));
 
 		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
 		cb->fill(cb, key_str, v);
@@ -1250,15 +1248,12 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
 
 		if (!dname)
 			continue;
-		if (pcpu) {
+		if (pcpu)
 			cgroup_total += blkio_get_stat_cpu(blkg, plid,
 							   cb, dname, type);
-		} else {
-			spin_lock(&blkg->stats_lock);
+		else
 			cgroup_total += blkio_get_stat(blkg, plid,
 						       cb, dname, type);
-			spin_unlock(&blkg->stats_lock);
-		}
 	}
 	if (show_total)
 		cb->fill(cb, "Total", cgroup_total);