diff options
author | Christoph Hellwig <hch@lst.de> | 2019-06-06 06:26:22 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-06-20 12:32:34 -0400 |
commit | c0ce79dca5b0e8373a546ebea2af7b3df94c584e (patch) | |
tree | 2039ddde7e970451a75d78b433177b0b42199282 /block | |
parent | 7af6fd9112ba310a889c60d0606b4b74049cfe14 (diff) |
blk-cgroup: move struct blkg_stat to bfq
This structure and its assorted infrastructure are only used by the bfq I/O
scheduler. Move them there instead of bloating the common code.
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/bfq-cgroup.c | 192 | ||||
-rw-r--r-- | block/bfq-iosched.h | 19 | ||||
-rw-r--r-- | block/blk-cgroup.c | 56 |
3 files changed, 167 insertions, 100 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 624374a99c6e..a691dca7e966 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c | |||
@@ -17,6 +17,124 @@ | |||
17 | 17 | ||
18 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) | 18 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) |
19 | 19 | ||
20 | static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp) | ||
21 | { | ||
22 | int ret; | ||
23 | |||
24 | ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); | ||
25 | if (ret) | ||
26 | return ret; | ||
27 | |||
28 | atomic64_set(&stat->aux_cnt, 0); | ||
29 | return 0; | ||
30 | } | ||
31 | |||
32 | static void bfq_stat_exit(struct bfq_stat *stat) | ||
33 | { | ||
34 | percpu_counter_destroy(&stat->cpu_cnt); | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * bfq_stat_add - add a value to a bfq_stat | ||
39 | * @stat: target bfq_stat | ||
40 | * @val: value to add | ||
41 | * | ||
42 | * Add @val to @stat. The caller must ensure that IRQ on the same CPU | ||
43 | * don't re-enter this function for the same counter. | ||
44 | */ | ||
45 | static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val) | ||
46 | { | ||
47 | percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * bfq_stat_read - read the current value of a bfq_stat | ||
52 | * @stat: bfq_stat to read | ||
53 | */ | ||
54 | static inline uint64_t bfq_stat_read(struct bfq_stat *stat) | ||
55 | { | ||
56 | return percpu_counter_sum_positive(&stat->cpu_cnt); | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * bfq_stat_reset - reset a bfq_stat | ||
61 | * @stat: bfq_stat to reset | ||
62 | */ | ||
63 | static inline void bfq_stat_reset(struct bfq_stat *stat) | ||
64 | { | ||
65 | percpu_counter_set(&stat->cpu_cnt, 0); | ||
66 | atomic64_set(&stat->aux_cnt, 0); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * bfq_stat_add_aux - add a bfq_stat into another's aux count | ||
71 | * @to: the destination bfq_stat | ||
72 | * @from: the source | ||
73 | * | ||
74 | * Add @from's count including the aux one to @to's aux count. | ||
75 | */ | ||
76 | static inline void bfq_stat_add_aux(struct bfq_stat *to, | ||
77 | struct bfq_stat *from) | ||
78 | { | ||
79 | atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt), | ||
80 | &to->aux_cnt); | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * bfq_stat_recursive_sum - collect hierarchical bfq_stat | ||
85 | * @blkg: blkg of interest | ||
86 | * @pol: blkcg_policy which contains the bfq_stat | ||
87 | * @off: offset to the bfq_stat in blkg_policy_data or @blkg | ||
88 | * | ||
89 | * Collect the bfq_stat specified by @blkg, @pol and @off and all its | ||
90 | * online descendants and their aux counts. The caller must be holding the | ||
91 | * queue lock for online tests. | ||
92 | * | ||
93 | * If @pol is NULL, bfq_stat is at @off bytes into @blkg; otherwise, it is | ||
94 | * at @off bytes into @blkg's blkg_policy_data of the policy. | ||
95 | */ | ||
96 | static u64 bfq_stat_recursive_sum(struct blkcg_gq *blkg, | ||
97 | struct blkcg_policy *pol, int off) | ||
98 | { | ||
99 | struct blkcg_gq *pos_blkg; | ||
100 | struct cgroup_subsys_state *pos_css; | ||
101 | u64 sum = 0; | ||
102 | |||
103 | lockdep_assert_held(&blkg->q->queue_lock); | ||
104 | |||
105 | rcu_read_lock(); | ||
106 | blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { | ||
107 | struct bfq_stat *stat; | ||
108 | |||
109 | if (!pos_blkg->online) | ||
110 | continue; | ||
111 | |||
112 | if (pol) | ||
113 | stat = (void *)blkg_to_pd(pos_blkg, pol) + off; | ||
114 | else | ||
115 | stat = (void *)blkg + off; | ||
116 | |||
117 | sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt); | ||
118 | } | ||
119 | rcu_read_unlock(); | ||
120 | |||
121 | return sum; | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * blkg_prfill_stat - prfill callback for bfq_stat | ||
126 | * @sf: seq_file to print to | ||
127 | * @pd: policy private data of interest | ||
128 | * @off: offset to the bfq_stat in @pd | ||
129 | * | ||
130 | * prfill callback for printing a bfq_stat. | ||
131 | */ | ||
132 | static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, | ||
133 | int off) | ||
134 | { | ||
135 | return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off)); | ||
136 | } | ||
137 | |||
20 | /* bfqg stats flags */ | 138 | /* bfqg stats flags */ |
21 | enum bfqg_stats_flags { | 139 | enum bfqg_stats_flags { |
22 | BFQG_stats_waiting = 0, | 140 | BFQG_stats_waiting = 0, |
@@ -53,7 +171,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) | |||
53 | 171 | ||
54 | now = ktime_get_ns(); | 172 | now = ktime_get_ns(); |
55 | if (now > stats->start_group_wait_time) | 173 | if (now > stats->start_group_wait_time) |
56 | blkg_stat_add(&stats->group_wait_time, | 174 | bfq_stat_add(&stats->group_wait_time, |
57 | now - stats->start_group_wait_time); | 175 | now - stats->start_group_wait_time); |
58 | bfqg_stats_clear_waiting(stats); | 176 | bfqg_stats_clear_waiting(stats); |
59 | } | 177 | } |
@@ -82,14 +200,14 @@ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) | |||
82 | 200 | ||
83 | now = ktime_get_ns(); | 201 | now = ktime_get_ns(); |
84 | if (now > stats->start_empty_time) | 202 | if (now > stats->start_empty_time) |
85 | blkg_stat_add(&stats->empty_time, | 203 | bfq_stat_add(&stats->empty_time, |
86 | now - stats->start_empty_time); | 204 | now - stats->start_empty_time); |
87 | bfqg_stats_clear_empty(stats); | 205 | bfqg_stats_clear_empty(stats); |
88 | } | 206 | } |
89 | 207 | ||
90 | void bfqg_stats_update_dequeue(struct bfq_group *bfqg) | 208 | void bfqg_stats_update_dequeue(struct bfq_group *bfqg) |
91 | { | 209 | { |
92 | blkg_stat_add(&bfqg->stats.dequeue, 1); | 210 | bfq_stat_add(&bfqg->stats.dequeue, 1); |
93 | } | 211 | } |
94 | 212 | ||
95 | void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) | 213 | void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) |
@@ -119,7 +237,7 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg) | |||
119 | u64 now = ktime_get_ns(); | 237 | u64 now = ktime_get_ns(); |
120 | 238 | ||
121 | if (now > stats->start_idle_time) | 239 | if (now > stats->start_idle_time) |
122 | blkg_stat_add(&stats->idle_time, | 240 | bfq_stat_add(&stats->idle_time, |
123 | now - stats->start_idle_time); | 241 | now - stats->start_idle_time); |
124 | bfqg_stats_clear_idling(stats); | 242 | bfqg_stats_clear_idling(stats); |
125 | } | 243 | } |
@@ -137,9 +255,9 @@ void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) | |||
137 | { | 255 | { |
138 | struct bfqg_stats *stats = &bfqg->stats; | 256 | struct bfqg_stats *stats = &bfqg->stats; |
139 | 257 | ||
140 | blkg_stat_add(&stats->avg_queue_size_sum, | 258 | bfq_stat_add(&stats->avg_queue_size_sum, |
141 | blkg_rwstat_total(&stats->queued)); | 259 | blkg_rwstat_total(&stats->queued)); |
142 | blkg_stat_add(&stats->avg_queue_size_samples, 1); | 260 | bfq_stat_add(&stats->avg_queue_size_samples, 1); |
143 | bfqg_stats_update_group_wait_time(stats); | 261 | bfqg_stats_update_group_wait_time(stats); |
144 | } | 262 | } |
145 | 263 | ||
@@ -279,13 +397,13 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) | |||
279 | blkg_rwstat_reset(&stats->merged); | 397 | blkg_rwstat_reset(&stats->merged); |
280 | blkg_rwstat_reset(&stats->service_time); | 398 | blkg_rwstat_reset(&stats->service_time); |
281 | blkg_rwstat_reset(&stats->wait_time); | 399 | blkg_rwstat_reset(&stats->wait_time); |
282 | blkg_stat_reset(&stats->time); | 400 | bfq_stat_reset(&stats->time); |
283 | blkg_stat_reset(&stats->avg_queue_size_sum); | 401 | bfq_stat_reset(&stats->avg_queue_size_sum); |
284 | blkg_stat_reset(&stats->avg_queue_size_samples); | 402 | bfq_stat_reset(&stats->avg_queue_size_samples); |
285 | blkg_stat_reset(&stats->dequeue); | 403 | bfq_stat_reset(&stats->dequeue); |
286 | blkg_stat_reset(&stats->group_wait_time); | 404 | bfq_stat_reset(&stats->group_wait_time); |
287 | blkg_stat_reset(&stats->idle_time); | 405 | bfq_stat_reset(&stats->idle_time); |
288 | blkg_stat_reset(&stats->empty_time); | 406 | bfq_stat_reset(&stats->empty_time); |
289 | #endif | 407 | #endif |
290 | } | 408 | } |
291 | 409 | ||
@@ -300,14 +418,14 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) | |||
300 | blkg_rwstat_add_aux(&to->merged, &from->merged); | 418 | blkg_rwstat_add_aux(&to->merged, &from->merged); |
301 | blkg_rwstat_add_aux(&to->service_time, &from->service_time); | 419 | blkg_rwstat_add_aux(&to->service_time, &from->service_time); |
302 | blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); | 420 | blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); |
303 | blkg_stat_add_aux(&from->time, &from->time); | 421 | bfq_stat_add_aux(&from->time, &from->time); |
304 | blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); | 422 | bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); |
305 | blkg_stat_add_aux(&to->avg_queue_size_samples, | 423 | bfq_stat_add_aux(&to->avg_queue_size_samples, |
306 | &from->avg_queue_size_samples); | 424 | &from->avg_queue_size_samples); |
307 | blkg_stat_add_aux(&to->dequeue, &from->dequeue); | 425 | bfq_stat_add_aux(&to->dequeue, &from->dequeue); |
308 | blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); | 426 | bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time); |
309 | blkg_stat_add_aux(&to->idle_time, &from->idle_time); | 427 | bfq_stat_add_aux(&to->idle_time, &from->idle_time); |
310 | blkg_stat_add_aux(&to->empty_time, &from->empty_time); | 428 | bfq_stat_add_aux(&to->empty_time, &from->empty_time); |
311 | #endif | 429 | #endif |
312 | } | 430 | } |
313 | 431 | ||
@@ -360,13 +478,13 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) | |||
360 | blkg_rwstat_exit(&stats->service_time); | 478 | blkg_rwstat_exit(&stats->service_time); |
361 | blkg_rwstat_exit(&stats->wait_time); | 479 | blkg_rwstat_exit(&stats->wait_time); |
362 | blkg_rwstat_exit(&stats->queued); | 480 | blkg_rwstat_exit(&stats->queued); |
363 | blkg_stat_exit(&stats->time); | 481 | bfq_stat_exit(&stats->time); |
364 | blkg_stat_exit(&stats->avg_queue_size_sum); | 482 | bfq_stat_exit(&stats->avg_queue_size_sum); |
365 | blkg_stat_exit(&stats->avg_queue_size_samples); | 483 | bfq_stat_exit(&stats->avg_queue_size_samples); |
366 | blkg_stat_exit(&stats->dequeue); | 484 | bfq_stat_exit(&stats->dequeue); |
367 | blkg_stat_exit(&stats->group_wait_time); | 485 | bfq_stat_exit(&stats->group_wait_time); |
368 | blkg_stat_exit(&stats->idle_time); | 486 | bfq_stat_exit(&stats->idle_time); |
369 | blkg_stat_exit(&stats->empty_time); | 487 | bfq_stat_exit(&stats->empty_time); |
370 | #endif | 488 | #endif |
371 | } | 489 | } |
372 | 490 | ||
@@ -377,13 +495,13 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) | |||
377 | blkg_rwstat_init(&stats->service_time, gfp) || | 495 | blkg_rwstat_init(&stats->service_time, gfp) || |
378 | blkg_rwstat_init(&stats->wait_time, gfp) || | 496 | blkg_rwstat_init(&stats->wait_time, gfp) || |
379 | blkg_rwstat_init(&stats->queued, gfp) || | 497 | blkg_rwstat_init(&stats->queued, gfp) || |
380 | blkg_stat_init(&stats->time, gfp) || | 498 | bfq_stat_init(&stats->time, gfp) || |
381 | blkg_stat_init(&stats->avg_queue_size_sum, gfp) || | 499 | bfq_stat_init(&stats->avg_queue_size_sum, gfp) || |
382 | blkg_stat_init(&stats->avg_queue_size_samples, gfp) || | 500 | bfq_stat_init(&stats->avg_queue_size_samples, gfp) || |
383 | blkg_stat_init(&stats->dequeue, gfp) || | 501 | bfq_stat_init(&stats->dequeue, gfp) || |
384 | blkg_stat_init(&stats->group_wait_time, gfp) || | 502 | bfq_stat_init(&stats->group_wait_time, gfp) || |
385 | blkg_stat_init(&stats->idle_time, gfp) || | 503 | bfq_stat_init(&stats->idle_time, gfp) || |
386 | blkg_stat_init(&stats->empty_time, gfp)) { | 504 | bfq_stat_init(&stats->empty_time, gfp)) { |
387 | bfqg_stats_exit(stats); | 505 | bfqg_stats_exit(stats); |
388 | return -ENOMEM; | 506 | return -ENOMEM; |
389 | } | 507 | } |
@@ -927,7 +1045,7 @@ static int bfqg_print_rwstat(struct seq_file *sf, void *v) | |||
927 | static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, | 1045 | static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, |
928 | struct blkg_policy_data *pd, int off) | 1046 | struct blkg_policy_data *pd, int off) |
929 | { | 1047 | { |
930 | u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd), | 1048 | u64 sum = bfq_stat_recursive_sum(pd_to_blkg(pd), |
931 | &blkcg_policy_bfq, off); | 1049 | &blkcg_policy_bfq, off); |
932 | return __blkg_prfill_u64(sf, pd, sum); | 1050 | return __blkg_prfill_u64(sf, pd, sum); |
933 | } | 1051 | } |
@@ -996,11 +1114,11 @@ static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, | |||
996 | struct blkg_policy_data *pd, int off) | 1114 | struct blkg_policy_data *pd, int off) |
997 | { | 1115 | { |
998 | struct bfq_group *bfqg = pd_to_bfqg(pd); | 1116 | struct bfq_group *bfqg = pd_to_bfqg(pd); |
999 | u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples); | 1117 | u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples); |
1000 | u64 v = 0; | 1118 | u64 v = 0; |
1001 | 1119 | ||
1002 | if (samples) { | 1120 | if (samples) { |
1003 | v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum); | 1121 | v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum); |
1004 | v = div64_u64(v, samples); | 1122 | v = div64_u64(v, samples); |
1005 | } | 1123 | } |
1006 | __blkg_prfill_u64(sf, pd, v); | 1124 | __blkg_prfill_u64(sf, pd, v); |
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index c2faa77824f8..aef4fa0046b8 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h | |||
@@ -777,6 +777,11 @@ enum bfqq_expiration { | |||
777 | BFQQE_PREEMPTED /* preemption in progress */ | 777 | BFQQE_PREEMPTED /* preemption in progress */ |
778 | }; | 778 | }; |
779 | 779 | ||
780 | struct bfq_stat { | ||
781 | struct percpu_counter cpu_cnt; | ||
782 | atomic64_t aux_cnt; | ||
783 | }; | ||
784 | |||
780 | struct bfqg_stats { | 785 | struct bfqg_stats { |
781 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) | 786 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) |
782 | /* number of ios merged */ | 787 | /* number of ios merged */ |
@@ -788,19 +793,19 @@ struct bfqg_stats { | |||
788 | /* number of IOs queued up */ | 793 | /* number of IOs queued up */ |
789 | struct blkg_rwstat queued; | 794 | struct blkg_rwstat queued; |
790 | /* total disk time and nr sectors dispatched by this group */ | 795 | /* total disk time and nr sectors dispatched by this group */ |
791 | struct blkg_stat time; | 796 | struct bfq_stat time; |
792 | /* sum of number of ios queued across all samples */ | 797 | /* sum of number of ios queued across all samples */ |
793 | struct blkg_stat avg_queue_size_sum; | 798 | struct bfq_stat avg_queue_size_sum; |
794 | /* count of samples taken for average */ | 799 | /* count of samples taken for average */ |
795 | struct blkg_stat avg_queue_size_samples; | 800 | struct bfq_stat avg_queue_size_samples; |
796 | /* how many times this group has been removed from service tree */ | 801 | /* how many times this group has been removed from service tree */ |
797 | struct blkg_stat dequeue; | 802 | struct bfq_stat dequeue; |
798 | /* total time spent waiting for it to be assigned a timeslice. */ | 803 | /* total time spent waiting for it to be assigned a timeslice. */ |
799 | struct blkg_stat group_wait_time; | 804 | struct bfq_stat group_wait_time; |
800 | /* time spent idling for this blkcg_gq */ | 805 | /* time spent idling for this blkcg_gq */ |
801 | struct blkg_stat idle_time; | 806 | struct bfq_stat idle_time; |
802 | /* total time with empty current active q with other requests queued */ | 807 | /* total time with empty current active q with other requests queued */ |
803 | struct blkg_stat empty_time; | 808 | struct bfq_stat empty_time; |
804 | /* fields after this shouldn't be cleared on stat reset */ | 809 | /* fields after this shouldn't be cleared on stat reset */ |
805 | u64 start_group_wait_time; | 810 | u64 start_group_wait_time; |
806 | u64 start_idle_time; | 811 | u64 start_idle_time; |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 664c09866839..53b7bd4c7000 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -573,20 +573,6 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | |||
573 | EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat); | 573 | EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat); |
574 | 574 | ||
575 | /** | 575 | /** |
576 | * blkg_prfill_stat - prfill callback for blkg_stat | ||
577 | * @sf: seq_file to print to | ||
578 | * @pd: policy private data of interest | ||
579 | * @off: offset to the blkg_stat in @pd | ||
580 | * | ||
581 | * prfill callback for printing a blkg_stat. | ||
582 | */ | ||
583 | u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) | ||
584 | { | ||
585 | return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off)); | ||
586 | } | ||
587 | EXPORT_SYMBOL_GPL(blkg_prfill_stat); | ||
588 | |||
589 | /** | ||
590 | * blkg_prfill_rwstat - prfill callback for blkg_rwstat | 576 | * blkg_prfill_rwstat - prfill callback for blkg_rwstat |
591 | * @sf: seq_file to print to | 577 | * @sf: seq_file to print to |
592 | * @pd: policy private data of interest | 578 | * @pd: policy private data of interest |
@@ -688,48 +674,6 @@ int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v) | |||
688 | EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive); | 674 | EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive); |
689 | 675 | ||
690 | /** | 676 | /** |
691 | * blkg_stat_recursive_sum - collect hierarchical blkg_stat | ||
692 | * @blkg: blkg of interest | ||
693 | * @pol: blkcg_policy which contains the blkg_stat | ||
694 | * @off: offset to the blkg_stat in blkg_policy_data or @blkg | ||
695 | * | ||
696 | * Collect the blkg_stat specified by @blkg, @pol and @off and all its | ||
697 | * online descendants and their aux counts. The caller must be holding the | ||
698 | * queue lock for online tests. | ||
699 | * | ||
700 | * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is | ||
701 | * at @off bytes into @blkg's blkg_policy_data of the policy. | ||
702 | */ | ||
703 | u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, | ||
704 | struct blkcg_policy *pol, int off) | ||
705 | { | ||
706 | struct blkcg_gq *pos_blkg; | ||
707 | struct cgroup_subsys_state *pos_css; | ||
708 | u64 sum = 0; | ||
709 | |||
710 | lockdep_assert_held(&blkg->q->queue_lock); | ||
711 | |||
712 | rcu_read_lock(); | ||
713 | blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { | ||
714 | struct blkg_stat *stat; | ||
715 | |||
716 | if (!pos_blkg->online) | ||
717 | continue; | ||
718 | |||
719 | if (pol) | ||
720 | stat = (void *)blkg_to_pd(pos_blkg, pol) + off; | ||
721 | else | ||
722 | stat = (void *)blkg + off; | ||
723 | |||
724 | sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt); | ||
725 | } | ||
726 | rcu_read_unlock(); | ||
727 | |||
728 | return sum; | ||
729 | } | ||
730 | EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum); | ||
731 | |||
732 | /** | ||
733 | * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat | 677 | * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat |
734 | * @blkg: blkg of interest | 678 | * @blkg: blkg of interest |
735 | * @pol: blkcg_policy which contains the blkg_rwstat | 679 | * @pol: blkcg_policy which contains the blkg_rwstat |