about summary refs log tree commit diff stats
path: root/block/blk-cgroup.c
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>    2012-03-05 16:15:17 -0500
committer Jens Axboe <axboe@kernel.dk> 2012-03-06 15:27:23 -0500
commit c1768268f9424410761da57ea71107acae7b03cc (patch)
tree   be6a534b1a15ab9df9f23e585b039776c5a5e498 /block/blk-cgroup.c
parent 549d3aa872cd1aec1ee540fd93afd9611faa0def (diff)
blkcg: don't use blkg->plid in stat related functions
blkg is scheduled to be unified for all policies and thus there won't be
one-to-one mapping from blkg to policy. Update stat related functions to
take explicit @pol or @plid arguments and not use blkg->plid.

This is painful for now but most of specific stat interface functions
will be replaced with a handful of generic helpers.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  |  150
1 files changed, 85 insertions, 65 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0eb39981e7c2..91f9824be5cc 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -78,14 +78,14 @@ struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
78} 78}
79EXPORT_SYMBOL_GPL(task_blkio_cgroup); 79EXPORT_SYMBOL_GPL(task_blkio_cgroup);
80 80
81static inline void 81static inline void blkio_update_group_weight(struct blkio_group *blkg,
82blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) 82 int plid, unsigned int weight)
83{ 83{
84 struct blkio_policy_type *blkiop; 84 struct blkio_policy_type *blkiop;
85 85
86 list_for_each_entry(blkiop, &blkio_list, list) { 86 list_for_each_entry(blkiop, &blkio_list, list) {
87 /* If this policy does not own the blkg, do not send updates */ 87 /* If this policy does not own the blkg, do not send updates */
88 if (blkiop->plid != blkg->plid) 88 if (blkiop->plid != plid)
89 continue; 89 continue;
90 if (blkiop->ops.blkio_update_group_weight_fn) 90 if (blkiop->ops.blkio_update_group_weight_fn)
91 blkiop->ops.blkio_update_group_weight_fn(blkg->q, 91 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
@@ -93,15 +93,15 @@ blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
93 } 93 }
94} 94}
95 95
96static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps, 96static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
97 int fileid) 97 u64 bps, int fileid)
98{ 98{
99 struct blkio_policy_type *blkiop; 99 struct blkio_policy_type *blkiop;
100 100
101 list_for_each_entry(blkiop, &blkio_list, list) { 101 list_for_each_entry(blkiop, &blkio_list, list) {
102 102
103 /* If this policy does not own the blkg, do not send updates */ 103 /* If this policy does not own the blkg, do not send updates */
104 if (blkiop->plid != blkg->plid) 104 if (blkiop->plid != plid)
105 continue; 105 continue;
106 106
107 if (fileid == BLKIO_THROTL_read_bps_device 107 if (fileid == BLKIO_THROTL_read_bps_device
@@ -117,14 +117,15 @@ static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
117} 117}
118 118
119static inline void blkio_update_group_iops(struct blkio_group *blkg, 119static inline void blkio_update_group_iops(struct blkio_group *blkg,
120 unsigned int iops, int fileid) 120 int plid, unsigned int iops,
121 int fileid)
121{ 122{
122 struct blkio_policy_type *blkiop; 123 struct blkio_policy_type *blkiop;
123 124
124 list_for_each_entry(blkiop, &blkio_list, list) { 125 list_for_each_entry(blkiop, &blkio_list, list) {
125 126
126 /* If this policy does not own the blkg, do not send updates */ 127 /* If this policy does not own the blkg, do not send updates */
127 if (blkiop->plid != blkg->plid) 128 if (blkiop->plid != plid)
128 continue; 129 continue;
129 130
130 if (fileid == BLKIO_THROTL_read_iops_device 131 if (fileid == BLKIO_THROTL_read_iops_device
@@ -182,9 +183,10 @@ static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
182#ifdef CONFIG_DEBUG_BLK_CGROUP 183#ifdef CONFIG_DEBUG_BLK_CGROUP
183/* This should be called with the blkg->stats_lock held. */ 184/* This should be called with the blkg->stats_lock held. */
184static void blkio_set_start_group_wait_time(struct blkio_group *blkg, 185static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
185 struct blkio_group *curr_blkg) 186 struct blkio_policy_type *pol,
187 struct blkio_group *curr_blkg)
186{ 188{
187 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 189 struct blkg_policy_data *pd = blkg->pd[pol->plid];
188 190
189 if (blkio_blkg_waiting(&pd->stats)) 191 if (blkio_blkg_waiting(&pd->stats))
190 return; 192 return;
@@ -222,9 +224,10 @@ static void blkio_end_empty_time(struct blkio_group_stats *stats)
222 blkio_clear_blkg_empty(stats); 224 blkio_clear_blkg_empty(stats);
223} 225}
224 226
225void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) 227void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
228 struct blkio_policy_type *pol)
226{ 229{
227 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 230 struct blkg_policy_data *pd = blkg->pd[pol->plid];
228 unsigned long flags; 231 unsigned long flags;
229 232
230 spin_lock_irqsave(&blkg->stats_lock, flags); 233 spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -235,9 +238,10 @@ void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
235} 238}
236EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats); 239EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
237 240
238void blkiocg_update_idle_time_stats(struct blkio_group *blkg) 241void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
242 struct blkio_policy_type *pol)
239{ 243{
240 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 244 struct blkg_policy_data *pd = blkg->pd[pol->plid];
241 unsigned long flags; 245 unsigned long flags;
242 unsigned long long now; 246 unsigned long long now;
243 struct blkio_group_stats *stats; 247 struct blkio_group_stats *stats;
@@ -254,9 +258,10 @@ void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
254} 258}
255EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats); 259EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
256 260
257void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) 261void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
262 struct blkio_policy_type *pol)
258{ 263{
259 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 264 struct blkg_policy_data *pd = blkg->pd[pol->plid];
260 unsigned long flags; 265 unsigned long flags;
261 struct blkio_group_stats *stats; 266 struct blkio_group_stats *stats;
262 267
@@ -271,9 +276,10 @@ void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
271} 276}
272EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats); 277EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
273 278
274void blkiocg_set_start_empty_time(struct blkio_group *blkg) 279void blkiocg_set_start_empty_time(struct blkio_group *blkg,
280 struct blkio_policy_type *pol)
275{ 281{
276 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 282 struct blkg_policy_data *pd = blkg->pd[pol->plid];
277 unsigned long flags; 283 unsigned long flags;
278 struct blkio_group_stats *stats; 284 struct blkio_group_stats *stats;
279 285
@@ -303,39 +309,43 @@ void blkiocg_set_start_empty_time(struct blkio_group *blkg)
303EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time); 309EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
304 310
305void blkiocg_update_dequeue_stats(struct blkio_group *blkg, 311void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
306 unsigned long dequeue) 312 struct blkio_policy_type *pol,
313 unsigned long dequeue)
307{ 314{
308 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 315 struct blkg_policy_data *pd = blkg->pd[pol->plid];
309 316
310 pd->stats.dequeue += dequeue; 317 pd->stats.dequeue += dequeue;
311} 318}
312EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats); 319EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
313#else 320#else
314static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg, 321static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
315 struct blkio_group *curr_blkg) {} 322 struct blkio_policy_type *pol,
316static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {} 323 struct blkio_group *curr_blkg) { }
324static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
317#endif 325#endif
318 326
319void blkiocg_update_io_add_stats(struct blkio_group *blkg, 327void blkiocg_update_io_add_stats(struct blkio_group *blkg,
320 struct blkio_group *curr_blkg, bool direction, 328 struct blkio_policy_type *pol,
321 bool sync) 329 struct blkio_group *curr_blkg, bool direction,
330 bool sync)
322{ 331{
323 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 332 struct blkg_policy_data *pd = blkg->pd[pol->plid];
324 unsigned long flags; 333 unsigned long flags;
325 334
326 spin_lock_irqsave(&blkg->stats_lock, flags); 335 spin_lock_irqsave(&blkg->stats_lock, flags);
327 blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction, 336 blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
328 sync); 337 sync);
329 blkio_end_empty_time(&pd->stats); 338 blkio_end_empty_time(&pd->stats);
330 blkio_set_start_group_wait_time(blkg, curr_blkg); 339 blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
331 spin_unlock_irqrestore(&blkg->stats_lock, flags); 340 spin_unlock_irqrestore(&blkg->stats_lock, flags);
332} 341}
333EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats); 342EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
334 343
335void blkiocg_update_io_remove_stats(struct blkio_group *blkg, 344void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
336 bool direction, bool sync) 345 struct blkio_policy_type *pol,
346 bool direction, bool sync)
337{ 347{
338 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 348 struct blkg_policy_data *pd = blkg->pd[pol->plid];
339 unsigned long flags; 349 unsigned long flags;
340 350
341 spin_lock_irqsave(&blkg->stats_lock, flags); 351 spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -345,10 +355,12 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
345} 355}
346EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats); 356EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
347 357
348void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time, 358void blkiocg_update_timeslice_used(struct blkio_group *blkg,
349 unsigned long unaccounted_time) 359 struct blkio_policy_type *pol,
360 unsigned long time,
361 unsigned long unaccounted_time)
350{ 362{
351 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 363 struct blkg_policy_data *pd = blkg->pd[pol->plid];
352 unsigned long flags; 364 unsigned long flags;
353 365
354 spin_lock_irqsave(&blkg->stats_lock, flags); 366 spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -365,9 +377,10 @@ EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
365 * is valid. 377 * is valid.
366 */ 378 */
367void blkiocg_update_dispatch_stats(struct blkio_group *blkg, 379void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
368 uint64_t bytes, bool direction, bool sync) 380 struct blkio_policy_type *pol,
381 uint64_t bytes, bool direction, bool sync)
369{ 382{
370 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 383 struct blkg_policy_data *pd = blkg->pd[pol->plid];
371 struct blkio_group_stats_cpu *stats_cpu; 384 struct blkio_group_stats_cpu *stats_cpu;
372 unsigned long flags; 385 unsigned long flags;
373 386
@@ -392,9 +405,12 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
392EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats); 405EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
393 406
394void blkiocg_update_completion_stats(struct blkio_group *blkg, 407void blkiocg_update_completion_stats(struct blkio_group *blkg,
395 uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) 408 struct blkio_policy_type *pol,
409 uint64_t start_time,
410 uint64_t io_start_time, bool direction,
411 bool sync)
396{ 412{
397 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 413 struct blkg_policy_data *pd = blkg->pd[pol->plid];
398 struct blkio_group_stats *stats; 414 struct blkio_group_stats *stats;
399 unsigned long flags; 415 unsigned long flags;
400 unsigned long long now = sched_clock(); 416 unsigned long long now = sched_clock();
@@ -412,10 +428,11 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
412EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats); 428EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
413 429
414/* Merged stats are per cpu. */ 430/* Merged stats are per cpu. */
415void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, 431void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
416 bool sync) 432 struct blkio_policy_type *pol,
433 bool direction, bool sync)
417{ 434{
418 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 435 struct blkg_policy_data *pd = blkg->pd[pol->plid];
419 struct blkio_group_stats_cpu *stats_cpu; 436 struct blkio_group_stats_cpu *stats_cpu;
420 unsigned long flags; 437 unsigned long flags;
421 438
@@ -681,9 +698,9 @@ void __blkg_release(struct blkio_group *blkg)
681} 698}
682EXPORT_SYMBOL_GPL(__blkg_release); 699EXPORT_SYMBOL_GPL(__blkg_release);
683 700
684static void blkio_reset_stats_cpu(struct blkio_group *blkg) 701static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
685{ 702{
686 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 703 struct blkg_policy_data *pd = blkg->pd[plid];
687 struct blkio_group_stats_cpu *stats_cpu; 704 struct blkio_group_stats_cpu *stats_cpu;
688 int i, j, k; 705 int i, j, k;
689 /* 706 /*
@@ -754,7 +771,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
754 spin_unlock(&blkg->stats_lock); 771 spin_unlock(&blkg->stats_lock);
755 772
756 /* Reset Per cpu stats which don't take blkg->stats_lock */ 773 /* Reset Per cpu stats which don't take blkg->stats_lock */
757 blkio_reset_stats_cpu(blkg); 774 blkio_reset_stats_cpu(blkg, blkg->plid);
758 } 775 }
759 776
760 spin_unlock_irq(&blkcg->lock); 777 spin_unlock_irq(&blkcg->lock);
@@ -803,10 +820,10 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
803} 820}
804 821
805 822
806static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, 823static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
807 enum stat_type_cpu type, enum stat_sub_type sub_type) 824 enum stat_type_cpu type, enum stat_sub_type sub_type)
808{ 825{
809 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 826 struct blkg_policy_data *pd = blkg->pd[plid];
810 int cpu; 827 int cpu;
811 struct blkio_group_stats_cpu *stats_cpu; 828 struct blkio_group_stats_cpu *stats_cpu;
812 u64 val = 0, tval; 829 u64 val = 0, tval;
@@ -829,7 +846,7 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
829 return val; 846 return val;
830} 847}
831 848
832static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, 849static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
833 struct cgroup_map_cb *cb, const char *dname, 850 struct cgroup_map_cb *cb, const char *dname,
834 enum stat_type_cpu type) 851 enum stat_type_cpu type)
835{ 852{
@@ -838,7 +855,7 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
838 enum stat_sub_type sub_type; 855 enum stat_sub_type sub_type;
839 856
840 if (type == BLKIO_STAT_CPU_SECTORS) { 857 if (type == BLKIO_STAT_CPU_SECTORS) {
841 val = blkio_read_stat_cpu(blkg, type, 0); 858 val = blkio_read_stat_cpu(blkg, plid, type, 0);
842 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, 859 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
843 dname); 860 dname);
844 } 861 }
@@ -847,12 +864,12 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
847 sub_type++) { 864 sub_type++) {
848 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN, 865 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
849 false); 866 false);
850 val = blkio_read_stat_cpu(blkg, type, sub_type); 867 val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
851 cb->fill(cb, key_str, val); 868 cb->fill(cb, key_str, val);
852 } 869 }
853 870
854 disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) + 871 disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
855 blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE); 872 blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);
856 873
857 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN, 874 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
858 false); 875 false);
@@ -861,11 +878,11 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
861} 878}
862 879
863/* This should be called with blkg->stats_lock held */ 880/* This should be called with blkg->stats_lock held */
864static uint64_t blkio_get_stat(struct blkio_group *blkg, 881static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
865 struct cgroup_map_cb *cb, const char *dname, 882 struct cgroup_map_cb *cb, const char *dname,
866 enum stat_type type) 883 enum stat_type type)
867{ 884{
868 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 885 struct blkg_policy_data *pd = blkg->pd[plid];
869 uint64_t disk_total; 886 uint64_t disk_total;
870 char key_str[MAX_KEY_LEN]; 887 char key_str[MAX_KEY_LEN];
871 enum stat_sub_type sub_type; 888 enum stat_sub_type sub_type;
@@ -989,29 +1006,29 @@ static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
989 goto out_unlock; 1006 goto out_unlock;
990 1007
991 pd->conf.weight = temp; 1008 pd->conf.weight = temp;
992 blkio_update_group_weight(blkg, temp ?: blkcg->weight); 1009 blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
993 break; 1010 break;
994 case BLKIO_POLICY_THROTL: 1011 case BLKIO_POLICY_THROTL:
995 switch(fileid) { 1012 switch(fileid) {
996 case BLKIO_THROTL_read_bps_device: 1013 case BLKIO_THROTL_read_bps_device:
997 pd->conf.bps[READ] = temp; 1014 pd->conf.bps[READ] = temp;
998 blkio_update_group_bps(blkg, temp ?: -1, fileid); 1015 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
999 break; 1016 break;
1000 case BLKIO_THROTL_write_bps_device: 1017 case BLKIO_THROTL_write_bps_device:
1001 pd->conf.bps[WRITE] = temp; 1018 pd->conf.bps[WRITE] = temp;
1002 blkio_update_group_bps(blkg, temp ?: -1, fileid); 1019 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
1003 break; 1020 break;
1004 case BLKIO_THROTL_read_iops_device: 1021 case BLKIO_THROTL_read_iops_device:
1005 if (temp > THROTL_IOPS_MAX) 1022 if (temp > THROTL_IOPS_MAX)
1006 goto out_unlock; 1023 goto out_unlock;
1007 pd->conf.iops[READ] = temp; 1024 pd->conf.iops[READ] = temp;
1008 blkio_update_group_iops(blkg, temp ?: -1, fileid); 1025 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1009 break; 1026 break;
1010 case BLKIO_THROTL_write_iops_device: 1027 case BLKIO_THROTL_write_iops_device:
1011 if (temp > THROTL_IOPS_MAX) 1028 if (temp > THROTL_IOPS_MAX)
1012 goto out_unlock; 1029 goto out_unlock;
1013 pd->conf.iops[WRITE] = temp; 1030 pd->conf.iops[WRITE] = temp;
1014 blkio_update_group_iops(blkg, temp ?: -1, fileid); 1031 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1015 break; 1032 break;
1016 } 1033 }
1017 break; 1034 break;
@@ -1066,15 +1083,16 @@ static const char *blkg_dev_name(struct blkio_group *blkg)
1066static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg, 1083static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
1067 struct seq_file *m) 1084 struct seq_file *m)
1068{ 1085{
1069 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 1086 int plid = BLKIOFILE_POLICY(cft->private);
1070 const char *dname = blkg_dev_name(blkg);
1071 int fileid = BLKIOFILE_ATTR(cft->private); 1087 int fileid = BLKIOFILE_ATTR(cft->private);
1088 struct blkg_policy_data *pd = blkg->pd[plid];
1089 const char *dname = blkg_dev_name(blkg);
1072 int rw = WRITE; 1090 int rw = WRITE;
1073 1091
1074 if (!dname) 1092 if (!dname)
1075 return; 1093 return;
1076 1094
1077 switch (blkg->plid) { 1095 switch (plid) {
1078 case BLKIO_POLICY_PROP: 1096 case BLKIO_POLICY_PROP:
1079 if (pd->conf.weight) 1097 if (pd->conf.weight)
1080 seq_printf(m, "%s\t%u\n", 1098 seq_printf(m, "%s\t%u\n",
@@ -1166,15 +1184,17 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1166 rcu_read_lock(); 1184 rcu_read_lock();
1167 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) { 1185 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1168 const char *dname = blkg_dev_name(blkg); 1186 const char *dname = blkg_dev_name(blkg);
1187 int plid = BLKIOFILE_POLICY(cft->private);
1169 1188
1170 if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid) 1189 if (!dname || plid != blkg->plid)
1171 continue; 1190 continue;
1172 if (pcpu) 1191 if (pcpu) {
1173 cgroup_total += blkio_get_stat_cpu(blkg, cb, dname, 1192 cgroup_total += blkio_get_stat_cpu(blkg, plid,
1174 type); 1193 cb, dname, type);
1175 else { 1194 } else {
1176 spin_lock_irq(&blkg->stats_lock); 1195 spin_lock_irq(&blkg->stats_lock);
1177 cgroup_total += blkio_get_stat(blkg, cb, dname, type); 1196 cgroup_total += blkio_get_stat(blkg, plid,
1197 cb, dname, type);
1178 spin_unlock_irq(&blkg->stats_lock); 1198 spin_unlock_irq(&blkg->stats_lock);
1179 } 1199 }
1180 } 1200 }
@@ -1280,7 +1300,7 @@ static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
1280 struct blkg_policy_data *pd = blkg->pd[blkg->plid]; 1300 struct blkg_policy_data *pd = blkg->pd[blkg->plid];
1281 1301
1282 if (blkg->plid == plid && !pd->conf.weight) 1302 if (blkg->plid == plid && !pd->conf.weight)
1283 blkio_update_group_weight(blkg, blkcg->weight); 1303 blkio_update_group_weight(blkg, plid, blkcg->weight);
1284 } 1304 }
1285 1305
1286 spin_unlock_irq(&blkcg->lock); 1306 spin_unlock_irq(&blkcg->lock);