author		Tejun Heo <tj@kernel.org>	2012-03-05 16:15:17 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:23 -0500
commit		c1768268f9424410761da57ea71107acae7b03cc (patch)
tree		be6a534b1a15ab9df9f23e585b039776c5a5e498 /block
parent		549d3aa872cd1aec1ee540fd93afd9611faa0def (diff)
blkcg: don't use blkg->plid in stat related functions
blkg is scheduled to be unified for all policies and thus there won't
be one-to-one mapping from blkg to policy.  Update stat related
functions to take explicit @pol or @plid arguments and not use
blkg->plid.

This is painful for now but most of specific stat interface functions
will be replaced with a handful of generic helpers.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
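The conversion is mechanical at each call site: a stat helper that used to derive the policy from blkg->plid now receives the policy explicitly, so a single blkg can later hold live per-policy data for several policies at once. Below is a minimal standalone C sketch of that indexing change; the types and the update_dequeue_stats() helper are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdio.h>

enum blkio_policy_id {
        BLKIO_POLICY_PROP,      /* proportional weight (CFQ) */
        BLKIO_POLICY_THROTL,    /* throttling */
        BLKIO_NR_POLICIES,
};

struct blkg_policy_data {
        unsigned long long dequeue;     /* one stat as a stand-in */
};

struct blkio_group {
        /* after unification, several slots can be live at once */
        struct blkg_policy_data *pd[BLKIO_NR_POLICIES];
};

struct blkio_policy_type {
        enum blkio_policy_id plid;
};

/* new convention: the caller names the policy whose stats it updates */
static void update_dequeue_stats(struct blkio_group *blkg,
                                 struct blkio_policy_type *pol,
                                 unsigned long dequeue)
{
        /* was: blkg->pd[blkg->plid], which assumes one policy per blkg */
        blkg->pd[pol->plid]->dequeue += dequeue;
}

int main(void)
{
        struct blkg_policy_data prop = { 0 }, throtl = { 0 };
        struct blkio_group blkg = { .pd = { &prop, &throtl } };
        struct blkio_policy_type cfq_like = { BLKIO_POLICY_PROP };

        update_dequeue_stats(&blkg, &cfq_like, 1);
        printf("prop dequeue=%llu, throtl dequeue=%llu\n",
               prop.dequeue, throtl.dequeue);
        return 0;
}

The patch below performs exactly this swap in blk-cgroup.c, with the matching signature churn in blk-cgroup.h and cfq.h and the updated CFQ and throttle call sites.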
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	150
-rw-r--r--	block/blk-cgroup.h	80
-rw-r--r--	block/blk-throttle.c	4
-rw-r--r--	block/cfq-iosched.c	44
-rw-r--r--	block/cfq.h	96
5 files changed, 224 insertions(+), 150 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0eb39981e7c2..91f9824be5cc 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -78,14 +78,14 @@ struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(task_blkio_cgroup);
 
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
+static inline void blkio_update_group_weight(struct blkio_group *blkg,
+                                             int plid, unsigned int weight)
 {
         struct blkio_policy_type *blkiop;
 
         list_for_each_entry(blkiop, &blkio_list, list) {
                 /* If this policy does not own the blkg, do not send updates */
-                if (blkiop->plid != blkg->plid)
+                if (blkiop->plid != plid)
                         continue;
                 if (blkiop->ops.blkio_update_group_weight_fn)
                         blkiop->ops.blkio_update_group_weight_fn(blkg->q,
@@ -93,15 +93,15 @@ blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
         }
 }
 
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
-                                          int fileid)
+static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
+                                          u64 bps, int fileid)
 {
         struct blkio_policy_type *blkiop;
 
         list_for_each_entry(blkiop, &blkio_list, list) {
 
                 /* If this policy does not own the blkg, do not send updates */
-                if (blkiop->plid != blkg->plid)
+                if (blkiop->plid != plid)
                         continue;
 
                 if (fileid == BLKIO_THROTL_read_bps_device
@@ -117,14 +117,15 @@ static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
 }
 
 static inline void blkio_update_group_iops(struct blkio_group *blkg,
-                                           unsigned int iops, int fileid)
+                                           int plid, unsigned int iops,
+                                           int fileid)
 {
         struct blkio_policy_type *blkiop;
 
         list_for_each_entry(blkiop, &blkio_list, list) {
 
                 /* If this policy does not own the blkg, do not send updates */
-                if (blkiop->plid != blkg->plid)
+                if (blkiop->plid != plid)
                         continue;
 
                 if (fileid == BLKIO_THROTL_read_iops_device
@@ -182,9 +183,10 @@ static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 /* This should be called with the blkg->stats_lock held. */
 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-                                            struct blkio_group *curr_blkg)
+                                            struct blkio_policy_type *pol,
+                                            struct blkio_group *curr_blkg)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
 
         if (blkio_blkg_waiting(&pd->stats))
                 return;
@@ -222,9 +224,10 @@ static void blkio_end_empty_time(struct blkio_group_stats *stats)
         blkio_clear_blkg_empty(stats);
 }
 
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                        struct blkio_policy_type *pol)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -235,9 +238,10 @@ void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
 
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                    struct blkio_policy_type *pol)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
         unsigned long long now;
         struct blkio_group_stats *stats;
@@ -254,9 +258,10 @@ void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
 
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                         struct blkio_policy_type *pol)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
         struct blkio_group_stats *stats;
 
@@ -271,9 +276,10 @@ void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+void blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                  struct blkio_policy_type *pol)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
         struct blkio_group_stats *stats;
 
@@ -303,39 +309,43 @@ void blkiocg_set_start_empty_time(struct blkio_group *blkg)
 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
 
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                                  unsigned long dequeue)
+                                  struct blkio_policy_type *pol,
+                                  unsigned long dequeue)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
 
         pd->stats.dequeue += dequeue;
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #else
 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-                                        struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
+                                        struct blkio_policy_type *pol,
+                                        struct blkio_group *curr_blkg) { }
+static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
 #endif
 
 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-                                 struct blkio_group *curr_blkg, bool direction,
-                                 bool sync)
+                                 struct blkio_policy_type *pol,
+                                 struct blkio_group *curr_blkg, bool direction,
+                                 bool sync)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
         blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                         sync);
         blkio_end_empty_time(&pd->stats);
-        blkio_set_start_group_wait_time(blkg, curr_blkg);
+        blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
         spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
 
 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                    bool direction, bool sync)
+                                    struct blkio_policy_type *pol,
+                                    bool direction, bool sync)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -345,10 +355,12 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
-                                   unsigned long unaccounted_time)
+void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+                                   struct blkio_policy_type *pol,
+                                   unsigned long time,
+                                   unsigned long unaccounted_time)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
@@ -365,9 +377,10 @@ EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
  * is valid.
  */
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                                   uint64_t bytes, bool direction, bool sync)
+                                   struct blkio_policy_type *pol,
+                                   uint64_t bytes, bool direction, bool sync)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         struct blkio_group_stats_cpu *stats_cpu;
         unsigned long flags;
 
@@ -392,9 +405,12 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
 
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
-        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+                                     struct blkio_policy_type *pol,
+                                     uint64_t start_time,
+                                     uint64_t io_start_time, bool direction,
+                                     bool sync)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         struct blkio_group_stats *stats;
         unsigned long flags;
         unsigned long long now = sched_clock();
@@ -412,10 +428,11 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
 
 /* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-                                    bool sync)
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+                                    struct blkio_policy_type *pol,
+                                    bool direction, bool sync)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
         struct blkio_group_stats_cpu *stats_cpu;
         unsigned long flags;
 
@@ -681,9 +698,9 @@ void __blkg_release(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[plid];
         struct blkio_group_stats_cpu *stats_cpu;
         int i, j, k;
         /*
@@ -754,7 +771,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
                 spin_unlock(&blkg->stats_lock);
 
                 /* Reset Per cpu stats which don't take blkg->stats_lock */
-                blkio_reset_stats_cpu(blkg);
+                blkio_reset_stats_cpu(blkg, blkg->plid);
         }
 
         spin_unlock_irq(&blkcg->lock);
@@ -803,10 +820,10 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
 }
 
 
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
+static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
                         enum stat_type_cpu type, enum stat_sub_type sub_type)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[plid];
         int cpu;
         struct blkio_group_stats_cpu *stats_cpu;
         u64 val = 0, tval;
@@ -829,7 +846,7 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
         return val;
 }
 
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
+static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
                                    struct cgroup_map_cb *cb, const char *dname,
                                    enum stat_type_cpu type)
 {
@@ -838,7 +855,7 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
         enum stat_sub_type sub_type;
 
         if (type == BLKIO_STAT_CPU_SECTORS) {
-                val = blkio_read_stat_cpu(blkg, type, 0);
+                val = blkio_read_stat_cpu(blkg, plid, type, 0);
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
                                        dname);
         }
@@ -847,12 +864,12 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
              sub_type++) {
                 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                    false);
-                val = blkio_read_stat_cpu(blkg, type, sub_type);
+                val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
                 cb->fill(cb, key_str, val);
         }
 
-        disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
-                     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
+        disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
+                     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);
 
         blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                            false);
@@ -861,11 +878,11 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
 }
 
 /* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
+static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
                                struct cgroup_map_cb *cb, const char *dname,
                                enum stat_type type)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+        struct blkg_policy_data *pd = blkg->pd[plid];
         uint64_t disk_total;
         char key_str[MAX_KEY_LEN];
         enum stat_sub_type sub_type;
@@ -989,29 +1006,29 @@ static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
                         goto out_unlock;
 
                 pd->conf.weight = temp;
-                blkio_update_group_weight(blkg, temp ?: blkcg->weight);
+                blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
                 break;
         case BLKIO_POLICY_THROTL:
                 switch(fileid) {
                 case BLKIO_THROTL_read_bps_device:
                         pd->conf.bps[READ] = temp;
-                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
+                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_write_bps_device:
                         pd->conf.bps[WRITE] = temp;
-                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
+                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_read_iops_device:
                         if (temp > THROTL_IOPS_MAX)
                                 goto out_unlock;
                         pd->conf.iops[READ] = temp;
-                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
+                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_write_iops_device:
                         if (temp > THROTL_IOPS_MAX)
                                 goto out_unlock;
                         pd->conf.iops[WRITE] = temp;
-                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
+                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
                         break;
                 }
                 break;
@@ -1066,15 +1083,16 @@ static const char *blkg_dev_name(struct blkio_group *blkg)
 static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
                                    struct seq_file *m)
 {
-        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
-        const char *dname = blkg_dev_name(blkg);
+        int plid = BLKIOFILE_POLICY(cft->private);
         int fileid = BLKIOFILE_ATTR(cft->private);
+        struct blkg_policy_data *pd = blkg->pd[plid];
+        const char *dname = blkg_dev_name(blkg);
         int rw = WRITE;
 
         if (!dname)
                 return;
 
-        switch (blkg->plid) {
+        switch (plid) {
         case BLKIO_POLICY_PROP:
                 if (pd->conf.weight)
                         seq_printf(m, "%s\t%u\n",
@@ -1166,15 +1184,17 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
         rcu_read_lock();
         hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                 const char *dname = blkg_dev_name(blkg);
+                int plid = BLKIOFILE_POLICY(cft->private);
 
-                if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid)
+                if (!dname || plid != blkg->plid)
                         continue;
-                if (pcpu)
-                        cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
-                                                           type);
-                else {
+                if (pcpu) {
+                        cgroup_total += blkio_get_stat_cpu(blkg, plid,
+                                                           cb, dname, type);
+                } else {
                         spin_lock_irq(&blkg->stats_lock);
-                        cgroup_total += blkio_get_stat(blkg, cb, dname, type);
+                        cgroup_total += blkio_get_stat(blkg, plid,
+                                                       cb, dname, type);
                         spin_unlock_irq(&blkg->stats_lock);
                 }
         }
@@ -1280,7 +1300,7 @@ static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
                 struct blkg_policy_data *pd = blkg->pd[blkg->plid];
 
                 if (blkg->plid == plid && !pd->conf.weight)
-                        blkio_update_group_weight(blkg, blkcg->weight);
+                        blkio_update_group_weight(blkg, plid, blkcg->weight);
         }
 
         spin_unlock_irq(&blkcg->lock);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 5dffd436f30d..60e96b4be4ce 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -335,12 +335,17 @@ static inline void blkg_put(struct blkio_group *blkg) { }
 #define BLKIO_WEIGHT_DEFAULT	500
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                         struct blkio_policy_type *pol);
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                                  unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+                                  struct blkio_policy_type *pol,
+                                  unsigned long dequeue);
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                        struct blkio_policy_type *pol);
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                    struct blkio_policy_type *pol);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                  struct blkio_policy_type *pol);
 
 #define BLKG_FLAG_FNS(name)						\
 static inline void blkio_mark_blkg_##name(				\
@@ -363,14 +368,16 @@ BLKG_FLAG_FNS(idling)
 BLKG_FLAG_FNS(empty)
 #undef BLKG_FLAG_FNS
 #else
-static inline void blkiocg_update_avg_queue_size_stats(
-                                                struct blkio_group *blkg) {}
+static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                                                unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+                        struct blkio_policy_type *pol, unsigned long dequeue) { }
+static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
+static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 #endif
 
 #ifdef CONFIG_BLK_CGROUP
@@ -386,18 +393,27 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                        enum blkio_policy_id plid,
                                        bool for_root);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                                   unsigned long time,
-                                   unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
-                                   bool direction, bool sync);
+                                   struct blkio_policy_type *pol,
+                                   unsigned long time,
+                                   unsigned long unaccounted_time);
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+                                   struct blkio_policy_type *pol,
+                                   uint64_t bytes, bool direction, bool sync);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
-        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-                                    bool sync);
+                                     struct blkio_policy_type *pol,
+                                     uint64_t start_time,
+                                     uint64_t io_start_time, bool direction,
+                                     bool sync);
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+                                    struct blkio_policy_type *pol,
+                                    bool direction, bool sync);
 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-                struct blkio_group *curr_blkg, bool direction, bool sync);
+                                 struct blkio_policy_type *pol,
+                                 struct blkio_group *curr_blkg, bool direction,
+                                 bool sync);
 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                    bool direction, bool sync);
+                                    struct blkio_policy_type *pol,
+                                    bool direction, bool sync);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
@@ -411,19 +427,23 @@ blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
 static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                               void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                                unsigned long time,
-                                unsigned long unaccounted_time)
-{}
+                        struct blkio_policy_type *pol, unsigned long time,
+                        unsigned long unaccounted_time) { }
 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                                uint64_t bytes, bool direction, bool sync) {}
+                        struct blkio_policy_type *pol, uint64_t bytes,
+                        bool direction, bool sync) { }
 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
-                uint64_t start_time, uint64_t io_start_time, bool direction,
-                bool sync) {}
+                        struct blkio_policy_type *pol, uint64_t start_time,
+                        uint64_t io_start_time, bool direction, bool sync) { }
 static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-                                        bool direction, bool sync) {}
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync) { }
 static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-                struct blkio_group *curr_blkg, bool direction, bool sync) {}
+                        struct blkio_policy_type *pol,
+                        struct blkio_group *curr_blkg, bool direction,
+                        bool sync) { }
 static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                                bool direction, bool sync) {}
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync) { }
 #endif
 #endif	/* _BLK_CGROUP_H */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 153ba509446b..b2fddaf20b98 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -588,7 +588,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         tg->bytes_disp[rw] += bio->bi_size;
         tg->io_disp[rw]++;
 
-        blkiocg_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, rw, sync);
+        blkiocg_update_dispatch_stats(tg_to_blkg(tg), &blkio_policy_throtl,
+                                      bio->bi_size, rw, sync);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -1000,6 +1001,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
         if (tg) {
                 if (tg_no_rule_group(tg, rw)) {
                         blkiocg_update_dispatch_stats(tg_to_blkg(tg),
+                                                      &blkio_policy_throtl,
                                                       bio->bi_size, rw,
                                                       rw_is_sync(bio->bi_rw));
                         goto out_unlock_rcu;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86980023339a..11dd9d7f2edb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -945,7 +945,8 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
         cfq_group_service_tree_del(st, cfqg);
         cfqg->saved_workload_slice = 0;
-        cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg), 1);
+        cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
+                                         &blkio_policy_cfq, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1017,9 +1018,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
                      used_sl, cfqq->slice_dispatch, charge,
                      iops_mode(cfqd), cfqq->nr_sectors);
-        cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), used_sl,
-                                          unaccounted_sl);
-        cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg));
+        cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
+                                          used_sl, unaccounted_sl);
+        cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
 }
 
 /**
@@ -1463,9 +1464,11 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
         elv_rb_del(&cfqq->sort_list, rq);
         cfqq->queued[rq_is_sync(rq)]--;
         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-                                           rq_data_dir(rq), rq_is_sync(rq));
+                                           &blkio_policy_cfq, rq_data_dir(rq),
+                                           rq_is_sync(rq));
         cfq_add_rq_rb(rq);
         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+                                        &blkio_policy_cfq,
                                         cfqg_to_blkg(cfqq->cfqd->serving_group),
                                         rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -1524,7 +1527,8 @@ static void cfq_remove_request(struct request *rq)
 
         cfqq->cfqd->rq_queued--;
         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-                                           rq_data_dir(rq), rq_is_sync(rq));
+                                           &blkio_policy_cfq, rq_data_dir(rq),
+                                           rq_is_sync(rq));
         if (rq->cmd_flags & REQ_PRIO) {
                 WARN_ON(!cfqq->prio_pending);
                 cfqq->prio_pending--;
@@ -1560,7 +1564,8 @@ static void cfq_bio_merged(struct request_queue *q, struct request *req,
                            struct bio *bio)
 {
         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
-                                           bio_data_dir(bio), cfq_bio_sync(bio));
+                                           &blkio_policy_cfq, bio_data_dir(bio),
+                                           cfq_bio_sync(bio));
 }
 
 static void
@@ -1583,7 +1588,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         cfqq->next_rq = rq;
         cfq_remove_request(next);
         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-                                           rq_data_dir(next), rq_is_sync(next));
+                                           &blkio_policy_cfq, rq_data_dir(next),
+                                           rq_is_sync(next));
 
         cfqq = RQ_CFQQ(next);
         /*
@@ -1624,7 +1630,8 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         del_timer(&cfqd->idle_slice_timer);
-        cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
+        cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
+                                           &blkio_policy_cfq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1633,7 +1640,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
         if (cfqq) {
                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
                              cfqd->serving_prio, cfqd->serving_type);
-                cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg));
+                cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
+                                                        &blkio_policy_cfq);
                 cfqq->slice_start = 0;
                 cfqq->dispatch_start = jiffies;
                 cfqq->allocated_slice = 0;
@@ -1981,7 +1989,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                 sl = cfqd->cfq_slice_idle;
 
         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-        cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
+        cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
+                                               &blkio_policy_cfq);
         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
                         group_idle ? 1 : 0);
 }
@@ -2005,8 +2014,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
         cfqq->nr_sectors += blk_rq_sectors(rq);
         cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
-                                          blk_rq_bytes(rq), rq_data_dir(rq),
-                                          rq_is_sync(rq));
+                                          &blkio_policy_cfq, blk_rq_bytes(rq),
+                                          rq_data_dir(rq), rq_is_sync(rq));
 }
 
 /*
@@ -3094,7 +3103,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                         __blk_run_queue(cfqd->queue);
                 } else {
                         cfq_blkiocg_update_idle_time_stats(
-                                        cfqg_to_blkg(cfqq->cfqg));
+                                        cfqg_to_blkg(cfqq->cfqg),
+                                        &blkio_policy_cfq);
                         cfq_mark_cfqq_must_dispatch(cfqq);
                 }
         }
@@ -3122,6 +3132,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
         list_add_tail(&rq->queuelist, &cfqq->fifo);
         cfq_add_rq_rb(rq);
         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+                                        &blkio_policy_cfq,
                                         cfqg_to_blkg(cfqd->serving_group),
                                         rq_data_dir(rq), rq_is_sync(rq));
         cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3220,8 +3231,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         cfqq->dispatched--;
         (RQ_CFQG(rq))->dispatched--;
         cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
-                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
-                        rq_data_dir(rq), rq_is_sync(rq));
+                        &blkio_policy_cfq, rq_start_time_ns(rq),
+                        rq_io_start_time_ns(rq), rq_data_dir(rq),
+                        rq_is_sync(rq));
 
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
diff --git a/block/cfq.h b/block/cfq.h
index 398760194e11..5584e1b63ca8 100644
--- a/block/cfq.h
+++ b/block/cfq.h
@@ -4,67 +4,79 @@
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-        struct blkio_group *curr_blkg, bool direction, bool sync)
+                        struct blkio_policy_type *pol,
+                        struct blkio_group *curr_blkg,
+                        bool direction, bool sync)
 {
-        blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+        blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
 }
 
 static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-        unsigned long dequeue)
+                        struct blkio_policy_type *pol, unsigned long dequeue)
 {
-        blkiocg_update_dequeue_stats(blkg, dequeue);
+        blkiocg_update_dequeue_stats(blkg, pol, dequeue);
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-        unsigned long time, unsigned long unaccounted_time)
+                        struct blkio_policy_type *pol, unsigned long time,
+                        unsigned long unaccounted_time)
 {
-        blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
+        blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
 }
 
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol)
 {
-        blkiocg_set_start_empty_time(blkg);
+        blkiocg_set_start_empty_time(blkg, pol);
 }
 
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-        bool direction, bool sync)
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync)
 {
-        blkiocg_update_io_remove_stats(blkg, direction, sync);
+        blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
 }
 
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-        bool direction, bool sync)
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync)
 {
-        blkiocg_update_io_merged_stats(blkg, direction, sync);
+        blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
 }
 
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol)
 {
-        blkiocg_update_idle_time_stats(blkg);
+        blkiocg_update_idle_time_stats(blkg, pol);
 }
 
 static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol)
 {
-        blkiocg_update_avg_queue_size_stats(blkg);
+        blkiocg_update_avg_queue_size_stats(blkg, pol);
 }
 
 static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol)
 {
-        blkiocg_update_set_idle_time_stats(blkg);
+        blkiocg_update_set_idle_time_stats(blkg, pol);
 }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-        uint64_t bytes, bool direction, bool sync)
+                        struct blkio_policy_type *pol, uint64_t bytes,
+                        bool direction, bool sync)
 {
-        blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+        blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
 }
 
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol, uint64_t start_time,
+                        uint64_t io_start_time, bool direction, bool sync)
 {
-        blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+        blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
                                         direction, sync);
 }
 
 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
@@ -74,30 +86,38 @@ static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
 
 #else /* CFQ_GROUP_IOSCHED */
 static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-        struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
+                        struct blkio_policy_type *pol,
+                        struct blkio_group *curr_blkg, bool direction,
+                        bool sync) { }
 static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-        unsigned long dequeue) {}
-
+                        struct blkio_policy_type *pol, unsigned long dequeue) { }
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-        unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+                        struct blkio_policy_type *pol, unsigned long time,
+                        unsigned long unaccounted_time) { }
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-        bool direction, bool sync) {}
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync) { }
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-        bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
+                        struct blkio_policy_type *pol, bool direction,
+                        bool sync) { }
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 
 static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol) { }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-        uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
+                        struct blkio_policy_type *pol, uint64_t bytes,
+                        bool direction, bool sync) { }
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+                        struct blkio_policy_type *pol, uint64_t start_time,
+                        uint64_t io_start_time, bool direction, bool sync) { }
 
 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
 {