author    Tejun Heo <tj@kernel.org>    2012-03-05 16:15:16 -0500
committer Jens Axboe <axboe@kernel.dk> 2012-03-06 15:27:23 -0500
commit    549d3aa872cd1aec1ee540fd93afd9611faa0def (patch)
tree      27c3703d3448dc0be9f58b5f01224ae901422a32
parent    1adaf3dde37a8b9b59ea59c5f58fed7761178383 (diff)
blkcg: make blkg->pd an array and move configuration and stats into it
To prepare for unifying blkgs for different policies, make blkg->pd an
array with BLKIO_NR_POLICIES elements and move blkg->conf, ->stats, and
->stats_cpu into blkg_policy_data.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
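In outline, the patch turns the single blkg->pd pointer into a per-policy
array and moves the configuration and statistics fields into the per-policy
data. Below is a condensed before/after sketch assembled from the
blk-cgroup.h hunk at the end of this diff; unrelated fields are elided and
the layout is abbreviated, not a verbatim copy of the header:

    /* Before: one policy's config/stats lived directly in the blkg. */
    struct blkio_group {
            /* ... */
            struct blkio_group_conf conf;
            spinlock_t stats_lock;
            struct blkio_group_stats stats;
            struct blkio_group_stats_cpu __percpu *stats_cpu;
            struct blkg_policy_data *pd;    /* single policy only */
            /* ... */
    };

    /* After: each policy owns a slot, and conf/stats follow the policy. */
    struct blkg_policy_data {
            struct blkio_group *blkg;
            struct blkio_group_conf conf;
            struct blkio_group_stats stats;
            struct blkio_group_stats_cpu __percpu *stats_cpu;
            char pdata[] __aligned(__alignof__(unsigned long long));
    };

    struct blkio_group {
            /* ... */
            spinlock_t stats_lock;
            struct blkg_policy_data *pd[BLKIO_NR_POLICIES];
            /* ... */
    };

    /* Accordingly, every accessor in blk-cgroup.c switches from
     * blkg->stats / blkg->conf to the per-policy copy, e.g.: */
    struct blkg_policy_data *pd = blkg->pd[blkg->plid];
    pd->stats.time += time;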
-rw-r--r--    block/blk-cgroup.c    150
-rw-r--r--    block/blk-cgroup.h    18
2 files changed, 102 insertions, 66 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3b6a0e1265aa..0eb39981e7c2 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -184,12 +184,14 @@ static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                             struct blkio_group *curr_blkg)
 {
-        if (blkio_blkg_waiting(&blkg->stats))
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+
+        if (blkio_blkg_waiting(&pd->stats))
                 return;
         if (blkg == curr_blkg)
                 return;
-        blkg->stats.start_group_wait_time = sched_clock();
-        blkio_mark_blkg_waiting(&blkg->stats);
+        pd->stats.start_group_wait_time = sched_clock();
+        blkio_mark_blkg_waiting(&pd->stats);
 }
 
 /* This should be called with the blkg->stats_lock held. */
@@ -222,24 +224,26 @@ static void blkio_end_empty_time(struct blkio_group_stats *stats)
 
 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        BUG_ON(blkio_blkg_idling(&blkg->stats));
-        blkg->stats.start_idle_time = sched_clock();
-        blkio_mark_blkg_idling(&blkg->stats);
+        BUG_ON(blkio_blkg_idling(&pd->stats));
+        pd->stats.start_idle_time = sched_clock();
+        blkio_mark_blkg_idling(&pd->stats);
         spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
 
 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
         unsigned long long now;
         struct blkio_group_stats *stats;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        stats = &blkg->stats;
+        stats = &pd->stats;
         if (blkio_blkg_idling(stats)) {
                 now = sched_clock();
                 if (time_after64(now, stats->start_idle_time))
@@ -252,11 +256,12 @@ EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
 
 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
         struct blkio_group_stats *stats;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        stats = &blkg->stats;
+        stats = &pd->stats;
         stats->avg_queue_size_sum +=
                 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
@@ -268,11 +273,12 @@ EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
 void blkiocg_set_start_empty_time(struct blkio_group *blkg)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
         struct blkio_group_stats *stats;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        stats = &blkg->stats;
+        stats = &pd->stats;
 
         if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
             stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
@@ -299,7 +305,9 @@ EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                   unsigned long dequeue)
 {
-        blkg->stats.dequeue += dequeue;
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+
+        pd->stats.dequeue += dequeue;
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #else
@@ -312,12 +320,13 @@ void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+        blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                         sync);
-        blkio_end_empty_time(&blkg->stats);
+        blkio_end_empty_time(&pd->stats);
         blkio_set_start_group_wait_time(blkg, curr_blkg);
         spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
@@ -326,10 +335,11 @@ EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                     bool direction, bool sync)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+        blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
                                  direction, sync);
         spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
@@ -338,12 +348,13 @@ EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
                                    unsigned long unaccounted_time)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         unsigned long flags;
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        blkg->stats.time += time;
+        pd->stats.time += time;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-        blkg->stats.unaccounted_time += unaccounted_time;
+        pd->stats.unaccounted_time += unaccounted_time;
 #endif
         spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
@@ -356,6 +367,7 @@ EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                    uint64_t bytes, bool direction, bool sync)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         struct blkio_group_stats_cpu *stats_cpu;
         unsigned long flags;
 
@@ -366,7 +378,7 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
          */
         local_irq_save(flags);
 
-        stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+        stats_cpu = this_cpu_ptr(pd->stats_cpu);
 
         u64_stats_update_begin(&stats_cpu->syncp);
         stats_cpu->sectors += bytes >> 9;
@@ -382,12 +394,13 @@ EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
         uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         struct blkio_group_stats *stats;
         unsigned long flags;
         unsigned long long now = sched_clock();
 
         spin_lock_irqsave(&blkg->stats_lock, flags);
-        stats = &blkg->stats;
+        stats = &pd->stats;
         if (time_after64(now, io_start_time))
                 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                 now - io_start_time, direction, sync);
@@ -402,6 +415,7 @@ EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                     bool sync)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         struct blkio_group_stats_cpu *stats_cpu;
         unsigned long flags;
 
@@ -412,7 +426,7 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
          */
         local_irq_save(flags);
 
-        stats_cpu = this_cpu_ptr(blkg->stats_cpu);
+        stats_cpu = this_cpu_ptr(pd->stats_cpu);
 
         u64_stats_update_begin(&stats_cpu->syncp);
         blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
@@ -430,11 +444,17 @@ EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
  */
 static void blkg_free(struct blkio_group *blkg)
 {
-        if (blkg) {
-                free_percpu(blkg->stats_cpu);
-                kfree(blkg->pd);
-                kfree(blkg);
+        struct blkg_policy_data *pd;
+
+        if (!blkg)
+                return;
+
+        pd = blkg->pd[blkg->plid];
+        if (pd) {
+                free_percpu(pd->stats_cpu);
+                kfree(pd);
         }
+        kfree(blkg);
 }
 
 /**
@@ -453,6 +473,7 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
                                       struct blkio_policy_type *pol)
 {
         struct blkio_group *blkg;
+        struct blkg_policy_data *pd;
 
         /* alloc and init base part */
         blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
@@ -466,23 +487,26 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
         blkg->refcnt = 1;
         cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
 
-        /* alloc per-policy data */
-        blkg->pd = kzalloc_node(sizeof(*blkg->pd) + pol->pdata_size, GFP_ATOMIC,
-                                q->node);
-        if (!blkg->pd) {
+        /* alloc per-policy data and attach it to blkg */
+        pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
+                          q->node);
+        if (!pd) {
                 blkg_free(blkg);
                 return NULL;
         }
 
+        blkg->pd[pol->plid] = pd;
+        pd->blkg = blkg;
+
         /* broken, read comment in the callsite */
-        blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-        if (!blkg->stats_cpu) {
+
+        pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
+        if (!pd->stats_cpu) {
                 blkg_free(blkg);
                 return NULL;
         }
 
-        /* attach pd to blkg and invoke per-policy init */
-        blkg->pd->blkg = blkg;
+        /* invoke per-policy init */
         pol->ops.blkio_init_group_fn(blkg);
         return blkg;
 }
@@ -659,6 +683,7 @@ EXPORT_SYMBOL_GPL(__blkg_release);
 
 static void blkio_reset_stats_cpu(struct blkio_group *blkg)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         struct blkio_group_stats_cpu *stats_cpu;
         int i, j, k;
         /*
@@ -673,7 +698,7 @@ static void blkio_reset_stats_cpu(struct blkio_group *blkg)
          * unless this becomes a real issue.
          */
         for_each_possible_cpu(i) {
-                stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
+                stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
                 stats_cpu->sectors = 0;
                 for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
                         for (k = 0; k < BLKIO_STAT_TOTAL; k++)
@@ -698,8 +723,10 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
         blkcg = cgroup_to_blkio_cgroup(cgroup);
         spin_lock_irq(&blkcg->lock);
         hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+                struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+
                 spin_lock(&blkg->stats_lock);
-                stats = &blkg->stats;
+                stats = &pd->stats;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
                 idling = blkio_blkg_idling(stats);
                 waiting = blkio_blkg_waiting(stats);
@@ -779,13 +806,14 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
                         enum stat_type_cpu type, enum stat_sub_type sub_type)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         int cpu;
         struct blkio_group_stats_cpu *stats_cpu;
         u64 val = 0, tval;
 
         for_each_possible_cpu(cpu) {
                 unsigned int start;
-                stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
+                stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);
 
                 do {
                         start = u64_stats_fetch_begin(&stats_cpu->syncp);
@@ -837,20 +865,21 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
                                 struct cgroup_map_cb *cb, const char *dname,
                                 enum stat_type type)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         uint64_t disk_total;
         char key_str[MAX_KEY_LEN];
         enum stat_sub_type sub_type;
 
         if (type == BLKIO_STAT_TIME)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                        blkg->stats.time, cb, dname);
+                                        pd->stats.time, cb, dname);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
         if (type == BLKIO_STAT_UNACCOUNTED_TIME)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.unaccounted_time, cb, dname);
+                                       pd->stats.unaccounted_time, cb, dname);
         if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
-                uint64_t sum = blkg->stats.avg_queue_size_sum;
-                uint64_t samples = blkg->stats.avg_queue_size_samples;
+                uint64_t sum = pd->stats.avg_queue_size_sum;
+                uint64_t samples = pd->stats.avg_queue_size_samples;
                 if (samples)
                         do_div(sum, samples);
                 else
@@ -860,26 +889,26 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
         }
         if (type == BLKIO_STAT_GROUP_WAIT_TIME)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                        blkg->stats.group_wait_time, cb, dname);
+                                        pd->stats.group_wait_time, cb, dname);
         if (type == BLKIO_STAT_IDLE_TIME)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                        blkg->stats.idle_time, cb, dname);
+                                        pd->stats.idle_time, cb, dname);
         if (type == BLKIO_STAT_EMPTY_TIME)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                        blkg->stats.empty_time, cb, dname);
+                                        pd->stats.empty_time, cb, dname);
         if (type == BLKIO_STAT_DEQUEUE)
                 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                        blkg->stats.dequeue, cb, dname);
+                                        pd->stats.dequeue, cb, dname);
 #endif
 
         for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                         sub_type++) {
                 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                    false);
-                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+                cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
         }
-        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
-                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+        disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
+                        pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
         blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                            false);
         cb->fill(cb, key_str, disk_total);
@@ -891,6 +920,7 @@ static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
 {
         struct gendisk *disk = NULL;
         struct blkio_group *blkg = NULL;
+        struct blkg_policy_data *pd;
         char *s[4], *p, *major_s = NULL, *minor_s = NULL;
         unsigned long major, minor;
         int i = 0, ret = -EINVAL;
@@ -950,35 +980,37 @@ static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
                 goto out_unlock;
         }
 
+        pd = blkg->pd[plid];
+
         switch (plid) {
         case BLKIO_POLICY_PROP:
                 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                      temp > BLKIO_WEIGHT_MAX)
                         goto out_unlock;
 
-                blkg->conf.weight = temp;
+                pd->conf.weight = temp;
                 blkio_update_group_weight(blkg, temp ?: blkcg->weight);
                 break;
         case BLKIO_POLICY_THROTL:
                 switch(fileid) {
                 case BLKIO_THROTL_read_bps_device:
-                        blkg->conf.bps[READ] = temp;
+                        pd->conf.bps[READ] = temp;
                         blkio_update_group_bps(blkg, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_write_bps_device:
-                        blkg->conf.bps[WRITE] = temp;
+                        pd->conf.bps[WRITE] = temp;
                         blkio_update_group_bps(blkg, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_read_iops_device:
                         if (temp > THROTL_IOPS_MAX)
                                 goto out_unlock;
-                        blkg->conf.iops[READ] = temp;
+                        pd->conf.iops[READ] = temp;
                         blkio_update_group_iops(blkg, temp ?: -1, fileid);
                         break;
                 case BLKIO_THROTL_write_iops_device:
                         if (temp > THROTL_IOPS_MAX)
                                 goto out_unlock;
-                        blkg->conf.iops[WRITE] = temp;
+                        pd->conf.iops[WRITE] = temp;
                         blkio_update_group_iops(blkg, temp ?: -1, fileid);
                         break;
                 }
@@ -1034,6 +1066,7 @@ static const char *blkg_dev_name(struct blkio_group *blkg)
 static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
                                    struct seq_file *m)
 {
+        struct blkg_policy_data *pd = blkg->pd[blkg->plid];
         const char *dname = blkg_dev_name(blkg);
         int fileid = BLKIOFILE_ATTR(cft->private);
         int rw = WRITE;
@@ -1043,25 +1076,25 @@ static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
 
         switch (blkg->plid) {
         case BLKIO_POLICY_PROP:
-                if (blkg->conf.weight)
+                if (pd->conf.weight)
                         seq_printf(m, "%s\t%u\n",
-                                   dname, blkg->conf.weight);
+                                   dname, pd->conf.weight);
                 break;
         case BLKIO_POLICY_THROTL:
                 switch (fileid) {
                 case BLKIO_THROTL_read_bps_device:
                         rw = READ;
                 case BLKIO_THROTL_write_bps_device:
-                        if (blkg->conf.bps[rw])
+                        if (pd->conf.bps[rw])
                                 seq_printf(m, "%s\t%llu\n",
-                                           dname, blkg->conf.bps[rw]);
+                                           dname, pd->conf.bps[rw]);
                         break;
                 case BLKIO_THROTL_read_iops_device:
                         rw = READ;
                 case BLKIO_THROTL_write_iops_device:
-                        if (blkg->conf.iops[rw])
+                        if (pd->conf.iops[rw])
                                 seq_printf(m, "%s\t%u\n",
-                                           dname, blkg->conf.iops[rw]);
+                                           dname, pd->conf.iops[rw]);
                         break;
                 }
                 break;
@@ -1243,9 +1276,12 @@ static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
         spin_lock_irq(&blkcg->lock);
         blkcg->weight = (unsigned int)val;
 
-        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
-                if (blkg->plid == plid && !blkg->conf.weight)
+        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+                struct blkg_policy_data *pd = blkg->pd[blkg->plid];
+
+                if (blkg->plid == plid && !pd->conf.weight)
                         blkio_update_group_weight(blkg, blkcg->weight);
+        }
 
         spin_unlock_irq(&blkcg->lock);
         spin_unlock(&blkio_list_lock);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 7da106843f01..5dffd436f30d 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -164,6 +164,13 @@ struct blkg_policy_data {
         /* the blkg this per-policy data belongs to */
         struct blkio_group *blkg;
 
+        /* Configuration */
+        struct blkio_group_conf conf;
+
+        struct blkio_group_stats stats;
+        /* Per cpu stats pointer */
+        struct blkio_group_stats_cpu __percpu *stats_cpu;
+
         /* pol->pdata_size bytes of private data used by policy impl */
         char pdata[] __aligned(__alignof__(unsigned long long));
 };
@@ -180,16 +187,9 @@ struct blkio_group {
         /* reference count */
         int refcnt;
 
-        /* Configuration */
-        struct blkio_group_conf conf;
-
         /* Need to serialize the stats in the case of reset/update */
         spinlock_t stats_lock;
-        struct blkio_group_stats stats;
-        /* Per cpu stats pointer */
-        struct blkio_group_stats_cpu __percpu *stats_cpu;
-
-        struct blkg_policy_data *pd;
+        struct blkg_policy_data *pd[BLKIO_NR_POLICIES];
 
         struct rcu_head rcu_head;
 };
@@ -249,7 +249,7 @@ extern void blkg_destroy_all(struct request_queue *q);
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol)
 {
-        return blkg ? blkg->pd->pdata : NULL;
+        return blkg ? blkg->pd[pol->plid]->pdata : NULL;
 }
 
 /**
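For context, blkg_to_pdata() is the accessor policies use to reach their
private data, and after this change it indexes blkg->pd by the policy's
plid. A hypothetical caller under the new layout might look like the
sketch below; struct my_policy_data and my_pd() are illustrative only and
not part of this patch:

    /* Illustrative sketch: a policy reaching its pdata after this
     * change.  "struct my_policy_data" and my_pd() are hypothetical. */
    struct my_policy_data {
            unsigned int weight;
    };

    static inline struct my_policy_data *my_pd(struct blkio_group *blkg,
                                               struct blkio_policy_type *pol)
    {
            /* blkg_to_pdata() now dereferences blkg->pd[pol->plid] */
            return blkg_to_pdata(blkg, pol);
    }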