author    Tejun Heo <tj@kernel.org>  2012-04-01 17:38:44 -0400
committer Tejun Heo <tj@kernel.org>  2012-04-01 17:38:44 -0400
commit    629ed0b10209ffc4e1d439e5508d52d5e3a090b8
tree      69caa493e4ea0714aff247c8415c4fef7ebfe996
parent    2ce4d50f9cfab40831eee5e51e950d5c4724994b
blkcg: move statistics update code to policies

As with the conf/stats file handling code, there's no reason for the stat
update code to live in blkcg core with policies calling in to update them.
The current organization is both inflexible and complex.

This patch moves the stat update code to the specific policies. All
blkiocg_update_*_stats() functions which deal with BLKIO_POLICY_PROP stats
are collapsed into their cfq_blkiocg_update_*_stats() counterparts.
blkiocg_update_dispatch_stats() is used by both policies and is duplicated
as throtl_update_dispatch_stats() and cfq_blkiocg_update_dispatch_stats().
This will be cleaned up later.

Signed-off-by: Tejun Heo <tj@kernel.org>
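One detail worth noting in the new interface: the old
blkiocg_update_dispatch_stats() took separate rw and sync booleans, while
throtl_update_dispatch_stats() takes the raw bio->bi_rw flag word and lets
the rwstat helper decode it. A sketch of what blkg_rwstat_add() looks like
around this series (paraphrased, not part of this patch's diff; the REQ_*
decoding is the point):

/*
 * Sketch of the rwstat update helper the new code relies on. The
 * REQ_WRITE/REQ_SYNC bits in the raw flags select which of the four
 * counters to bump, which is why callers can pass bio->bi_rw directly
 * instead of precomputed rw/sync booleans.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;

	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}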
Diffstat (limited to 'block/blk-throttle.c')
 block/blk-throttle.c | 37 ++++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fb6f25778fb2..5d647edc02a1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -562,17 +562,42 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	return 0;
 }
 
+static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
+					 int rw)
+{
+	struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_THROTL];
+	struct blkio_group_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (pd->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(pd->stats_cpu);
+
+	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
+	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+	local_irq_restore(flags);
+}
+
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
-	bool sync = rw_is_sync(bio->bi_rw);
 
 	/* Charge the bio to the group */
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	blkiocg_update_dispatch_stats(tg_to_blkg(tg), &blkio_policy_throtl,
-				      bio->bi_size, rw, sync);
+	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -1012,10 +1037,8 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
 		if (tg_no_rule_group(tg, rw)) {
-			blkiocg_update_dispatch_stats(tg_to_blkg(tg),
-					&blkio_policy_throtl,
-					bio->bi_size, rw,
-					rw_is_sync(bio->bi_rw));
+			throtl_update_dispatch_stats(tg_to_blkg(tg),
+						     bio->bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
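The "disabling interrupts" comment in throtl_update_dispatch_stats() refers
to the u64_stats seqcount scheme that blkg_stat_add()/blkg_rwstat_add() use
underneath: on 32-bit a 64-bit counter update is not a single store, so a
writer must exclude other writers on the same CPU (including one running
from interrupt context), while readers retry until they observe a
consistent snapshot. A minimal, self-contained sketch of that pattern,
using hypothetical my_stat names rather than anything from this patch:

#include <linux/u64_stats_sync.h>

struct my_stat {
	u64 cnt;
	struct u64_stats_sync syncp;	/* seqcount on 32-bit, no-op on 64-bit */
};

/*
 * Writer side: the caller must exclude other writers on this CPU,
 * e.g. by disabling interrupts as throtl_update_dispatch_stats() does.
 */
static void my_stat_add(struct my_stat *stat, u64 val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/* Reader side: retry until a consistent 64-bit snapshot is observed. */
static u64 my_stat_read(struct my_stat *stat)
{
	unsigned int seq;
	u64 v;

	do {
		seq = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, seq));

	return v;
}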