author		Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
committer	Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
commit		41b38b6d540f951c49315d8573e6f6195a6e736d (patch)
tree		e667cec0bba6e7678810ff0087c96547a21a45c7
parent		629ed0b10209ffc4e1d439e5508d52d5e3a090b8 (diff)
blkcg: cfq doesn't need per-cpu dispatch stats
blkio_group_stats_cpu is used to count dispatch stats using per-cpu
counters.  This is used by both blk-throtl and cfq-iosched but the
sharing is rather silly.

* cfq-iosched doesn't need per-cpu dispatch stats.  cfq always updates
  those stats while holding queue_lock.

* blk-throtl needs per-cpu dispatch stats but only service_bytes and
  serviced.  It doesn't make use of sectors.

This patch makes cfq add and use global stats for service_bytes,
serviced and sectors, removes per-cpu sectors counter and moves
per-cpu stat printing code to blk-throttle.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
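
The tradeoff the message describes can be made concrete with a small
user-space analogue: sharded (per-cpu-style) counters keep the update
path free of shared locks and cross-CPU cacheline traffic but make the
reader sum every shard, whereas a counter that is only ever touched
under a lock the caller already holds, which is cfq's situation with
queue_lock, can be one plain variable.  This is a hedged sketch, not
kernel code; the types, names, and the pthread mutex standing in for
queue_lock are all illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SHARDS 4	/* stand-in for per-cpu slots */

/* Per-cpu style: each "CPU" bumps its own slot, so the update path
 * needs no shared lock; the read path must sum all the shards. */
struct sharded_stat { uint64_t shard[NR_SHARDS]; };

static void sharded_add(struct sharded_stat *s, int cpu, uint64_t v)
{
	s->shard[cpu] += v;		/* local, contention-free */
}

static uint64_t sharded_read(const struct sharded_stat *s)
{
	uint64_t sum = 0;
	for (int cpu = 0; cpu < NR_SHARDS; cpu++)
		sum += s->shard[cpu];	/* reader pays the cost */
	return sum;
}

/* cfq's situation: every update already happens with queue_lock held,
 * so one plain counter suffices and reads are trivial.  The mutex is
 * only a stand-in; in cfq the caller holds the lock already. */
struct locked_stat {
	pthread_mutex_t lock;
	uint64_t val;
};

static void locked_add(struct locked_stat *s, uint64_t v)
{
	pthread_mutex_lock(&s->lock);
	s->val += v;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct sharded_stat sh = { { 0 } };
	struct locked_stat gl = { PTHREAD_MUTEX_INITIALIZER, 0 };

	sharded_add(&sh, 0, 512);
	sharded_add(&sh, 3, 1024);
	locked_add(&gl, 1536);

	printf("sharded=%llu locked=%llu\n",
	       (unsigned long long)sharded_read(&sh),
	       (unsigned long long)gl.val);
	return 0;
}

Under this model the patch simply moves cfq from the first shape to
the second, since cfq pays for queue_lock anyway.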
-rw-r--r--	block/blk-cgroup.c	63
-rw-r--r--	block/blk-cgroup.h	12
-rw-r--r--	block/blk-throttle.c	31
-rw-r--r--	block/cfq-iosched.c	37
4 files changed, 48 insertions(+), 95 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 821a0a393e85..19ee29f1b7c5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -390,7 +390,6 @@ static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
 
 		blkg_rwstat_reset(&sc->service_bytes);
 		blkg_rwstat_reset(&sc->serviced);
-		blkg_stat_reset(&sc->sectors);
 	}
 }
 
@@ -417,6 +416,8 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		struct blkio_group_stats *stats = &pd->stats;
 
 		/* queued stats shouldn't be cleared */
+		blkg_rwstat_reset(&stats->service_bytes);
+		blkg_rwstat_reset(&stats->serviced);
 		blkg_rwstat_reset(&stats->merged);
 		blkg_rwstat_reset(&stats->service_time);
 		blkg_rwstat_reset(&stats->wait_time);
@@ -577,66 +578,6 @@ int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
 }
 EXPORT_SYMBOL_GPL(blkcg_print_rwstat);
 
-static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
-				struct blkg_policy_data *pd, int off)
-{
-	u64 v = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
-
-		v += blkg_stat_read((void *)sc + off);
-	}
-
-	return __blkg_prfill_u64(sf, pd, v);
-}
-
-static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
-				  struct blkg_policy_data *pd, int off)
-{
-	struct blkg_rwstat rwstat = { }, tmp;
-	int i, cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
-
-		tmp = blkg_rwstat_read((void *)sc + off);
-		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			rwstat.cnt[i] += tmp.cnt[i];
-	}
-
-	return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
-int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
-			 struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
-			  BLKCG_STAT_POL(cft->private),
-			  BLKCG_STAT_OFF(cft->private), false);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);
-
-/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
-int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			   struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
-			  BLKCG_STAT_POL(cft->private),
-			  BLKCG_STAT_OFF(cft->private), true);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);
-
 /**
  * blkg_conf_prep - parse and prepare for per-blkg config update
  * @blkcg: target block cgroup
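
The two helpers deleted above are the read-side half of the per-cpu
scheme: the stat to print is named only by a byte offset carried in
cft->private (unpacked via BLKCG_STAT_POL()/BLKCG_STAT_OFF()), and its
value is the sum of that field across every possible CPU.  Below is a
user-space sketch of the same offset-driven summation, with
illustrative types standing in for blkg_rwstat and the per-cpu
accessors.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4
#define NR_DIRS 2	/* read/write, a stand-in for BLKG_RWSTAT_NR */

/* Mimics the per-cpu stats block: several counters per CPU. */
struct stats_cpu {
	uint64_t service_bytes[NR_DIRS];
	uint64_t serviced[NR_DIRS];
};

static struct stats_cpu per_cpu_stats[NR_CPUS];

/* Like the removed blkg_prfill_cpu_rwstat(): the caller passes the
 * byte offset of the field to aggregate, so one helper serves every
 * counter in the structure. */
static void cpu_rwstat_sum(size_t off, uint64_t out[NR_DIRS])
{
	out[0] = out[1] = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		uint64_t *field =
			(uint64_t *)((char *)&per_cpu_stats[cpu] + off);
		for (int i = 0; i < NR_DIRS; i++)
			out[i] += field[i];
	}
}

int main(void)
{
	uint64_t total[NR_DIRS];

	per_cpu_stats[0].serviced[0] = 3;	/* reads on "cpu" 0 */
	per_cpu_stats[2].serviced[1] = 5;	/* writes on "cpu" 2 */

	cpu_rwstat_sum(offsetof(struct stats_cpu, serviced), total);
	printf("serviced: read=%llu write=%llu\n",
	       (unsigned long long)total[0],
	       (unsigned long long)total[1]);
	return 0;
}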
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 0b0a176ee007..c82de47ae69f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -65,6 +65,10 @@ struct blkg_rwstat {
 };
 
 struct blkio_group_stats {
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
 	/* number of ios merged */
 	struct blkg_rwstat		merged;
 	/* total time spent on device in ns, may not be accurate w/ queueing */
@@ -73,6 +77,8 @@ struct blkio_group_stats {
 	struct blkg_rwstat		wait_time;
 	/* number of IOs queued up */
 	struct blkg_rwstat		queued;
+	/* total sectors transferred */
+	struct blkg_stat		sectors;
 	/* total disk time and nr sectors dispatched by this group */
 	struct blkg_stat		time;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
@@ -104,8 +110,6 @@ struct blkio_group_stats_cpu {
 	struct blkg_rwstat		service_bytes;
 	/* total IOs serviced, post merge */
 	struct blkg_rwstat		serviced;
-	/* total sectors transferred */
-	struct blkg_stat		sectors;
 };
 
 struct blkio_group_conf {
@@ -183,10 +187,6 @@ int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
 		     struct seq_file *sf);
 int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
 		       struct seq_file *sf);
-int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
-			 struct seq_file *sf);
-int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			   struct seq_file *sf);
 
 struct blkg_conf_ctx {
 	struct gendisk			*disk;
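
For reading these hunks: blkg_rwstat is a counter split by request
class, indexed up to BLKG_RWSTAT_NR elsewhere in this patch, while
blkg_stat is a single counter.  The following is a minimal compilable
rendering of the shapes the hunks above move around; the field layouts
and the four-way split are assumptions for illustration, not copied
from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's stat primitives. */
enum { BLKG_RWSTAT_NR = 4 };	/* assumed: read/write x sync/async */

struct blkg_stat_sketch   { uint64_t cnt; };
struct blkg_rwstat_sketch { uint64_t cnt[BLKG_RWSTAT_NR]; };

/* After this patch the global stats own the dispatch counters... */
struct group_stats_sketch {
	struct blkg_rwstat_sketch service_bytes;  /* total bytes transferred */
	struct blkg_rwstat_sketch serviced;       /* total IOs serviced, post merge */
	struct blkg_stat_sketch   sectors;        /* total sectors transferred */
};

/* ...while the per-cpu block keeps only what blk-throtl needs. */
struct group_stats_cpu_sketch {
	struct blkg_rwstat_sketch service_bytes;
	struct blkg_rwstat_sketch serviced;
};

int main(void)
{
	printf("global stats: %zu bytes, per-cpu stats: %zu bytes per CPU\n",
	       sizeof(struct group_stats_sketch),
	       sizeof(struct group_stats_cpu_sketch));
	return 0;
}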
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5d647edc02a1..cb259bc46f43 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -582,7 +582,6 @@ static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
 
 	stats_cpu = this_cpu_ptr(pd->stats_cpu);
 
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
 	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
 
@@ -843,6 +842,36 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 	throtl_schedule_delayed_work(td, 0);
 }
 
+static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
+				  struct blkg_policy_data *pd, int off)
+{
+	struct blkg_rwstat rwstat = { }, tmp;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkio_group_stats_cpu *sc =
+			per_cpu_ptr(pd->stats_cpu, cpu);
+
+		tmp = blkg_rwstat_read((void *)sc + off);
+		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+			rwstat.cnt[i] += tmp.cnt[i];
+	}
+
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+
+/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
+static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+				  struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
+			  BLKCG_STAT_POL(cft->private),
+			  BLKCG_STAT_OFF(cft->private), true);
+	return 0;
+}
+
 static u64 blkg_prfill_conf_u64(struct seq_file *sf,
 				struct blkg_policy_data *pd, int off)
 {
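
blk-throttle keeps the per-cpu update path; the commit message implies
that, unlike cfq, it does not update dispatch stats under queue_lock,
and a plain shared counter incremented concurrently loses updates.
The sketch below shows that hazard in user space, with threads
standing in for CPUs; everything here is illustrative, and the race on
`shared` is deliberate.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_THREADS 4
#define ITERS 1000000

/* Shared plain counter: concurrent read-modify-write updates race and
 * drop increments -- the problem per-cpu counters (plus irq/preempt
 * exclusion on the local CPU in the kernel) are there to avoid. */
static uint64_t shared;

/* One shard per thread: each writer touches only its own slot. */
static uint64_t shard[NR_THREADS];

static void *worker(void *arg)
{
	int id = (int)(intptr_t)arg;

	for (int i = 0; i < ITERS; i++) {
		shared++;	/* racy on purpose */
		shard[id]++;	/* private, always exact */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_THREADS];
	uint64_t sum = 0;

	for (int i = 0; i < NR_THREADS; i++)
		pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
	for (int i = 0; i < NR_THREADS; i++)
		pthread_join(t[i], NULL);
	for (int i = 0; i < NR_THREADS; i++)
		sum += shard[i];

	/* "shared" typically falls short of 4000000; the shards never do. */
	printf("shared=%llu sharded=%llu\n",
	       (unsigned long long)shared, (unsigned long long)sum);
	return 0;
}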
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 49913804e8dd..effd89489506 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -624,29 +624,12 @@ static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 				 struct blkio_policy_type *pol, uint64_t bytes,
 				 bool direction, bool sync)
 {
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	struct blkio_group_stats_cpu *stats_cpu;
-	unsigned long flags;
 
-	/* If per cpu stats are not allocated yet, don't do any accounting. */
-	if (pd->stats_cpu == NULL)
-		return;
-
-	/*
-	 * Disabling interrupts to provide mutual exclusion between two
-	 * writes on same cpu. It probably is not needed for 64bit. Not
-	 * optimizing that case yet.
-	 */
-	local_irq_save(flags);
-
-	stats_cpu = this_cpu_ptr(pd->stats_cpu);
-
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
-	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
-	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
-
-	local_irq_restore(flags);
+	blkg_stat_add(&stats->sectors, bytes >> 9);
+	blkg_rwstat_add(&stats->serviced, rw, 1);
+	blkg_rwstat_add(&stats->service_bytes, rw, bytes);
 }
 
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
@@ -1520,20 +1503,20 @@ static struct cftype cfq_blkcg_files[] = {
 	{
 		.name = "sectors",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, sectors)),
-		.read_seq_string = blkcg_print_cpu_stat,
+				offsetof(struct blkio_group_stats, sectors)),
+		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "io_service_bytes",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct blkio_group_stats, service_bytes)),
+		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_serviced",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct blkio_group_stats, serviced)),
+		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_service_time",
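
The cftype table above is the piece that ties everything together:
each cgroup file carries a byte offset (offsetof into the stats
struct) plus a printer callback, so retargeting cfq's files from
per-cpu to global stats is a two-field change per entry.  Here is a
user-space sketch of that table-driven dispatch, with hypothetical
struct and function names.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Global (non-per-cpu) stats block, as cfq now uses. */
struct group_stats {
	uint64_t sectors;
	uint64_t service_bytes;
	uint64_t serviced;
};

/* A file-table entry in the spirit of struct cftype: the file name,
 * the byte offset of the stat it exposes, and the printer to run.
 * Swapping per-cpu printers for plain ones, as the hunk above does,
 * only means changing the offset's base struct and the callback. */
struct stat_file {
	const char *name;
	size_t off;
	void (*print)(const struct group_stats *gs, size_t off);
};

static void print_stat(const struct group_stats *gs, size_t off)
{
	const uint64_t *v = (const uint64_t *)((const char *)gs + off);

	printf("%llu\n", (unsigned long long)*v);
}

static const struct stat_file files[] = {
	{ "sectors",          offsetof(struct group_stats, sectors),       print_stat },
	{ "io_service_bytes", offsetof(struct group_stats, service_bytes), print_stat },
	{ "io_serviced",      offsetof(struct group_stats, serviced),      print_stat },
};

int main(void)
{
	struct group_stats gs = {
		.sectors = 8, .service_bytes = 4096, .serviced = 2,
	};

	for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		printf("%s: ", files[i].name);
		files[i].print(&gs, files[i].off);
	}
	return 0;
}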