author | Tejun Heo <tj@kernel.org> | 2012-04-01 17:38:42 -0400
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2012-04-01 17:38:42 -0400
commit | d3d32e69fa368e131b25ee68806aa3fd3916cec1 (patch) |
tree | f6413e70f38cffb3b363fa831b5f868d90bb6c80 /block/blk-cgroup.c |
parent | edcb0722c654947908388df660791abd41e6617c (diff) |
blkcg: restructure statistics printing
blkcg stats handling is a mess. None of the stats has much to do with
blkcg core but they are all implemented in blkcg core. Code sharing
is achieved by mixing common code with hard-coded cases for each stat
counter.
This patch restructures statistics printing such that
* Common logic exists as helper functions and specific print functions
use the helpers to implement specific cases.
* Printing functions serving multiple counters don't require hardcoded
switching on specific counters.
* Printing uses read_seq_string callback (other methods will be phased
out).
This change enables further cleanups and relocating stats code to the
policy implementation it belongs to.
Signed-off-by: Tejun Heo <tj@kernel.org>
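The restructured scheme composes in two layers: a per-blkg "prfill" callback formats one group's value through __blkg_prfill_u64() or __blkg_prfill_rwstat(), and blkcg_print_blkgs() walks every blkg of the cgroup under blkcg->lock, invoking the callback and optionally printing the sum as a trailing "Total" line. A minimal sketch of a stat file served this way follows; it mirrors blkg_prfill_stat()/blkcg_print_stat() from the diff below, the example_* names are hypothetical, and BLKCG_STAT_POL()/BLKCG_STAT_OFF() are the accessors used throughout the patch.

```c
/*
 * Sketch only: the example_* names are hypothetical.  The pattern mirrors
 * blkg_prfill_stat()/blkcg_print_stat() introduced by this patch.
 */
static u64 example_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	/* read the blkg_stat located @off bytes into pd->stats ... */
	u64 v = blkg_stat_read((void *)&pd->stats + off);

	/* ... and emit it as "<devname> <value>\n" for this blkg */
	return __blkg_prfill_u64(sf, pd, v);
}

static int example_print(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	/* run the prfill callback over every blkg that has data for the policy */
	blkcg_print_blkgs(sf, blkcg, example_prfill,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
```

The cftype entry then only needs to pack the policy id and the stat's offset into ->private and point .read_seq_string at the print function, as the blkio_files[] hunks at the end of the diff do.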
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r-- | block/blk-cgroup.c | 557 |
1 file changed, 239 insertions(+), 318 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 09ac462ba89e..951e7f3a8c89 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -753,186 +753,227 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
753 | return 0; | 753 | return 0; |
754 | } | 754 | } |
755 | 755 | ||
756 | static void blkio_get_key_name(enum blkg_rwstat_type type, const char *dname, | 756 | static const char *blkg_dev_name(struct blkio_group *blkg) |
757 | char *str, int chars_left, bool diskname_only) | 757 | { |
758 | { | 758 | /* some drivers (floppy) instantiate a queue w/o disk registered */ |
759 | snprintf(str, chars_left, "%s", dname); | 759 | if (blkg->q->backing_dev_info.dev) |
760 | chars_left -= strlen(str); | 760 | return dev_name(blkg->q->backing_dev_info.dev); |
761 | if (chars_left <= 0) { | 761 | return NULL; |
762 | printk(KERN_WARNING | ||
763 | "Possibly incorrect cgroup stat display format"); | ||
764 | return; | ||
765 | } | ||
766 | if (diskname_only) | ||
767 | return; | ||
768 | switch (type) { | ||
769 | case BLKG_RWSTAT_READ: | ||
770 | strlcat(str, " Read", chars_left); | ||
771 | break; | ||
772 | case BLKG_RWSTAT_WRITE: | ||
773 | strlcat(str, " Write", chars_left); | ||
774 | break; | ||
775 | case BLKG_RWSTAT_SYNC: | ||
776 | strlcat(str, " Sync", chars_left); | ||
777 | break; | ||
778 | case BLKG_RWSTAT_ASYNC: | ||
779 | strlcat(str, " Async", chars_left); | ||
780 | break; | ||
781 | case BLKG_RWSTAT_TOTAL: | ||
782 | strlcat(str, " Total", chars_left); | ||
783 | break; | ||
784 | default: | ||
785 | strlcat(str, " Invalid", chars_left); | ||
786 | } | ||
787 | } | 762 | } |
788 | 763 | ||
789 | static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid, | 764 | /** |
790 | enum stat_type_cpu type, | 765 | * blkcg_print_blkgs - helper for printing per-blkg data |
791 | enum blkg_rwstat_type sub_type) | 766 | * @sf: seq_file to print to |
767 | * @blkcg: blkcg of interest | ||
768 | * @prfill: fill function to print out a blkg | ||
769 | * @pol: policy in question | ||
770 | * @data: data to be passed to @prfill | ||
771 | * @show_total: to print out sum of prfill return values or not | ||
772 | * | ||
773 | * This function invokes @prfill on each blkg of @blkcg if pd for the | ||
774 | * policy specified by @pol exists. @prfill is invoked with @sf, the | ||
775 | * policy data and @data. If @show_total is %true, the sum of the return | ||
776 | * values from @prfill is printed with "Total" label at the end. | ||
777 | * | ||
778 | * This is to be used to construct print functions for | ||
779 | * cftype->read_seq_string method. | ||
780 | */ | ||
781 | static void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg, | ||
782 | u64 (*prfill)(struct seq_file *, | ||
783 | struct blkg_policy_data *, int), | ||
784 | int pol, int data, bool show_total) | ||
792 | { | 785 | { |
793 | struct blkg_policy_data *pd = blkg->pd[plid]; | 786 | struct blkio_group *blkg; |
794 | u64 val = 0; | 787 | struct hlist_node *n; |
795 | int cpu; | 788 | u64 total = 0; |
796 | 789 | ||
797 | if (pd->stats_cpu == NULL) | 790 | spin_lock_irq(&blkcg->lock); |
798 | return val; | 791 | hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) |
792 | if (blkg->pd[pol]) | ||
793 | total += prfill(sf, blkg->pd[pol], data); | ||
794 | spin_unlock_irq(&blkcg->lock); | ||
795 | |||
796 | if (show_total) | ||
797 | seq_printf(sf, "Total %llu\n", (unsigned long long)total); | ||
798 | } | ||
799 | |||
800 | /** | ||
801 | * __blkg_prfill_u64 - prfill helper for a single u64 value | ||
802 | * @sf: seq_file to print to | ||
803 | * @pd: policy data of interest | ||
804 | * @v: value to print | ||
805 | * | ||
806 | * Print @v to @sf for the device associated with @pd. | ||
807 | */ | ||
808 | static u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, | ||
809 | u64 v) | ||
810 | { | ||
811 | const char *dname = blkg_dev_name(pd->blkg); | ||
812 | |||
813 | if (!dname) | ||
814 | return 0; | ||
815 | |||
816 | seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v); | ||
817 | return v; | ||
818 | } | ||
819 | |||
820 | /** | ||
821 | * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat | ||
822 | * @sf: seq_file to print to | ||
823 | * @pd: policy data of interest | ||
824 | * @rwstat: rwstat to print | ||
825 | * | ||
826 | * Print @rwstat to @sf for the device associated with @pd. | ||
827 | */ | ||
828 | static u64 __blkg_prfill_rwstat(struct seq_file *sf, | ||
829 | struct blkg_policy_data *pd, | ||
830 | const struct blkg_rwstat *rwstat) | ||
831 | { | ||
832 | static const char *rwstr[] = { | ||
833 | [BLKG_RWSTAT_READ] = "Read", | ||
834 | [BLKG_RWSTAT_WRITE] = "Write", | ||
835 | [BLKG_RWSTAT_SYNC] = "Sync", | ||
836 | [BLKG_RWSTAT_ASYNC] = "Async", | ||
837 | }; | ||
838 | const char *dname = blkg_dev_name(pd->blkg); | ||
839 | u64 v; | ||
840 | int i; | ||
841 | |||
842 | if (!dname) | ||
843 | return 0; | ||
844 | |||
845 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | ||
846 | seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], | ||
847 | (unsigned long long)rwstat->cnt[i]); | ||
848 | |||
849 | v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE]; | ||
850 | seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); | ||
851 | return v; | ||
852 | } | ||
853 | |||
854 | static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, | ||
855 | int off) | ||
856 | { | ||
857 | return __blkg_prfill_u64(sf, pd, | ||
858 | blkg_stat_read((void *)&pd->stats + off)); | ||
859 | } | ||
860 | |||
861 | static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | ||
862 | int off) | ||
863 | { | ||
864 | struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off); | ||
865 | |||
866 | return __blkg_prfill_rwstat(sf, pd, &rwstat); | ||
867 | } | ||
868 | |||
869 | /* print blkg_stat specified by BLKCG_STAT_PRIV() */ | ||
870 | static int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft, | ||
871 | struct seq_file *sf) | ||
872 | { | ||
873 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); | ||
874 | |||
875 | blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, | ||
876 | BLKCG_STAT_POL(cft->private), | ||
877 | BLKCG_STAT_OFF(cft->private), false); | ||
878 | return 0; | ||
879 | } | ||
880 | |||
881 | /* print blkg_rwstat specified by BLKCG_STAT_PRIV() */ | ||
882 | static int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft, | ||
883 | struct seq_file *sf) | ||
884 | { | ||
885 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); | ||
886 | |||
887 | blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, | ||
888 | BLKCG_STAT_POL(cft->private), | ||
889 | BLKCG_STAT_OFF(cft->private), true); | ||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | static u64 blkg_prfill_cpu_stat(struct seq_file *sf, | ||
894 | struct blkg_policy_data *pd, int off) | ||
895 | { | ||
896 | u64 v = 0; | ||
897 | int cpu; | ||
799 | 898 | ||
800 | for_each_possible_cpu(cpu) { | 899 | for_each_possible_cpu(cpu) { |
801 | struct blkio_group_stats_cpu *stats_cpu = | 900 | struct blkio_group_stats_cpu *sc = |
802 | per_cpu_ptr(pd->stats_cpu, cpu); | 901 | per_cpu_ptr(pd->stats_cpu, cpu); |
803 | struct blkg_rwstat rws; | ||
804 | 902 | ||
805 | switch (type) { | 903 | v += blkg_stat_read((void *)sc + off); |
806 | case BLKIO_STAT_CPU_SECTORS: | ||
807 | val += blkg_stat_read(&stats_cpu->sectors); | ||
808 | break; | ||
809 | case BLKIO_STAT_CPU_SERVICE_BYTES: | ||
810 | rws = blkg_rwstat_read(&stats_cpu->service_bytes); | ||
811 | val += rws.cnt[sub_type]; | ||
812 | break; | ||
813 | case BLKIO_STAT_CPU_SERVICED: | ||
814 | rws = blkg_rwstat_read(&stats_cpu->serviced); | ||
815 | val += rws.cnt[sub_type]; | ||
816 | break; | ||
817 | } | ||
818 | } | 904 | } |
819 | 905 | ||
820 | return val; | 906 | return __blkg_prfill_u64(sf, pd, v); |
821 | } | 907 | } |
822 | 908 | ||
823 | static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid, | 909 | static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf, |
824 | struct cgroup_map_cb *cb, const char *dname, | 910 | struct blkg_policy_data *pd, int off) |
825 | enum stat_type_cpu type) | ||
826 | { | 911 | { |
827 | uint64_t disk_total, val; | 912 | struct blkg_rwstat rwstat = { }, tmp; |
828 | char key_str[MAX_KEY_LEN]; | 913 | int i, cpu; |
829 | enum blkg_rwstat_type sub_type; | ||
830 | 914 | ||
831 | if (type == BLKIO_STAT_CPU_SECTORS) { | 915 | for_each_possible_cpu(cpu) { |
832 | val = blkio_read_stat_cpu(blkg, plid, type, 0); | 916 | struct blkio_group_stats_cpu *sc = |
833 | blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true); | 917 | per_cpu_ptr(pd->stats_cpu, cpu); |
834 | cb->fill(cb, key_str, val); | ||
835 | return val; | ||
836 | } | ||
837 | 918 | ||
838 | for (sub_type = BLKG_RWSTAT_READ; sub_type < BLKG_RWSTAT_NR; | 919 | tmp = blkg_rwstat_read((void *)sc + off); |
839 | sub_type++) { | 920 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
840 | blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN, | 921 | rwstat.cnt[i] += tmp.cnt[i]; |
841 | false); | ||
842 | val = blkio_read_stat_cpu(blkg, plid, type, sub_type); | ||
843 | cb->fill(cb, key_str, val); | ||
844 | } | 922 | } |
845 | 923 | ||
846 | disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKG_RWSTAT_READ) + | 924 | return __blkg_prfill_rwstat(sf, pd, &rwstat); |
847 | blkio_read_stat_cpu(blkg, plid, type, BLKG_RWSTAT_WRITE); | ||
848 | |||
849 | blkio_get_key_name(BLKG_RWSTAT_TOTAL, dname, key_str, MAX_KEY_LEN, | ||
850 | false); | ||
851 | cb->fill(cb, key_str, disk_total); | ||
852 | return disk_total; | ||
853 | } | 925 | } |
854 | 926 | ||
855 | static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid, | 927 | /* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */ |
856 | struct cgroup_map_cb *cb, const char *dname, | 928 | static int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft, |
857 | enum stat_type type) | 929 | struct seq_file *sf) |
858 | { | 930 | { |
859 | struct blkio_group_stats *stats = &blkg->pd[plid]->stats; | 931 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); |
860 | uint64_t v = 0, disk_total = 0; | ||
861 | char key_str[MAX_KEY_LEN]; | ||
862 | struct blkg_rwstat rws = { }; | ||
863 | int st; | ||
864 | 932 | ||
865 | if (type >= BLKIO_STAT_ARR_NR) { | 933 | blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat, |
866 | switch (type) { | 934 | BLKCG_STAT_POL(cft->private), |
867 | case BLKIO_STAT_TIME: | 935 | BLKCG_STAT_OFF(cft->private), false); |
868 | v = blkg_stat_read(&stats->time); | 936 | return 0; |
869 | break; | 937 | } |
870 | #ifdef CONFIG_DEBUG_BLK_CGROUP | ||
871 | case BLKIO_STAT_UNACCOUNTED_TIME: | ||
872 | v = blkg_stat_read(&stats->unaccounted_time); | ||
873 | break; | ||
874 | case BLKIO_STAT_AVG_QUEUE_SIZE: { | ||
875 | uint64_t samples; | ||
876 | 938 | ||
877 | samples = blkg_stat_read(&stats->avg_queue_size_samples); | 939 | /* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */ |
878 | if (samples) { | 940 | static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft, |
879 | v = blkg_stat_read(&stats->avg_queue_size_sum); | 941 | struct seq_file *sf) |
880 | do_div(v, samples); | 942 | { |
881 | } | 943 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); |
882 | break; | ||
883 | } | ||
884 | case BLKIO_STAT_IDLE_TIME: | ||
885 | v = blkg_stat_read(&stats->idle_time); | ||
886 | break; | ||
887 | case BLKIO_STAT_EMPTY_TIME: | ||
888 | v = blkg_stat_read(&stats->empty_time); | ||
889 | break; | ||
890 | case BLKIO_STAT_DEQUEUE: | ||
891 | v = blkg_stat_read(&stats->dequeue); | ||
892 | break; | ||
893 | case BLKIO_STAT_GROUP_WAIT_TIME: | ||
894 | v = blkg_stat_read(&stats->group_wait_time); | ||
895 | break; | ||
896 | #endif | ||
897 | default: | ||
898 | WARN_ON_ONCE(1); | ||
899 | } | ||
900 | 944 | ||
901 | blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true); | 945 | blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat, |
902 | cb->fill(cb, key_str, v); | 946 | BLKCG_STAT_POL(cft->private), |
903 | return v; | 947 | BLKCG_STAT_OFF(cft->private), true); |
904 | } | 948 | return 0; |
949 | } | ||
905 | 950 | ||
906 | switch (type) { | 951 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
907 | case BLKIO_STAT_MERGED: | 952 | static u64 blkg_prfill_avg_queue_size(struct seq_file *sf, |
908 | rws = blkg_rwstat_read(&stats->merged); | 953 | struct blkg_policy_data *pd, int off) |
909 | break; | 954 | { |
910 | case BLKIO_STAT_SERVICE_TIME: | 955 | u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples); |
911 | rws = blkg_rwstat_read(&stats->service_time); | 956 | u64 v = 0; |
912 | break; | ||
913 | case BLKIO_STAT_WAIT_TIME: | ||
914 | rws = blkg_rwstat_read(&stats->wait_time); | ||
915 | break; | ||
916 | case BLKIO_STAT_QUEUED: | ||
917 | rws = blkg_rwstat_read(&stats->queued); | ||
918 | break; | ||
919 | default: | ||
920 | WARN_ON_ONCE(true); | ||
921 | break; | ||
922 | } | ||
923 | 957 | ||
924 | for (st = BLKG_RWSTAT_READ; st < BLKG_RWSTAT_NR; st++) { | 958 | if (samples) { |
925 | blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false); | 959 | v = blkg_stat_read(&pd->stats.avg_queue_size_sum); |
926 | cb->fill(cb, key_str, rws.cnt[st]); | 960 | do_div(v, samples); |
927 | if (st == BLKG_RWSTAT_READ || st == BLKG_RWSTAT_WRITE) | ||
928 | disk_total += rws.cnt[st]; | ||
929 | } | 961 | } |
962 | __blkg_prfill_u64(sf, pd, v); | ||
963 | return 0; | ||
964 | } | ||
965 | |||
966 | /* print avg_queue_size */ | ||
967 | static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft, | ||
968 | struct seq_file *sf) | ||
969 | { | ||
970 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); | ||
930 | 971 | ||
931 | blkio_get_key_name(BLKG_RWSTAT_TOTAL, dname, key_str, MAX_KEY_LEN, | 972 | blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size, |
932 | false); | 973 | BLKIO_POLICY_PROP, 0, false); |
933 | cb->fill(cb, key_str, disk_total); | 974 | return 0; |
934 | return disk_total; | ||
935 | } | 975 | } |
976 | #endif /* CONFIG_DEBUG_BLK_CGROUP */ | ||
936 | 977 | ||
937 | static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid, | 978 | static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid, |
938 | int fileid, struct blkio_cgroup *blkcg) | 979 | int fileid, struct blkio_cgroup *blkcg) |
@@ -1074,14 +1115,6 @@ static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1074 | return ret; | 1115 | return ret; |
1075 | } | 1116 | } |
1076 | 1117 | ||
1077 | static const char *blkg_dev_name(struct blkio_group *blkg) | ||
1078 | { | ||
1079 | /* some drivers (floppy) instantiate a queue w/o disk registered */ | ||
1080 | if (blkg->q->backing_dev_info.dev) | ||
1081 | return dev_name(blkg->q->backing_dev_info.dev); | ||
1082 | return NULL; | ||
1083 | } | ||
1084 | |||
1085 | static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg, | 1118 | static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg, |
1086 | struct seq_file *m) | 1119 | struct seq_file *m) |
1087 | { | 1120 | { |
@@ -1174,116 +1207,6 @@ static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1174 | return 0; | 1207 | return 0; |
1175 | } | 1208 | } |
1176 | 1209 | ||
1177 | static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg, | ||
1178 | struct cftype *cft, struct cgroup_map_cb *cb, | ||
1179 | enum stat_type type, bool show_total, bool pcpu) | ||
1180 | { | ||
1181 | struct blkio_group *blkg; | ||
1182 | struct hlist_node *n; | ||
1183 | uint64_t cgroup_total = 0; | ||
1184 | |||
1185 | spin_lock_irq(&blkcg->lock); | ||
1186 | |||
1187 | hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { | ||
1188 | const char *dname = blkg_dev_name(blkg); | ||
1189 | int plid = BLKIOFILE_POLICY(cft->private); | ||
1190 | |||
1191 | if (!dname) | ||
1192 | continue; | ||
1193 | if (pcpu) | ||
1194 | cgroup_total += blkio_get_stat_cpu(blkg, plid, | ||
1195 | cb, dname, type); | ||
1196 | else | ||
1197 | cgroup_total += blkio_get_stat(blkg, plid, | ||
1198 | cb, dname, type); | ||
1199 | } | ||
1200 | if (show_total) | ||
1201 | cb->fill(cb, "Total", cgroup_total); | ||
1202 | |||
1203 | spin_unlock_irq(&blkcg->lock); | ||
1204 | return 0; | ||
1205 | } | ||
1206 | |||
1207 | /* All map kind of cgroup file get serviced by this function */ | ||
1208 | static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft, | ||
1209 | struct cgroup_map_cb *cb) | ||
1210 | { | ||
1211 | struct blkio_cgroup *blkcg; | ||
1212 | enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private); | ||
1213 | int name = BLKIOFILE_ATTR(cft->private); | ||
1214 | |||
1215 | blkcg = cgroup_to_blkio_cgroup(cgrp); | ||
1216 | |||
1217 | switch(plid) { | ||
1218 | case BLKIO_POLICY_PROP: | ||
1219 | switch(name) { | ||
1220 | case BLKIO_PROP_time: | ||
1221 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1222 | BLKIO_STAT_TIME, 0, 0); | ||
1223 | case BLKIO_PROP_sectors: | ||
1224 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1225 | BLKIO_STAT_CPU_SECTORS, 0, 1); | ||
1226 | case BLKIO_PROP_io_service_bytes: | ||
1227 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1228 | BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1); | ||
1229 | case BLKIO_PROP_io_serviced: | ||
1230 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1231 | BLKIO_STAT_CPU_SERVICED, 1, 1); | ||
1232 | case BLKIO_PROP_io_service_time: | ||
1233 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1234 | BLKIO_STAT_SERVICE_TIME, 1, 0); | ||
1235 | case BLKIO_PROP_io_wait_time: | ||
1236 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1237 | BLKIO_STAT_WAIT_TIME, 1, 0); | ||
1238 | case BLKIO_PROP_io_merged: | ||
1239 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1240 | BLKIO_STAT_MERGED, 1, 0); | ||
1241 | case BLKIO_PROP_io_queued: | ||
1242 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1243 | BLKIO_STAT_QUEUED, 1, 0); | ||
1244 | #ifdef CONFIG_DEBUG_BLK_CGROUP | ||
1245 | case BLKIO_PROP_unaccounted_time: | ||
1246 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1247 | BLKIO_STAT_UNACCOUNTED_TIME, 0, 0); | ||
1248 | case BLKIO_PROP_dequeue: | ||
1249 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1250 | BLKIO_STAT_DEQUEUE, 0, 0); | ||
1251 | case BLKIO_PROP_avg_queue_size: | ||
1252 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1253 | BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0); | ||
1254 | case BLKIO_PROP_group_wait_time: | ||
1255 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1256 | BLKIO_STAT_GROUP_WAIT_TIME, 0, 0); | ||
1257 | case BLKIO_PROP_idle_time: | ||
1258 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1259 | BLKIO_STAT_IDLE_TIME, 0, 0); | ||
1260 | case BLKIO_PROP_empty_time: | ||
1261 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1262 | BLKIO_STAT_EMPTY_TIME, 0, 0); | ||
1263 | #endif | ||
1264 | default: | ||
1265 | BUG(); | ||
1266 | } | ||
1267 | break; | ||
1268 | case BLKIO_POLICY_THROTL: | ||
1269 | switch(name){ | ||
1270 | case BLKIO_THROTL_io_service_bytes: | ||
1271 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1272 | BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1); | ||
1273 | case BLKIO_THROTL_io_serviced: | ||
1274 | return blkio_read_blkg_stats(blkcg, cft, cb, | ||
1275 | BLKIO_STAT_CPU_SERVICED, 1, 1); | ||
1276 | default: | ||
1277 | BUG(); | ||
1278 | } | ||
1279 | break; | ||
1280 | default: | ||
1281 | BUG(); | ||
1282 | } | ||
1283 | |||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val) | 1210 | static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val) |
1288 | { | 1211 | { |
1289 | struct blkio_group *blkg; | 1212 | struct blkio_group *blkg; |
@@ -1369,51 +1292,51 @@ struct cftype blkio_files[] = {
1369 | }, | 1292 | }, |
1370 | { | 1293 | { |
1371 | .name = "time", | 1294 | .name = "time", |
1372 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1295 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1373 | BLKIO_PROP_time), | 1296 | offsetof(struct blkio_group_stats, time)), |
1374 | .read_map = blkiocg_file_read_map, | 1297 | .read_seq_string = blkcg_print_stat, |
1375 | }, | 1298 | }, |
1376 | { | 1299 | { |
1377 | .name = "sectors", | 1300 | .name = "sectors", |
1378 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1301 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1379 | BLKIO_PROP_sectors), | 1302 | offsetof(struct blkio_group_stats_cpu, sectors)), |
1380 | .read_map = blkiocg_file_read_map, | 1303 | .read_seq_string = blkcg_print_cpu_stat, |
1381 | }, | 1304 | }, |
1382 | { | 1305 | { |
1383 | .name = "io_service_bytes", | 1306 | .name = "io_service_bytes", |
1384 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1307 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1385 | BLKIO_PROP_io_service_bytes), | 1308 | offsetof(struct blkio_group_stats_cpu, service_bytes)), |
1386 | .read_map = blkiocg_file_read_map, | 1309 | .read_seq_string = blkcg_print_cpu_rwstat, |
1387 | }, | 1310 | }, |
1388 | { | 1311 | { |
1389 | .name = "io_serviced", | 1312 | .name = "io_serviced", |
1390 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1313 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1391 | BLKIO_PROP_io_serviced), | 1314 | offsetof(struct blkio_group_stats_cpu, serviced)), |
1392 | .read_map = blkiocg_file_read_map, | 1315 | .read_seq_string = blkcg_print_cpu_rwstat, |
1393 | }, | 1316 | }, |
1394 | { | 1317 | { |
1395 | .name = "io_service_time", | 1318 | .name = "io_service_time", |
1396 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1319 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1397 | BLKIO_PROP_io_service_time), | 1320 | offsetof(struct blkio_group_stats, service_time)), |
1398 | .read_map = blkiocg_file_read_map, | 1321 | .read_seq_string = blkcg_print_rwstat, |
1399 | }, | 1322 | }, |
1400 | { | 1323 | { |
1401 | .name = "io_wait_time", | 1324 | .name = "io_wait_time", |
1402 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1325 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1403 | BLKIO_PROP_io_wait_time), | 1326 | offsetof(struct blkio_group_stats, wait_time)), |
1404 | .read_map = blkiocg_file_read_map, | 1327 | .read_seq_string = blkcg_print_rwstat, |
1405 | }, | 1328 | }, |
1406 | { | 1329 | { |
1407 | .name = "io_merged", | 1330 | .name = "io_merged", |
1408 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1331 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1409 | BLKIO_PROP_io_merged), | 1332 | offsetof(struct blkio_group_stats, merged)), |
1410 | .read_map = blkiocg_file_read_map, | 1333 | .read_seq_string = blkcg_print_rwstat, |
1411 | }, | 1334 | }, |
1412 | { | 1335 | { |
1413 | .name = "io_queued", | 1336 | .name = "io_queued", |
1414 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1337 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1415 | BLKIO_PROP_io_queued), | 1338 | offsetof(struct blkio_group_stats, queued)), |
1416 | .read_map = blkiocg_file_read_map, | 1339 | .read_seq_string = blkcg_print_rwstat, |
1417 | }, | 1340 | }, |
1418 | { | 1341 | { |
1419 | .name = "reset_stats", | 1342 | .name = "reset_stats", |
@@ -1457,54 +1380,52 @@ struct cftype blkio_files[] = {
1457 | }, | 1380 | }, |
1458 | { | 1381 | { |
1459 | .name = "throttle.io_service_bytes", | 1382 | .name = "throttle.io_service_bytes", |
1460 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | 1383 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL, |
1461 | BLKIO_THROTL_io_service_bytes), | 1384 | offsetof(struct blkio_group_stats_cpu, service_bytes)), |
1462 | .read_map = blkiocg_file_read_map, | 1385 | .read_seq_string = blkcg_print_cpu_rwstat, |
1463 | }, | 1386 | }, |
1464 | { | 1387 | { |
1465 | .name = "throttle.io_serviced", | 1388 | .name = "throttle.io_serviced", |
1466 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | 1389 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL, |
1467 | BLKIO_THROTL_io_serviced), | 1390 | offsetof(struct blkio_group_stats_cpu, serviced)), |
1468 | .read_map = blkiocg_file_read_map, | 1391 | .read_seq_string = blkcg_print_cpu_rwstat, |
1469 | }, | 1392 | }, |
1470 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 1393 | #endif /* CONFIG_BLK_DEV_THROTTLING */ |
1471 | 1394 | ||
1472 | #ifdef CONFIG_DEBUG_BLK_CGROUP | 1395 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
1473 | { | 1396 | { |
1474 | .name = "avg_queue_size", | 1397 | .name = "avg_queue_size", |
1475 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1398 | .read_seq_string = blkcg_print_avg_queue_size, |
1476 | BLKIO_PROP_avg_queue_size), | ||
1477 | .read_map = blkiocg_file_read_map, | ||
1478 | }, | 1399 | }, |
1479 | { | 1400 | { |
1480 | .name = "group_wait_time", | 1401 | .name = "group_wait_time", |
1481 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1402 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1482 | BLKIO_PROP_group_wait_time), | 1403 | offsetof(struct blkio_group_stats, group_wait_time)), |
1483 | .read_map = blkiocg_file_read_map, | 1404 | .read_seq_string = blkcg_print_stat, |
1484 | }, | 1405 | }, |
1485 | { | 1406 | { |
1486 | .name = "idle_time", | 1407 | .name = "idle_time", |
1487 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1408 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1488 | BLKIO_PROP_idle_time), | 1409 | offsetof(struct blkio_group_stats, idle_time)), |
1489 | .read_map = blkiocg_file_read_map, | 1410 | .read_seq_string = blkcg_print_stat, |
1490 | }, | 1411 | }, |
1491 | { | 1412 | { |
1492 | .name = "empty_time", | 1413 | .name = "empty_time", |
1493 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1414 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1494 | BLKIO_PROP_empty_time), | 1415 | offsetof(struct blkio_group_stats, empty_time)), |
1495 | .read_map = blkiocg_file_read_map, | 1416 | .read_seq_string = blkcg_print_stat, |
1496 | }, | 1417 | }, |
1497 | { | 1418 | { |
1498 | .name = "dequeue", | 1419 | .name = "dequeue", |
1499 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1420 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1500 | BLKIO_PROP_dequeue), | 1421 | offsetof(struct blkio_group_stats, dequeue)), |
1501 | .read_map = blkiocg_file_read_map, | 1422 | .read_seq_string = blkcg_print_stat, |
1502 | }, | 1423 | }, |
1503 | { | 1424 | { |
1504 | .name = "unaccounted_time", | 1425 | .name = "unaccounted_time", |
1505 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | 1426 | .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP, |
1506 | BLKIO_PROP_unaccounted_time), | 1427 | offsetof(struct blkio_group_stats, unaccounted_time)), |
1507 | .read_map = blkiocg_file_read_map, | 1428 | .read_seq_string = blkcg_print_stat, |
1508 | }, | 1429 | }, |
1509 | #endif | 1430 | #endif |
1510 | { } /* terminate */ | 1431 | { } /* terminate */ |
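The macros that pack and unpack cftype->private are not visible here because the diffstat is limited to block/blk-cgroup.c; the header side of the commit presumably encodes the policy id and the stat's byte offset in a single integer. A plausible reading, stated as an assumption rather than the actual definition, is:

```c
/* Assumed definitions -- not shown in this file-limited diff. */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
```

With a packing along these lines, offsetof(struct blkio_group_stats, wait_time) in a cftype entry travels to blkcg_print_rwstat(), which strips the policy id back out and hands the byte offset to the prfill helper.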