author    Tejun Heo <tj@kernel.org>  2012-04-01 17:38:43 -0400
committer Tejun Heo <tj@kernel.org>  2012-04-01 17:38:43 -0400
commit    60c2bc2d5a12369deef395cda41638d7e6b6bf19 (patch)
tree      67fb6ace87a930215f800f0a96bb33f9773231c7 /block
parent    44ea53de46a8b01a65ae6217f47e00b516725190 (diff)
blkcg: move conf/stat file handling code to policies
blkcg conf/stat handling is convoluted in that details which belong to specific policy implementations all live in blkcg core, and policies then hook into the core layer to access and manipulate confs and stats. This sadly achieves both inflexibility (confs/stats can't be modified without messing with blkcg core) and complexity (all the call-ins and call-backs).

The previous patches restructured conf and stat handling code so that they can be separated out. This patch relocates the file handling part. All conf/stat file handling code which belongs to BLKIO_POLICY_PROP is moved to cfq-iosched.c and all BLKIO_POLICY_THROTL code to blk-throtl.c.

The move is verbatim except for the blkio_update_group_{weight|bps|iops}() callbacks, which relayed conf changes to policies. As the configuration settings are now handled in the policies themselves, the relaying is unnecessary: conf setting functions are modified to call the per-policy update functions directly, and the relaying mechanism is dropped.

Signed-off-by: Tejun Heo <tj@kernel.org>
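The resulting pattern, condensed here from the blk-throttle.c hunks below, is that a policy now owns its cgroup files end to end: it defines its own cftype array, wires the write handlers straight to its own update helpers (no blkio_policy_ops relay, no plid matching), and registers the array through the .cftypes member of struct blkio_policy_type. A simplified sketch with function bodies trimmed to the essentials; the complete versions appear in the diff:

/* Condensed illustration of the new per-policy layout (from blk-throttle.c). */

/* Direct update helper -- reaches the queue via blkg->q, no @q parameter. */
static void throtl_update_blkio_group_read_bps(struct blkio_group *blkg,
					       u64 read_bps)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(blkg->q->td, tg);
}

/* The conf setter calls the policy's own helper -- no blkio_policy_ops hop. */
static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf,
				  throtl_update_blkio_group_read_bps);
}

/* The policy declares the files it owns... */
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},
	/* ... remaining throttle files elided ... */
	{ }	/* terminate */
};

/* ...and hands them to blkcg core; the five update_fn callbacks are gone. */
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_init_group_fn = throtl_init_blkio_group,
	},
	.plid = BLKIO_POLICY_THROTL,
	.pdata_size = sizeof(struct throtl_grp),
	.cftypes = throtl_files,
};

cfq-iosched.c follows the same shape with cfq_blkcg_files and BLKIO_POLICY_PROP.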
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c    373
-rw-r--r--  block/blk-cgroup.h     15
-rw-r--r--  block/blk-throttle.c  163
-rw-r--r--  block/cfq-iosched.c   202
4 files changed, 333 insertions(+), 420 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e714f8ddcd2..b963fb4b3995 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -63,63 +63,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
-static inline void blkio_update_group_weight(struct blkio_group *blkg,
-					     int plid, unsigned int weight)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-		if (blkiop->ops.blkio_update_group_weight_fn)
-			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
-							blkg, weight);
-	}
-}
-
-static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
-					  u64 bps, int rw)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-
-		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
-			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
-								blkg, bps);
-
-		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
-			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
-								blkg, bps);
-	}
-}
-
-static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
-					   u64 iops, int rw)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-
-		if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
-			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
-								blkg, iops);
-
-		if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
-			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
-								blkg, iops);
-	}
-}
-
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 /* This should be called with the queue_lock held. */
 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
@@ -939,33 +882,6 @@ int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
 }
 EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
-				      struct blkg_policy_data *pd, int off)
-{
-	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
-	u64 v = 0;
-
-	if (samples) {
-		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
-		do_div(v, samples);
-	}
-	__blkg_prfill_u64(sf, pd, v);
-	return 0;
-}
-
-/* print avg_queue_size */
-static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
-				      struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
-			  BLKIO_POLICY_PROP, 0, false);
-	return 0;
-}
-#endif	/* CONFIG_DEBUG_BLK_CGROUP */
-
 /**
  * blkg_conf_prep - parse and prepare for per-blkg config update
  * @blkcg: target block cgroup
@@ -1039,300 +955,11 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
-/* for propio conf */
-static u64 blkg_prfill_weight_device(struct seq_file *sf,
-				     struct blkg_policy_data *pd, int off)
-{
-	if (!pd->conf.weight)
-		return 0;
-	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
-}
-
-static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
-{
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
-			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
-			  false);
-	return 0;
-}
-
-static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
-			      struct seq_file *sf)
-{
-	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
-	return 0;
-}
-
-static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				   const char *buf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkg_policy_data *pd;
-	struct blkg_conf_ctx ctx;
-	int ret;
-
-	ret = blkg_conf_prep(blkcg, buf, &ctx);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
-	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
-			      ctx.v <= BLKIO_WEIGHT_MAX))) {
-		pd->conf.weight = ctx.v;
-		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
-					  ctx.v ?: blkcg->weight);
-		ret = 0;
-	}
-
-	blkg_conf_finish(&ctx);
-	return ret;
-}
-
-static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkio_group *blkg;
-	struct hlist_node *n;
-
-	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
-		return -EINVAL;
-
-	spin_lock(&blkio_list_lock);
-	spin_lock_irq(&blkcg->lock);
-	blkcg->weight = (unsigned int)val;
-
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
-
-		if (pd && !pd->conf.weight)
-			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
-						  blkcg->weight);
-	}
-
-	spin_unlock_irq(&blkcg->lock);
-	spin_unlock(&blkio_list_lock);
-	return 0;
-}
-
-/* for blk-throttle conf */
-#ifdef CONFIG_BLK_DEV_THROTTLING
-static u64 blkg_prfill_conf_u64(struct seq_file *sf,
-				struct blkg_policy_data *pd, int off)
-{
-	u64 v = *(u64 *)((void *)&pd->conf + off);
-
-	if (!v)
-		return 0;
-	return __blkg_prfill_u64(sf, pd, v);
-}
-
-static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-				struct seq_file *sf)
-{
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
-			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
-			  cft->private, false);
-	return 0;
-}
-
-static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-			      const char *buf, int rw,
-			      void (*update)(struct blkio_group *, int, u64, int))
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkg_policy_data *pd;
-	struct blkg_conf_ctx ctx;
-	int ret;
-
-	ret = blkg_conf_prep(blkcg, buf, &ctx);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
-	if (pd) {
-		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
-		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
-		ret = 0;
-	}
-
-	blkg_conf_finish(&ctx);
-	return ret;
-}
-
-static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
-				const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
-}
-
-static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
-				const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
-}
-
-static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
-				 const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
-}
-
-static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
-				 const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
-}
-#endif
-
 struct cftype blkio_files[] = {
 	{
-		.name = "weight_device",
-		.read_seq_string = blkcg_print_weight_device,
-		.write_string = blkcg_set_weight_device,
-		.max_write_len = 256,
-	},
-	{
-		.name = "weight",
-		.read_seq_string = blkcg_print_weight,
-		.write_u64 = blkcg_set_weight,
-	},
-	{
-		.name = "time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "sectors",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, sectors)),
-		.read_seq_string = blkcg_print_cpu_stat,
-	},
-	{
-		.name = "io_service_bytes",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "io_serviced",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "io_service_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, service_time)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, wait_time)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_merged",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, merged)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_queued",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, queued)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
 		.name = "reset_stats",
 		.write_u64 = blkiocg_reset_stats,
 	},
-#ifdef CONFIG_BLK_DEV_THROTTLING
-	{
-		.name = "throttle.read_bps_device",
-		.private = offsetof(struct blkio_group_conf, bps[READ]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_bps_r,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_bps_device",
-		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_bps_w,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.read_iops_device",
-		.private = offsetof(struct blkio_group_conf, iops[READ]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_iops_r,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_iops_device",
-		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_iops_w,
-		.max_write_len = 256,
-	},
-	{
-		.name = "throttle.io_service_bytes",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "throttle.io_serviced",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	{
-		.name = "avg_queue_size",
-		.read_seq_string = blkcg_print_avg_queue_size,
-	},
-	{
-		.name = "group_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, group_wait_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "idle_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, idle_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "empty_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, empty_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "dequeue",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, dequeue)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "unaccounted_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, unaccounted_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-#endif
 	{ }	/* terminate */
 };
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index fa744d57bebb..ba64b2857571 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -156,24 +156,9 @@ struct blkio_group {
 };
 
 typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
-typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
-			struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
-			struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
-			struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
-			struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
-			struct blkio_group *blkg, unsigned int write_iops);
 
 struct blkio_policy_ops {
 	blkio_init_group_fn *blkio_init_group_fn;
-	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
-	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
-	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
-	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
-	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
 };
 
 struct blkio_policy_type {
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1cc6c23de2c1..fb6f25778fb2 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -804,6 +804,11 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 	}
 }
 
+/*
+ * Can not take queue lock in update functions as queue lock under
+ * blkcg_lock is not allowed. Under other paths we take blkcg_lock under
+ * queue_lock.
+ */
 static void throtl_update_blkio_group_common(struct throtl_data *td,
 					     struct throtl_grp *tg)
 {
@@ -813,51 +818,158 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 	throtl_schedule_delayed_work(td, 0);
 }
 
-/*
- * For all update functions, @q should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn @q is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(struct request_queue *q,
-				struct blkio_group *blkg, u64 read_bps)
+static u64 blkg_prfill_conf_u64(struct seq_file *sf,
+				struct blkg_policy_data *pd, int off)
+{
+	u64 v = *(u64 *)((void *)&pd->conf + off);
+
+	if (!v)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, v);
+}
+
+static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+				struct seq_file *sf)
+{
+	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
+			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
+			  cft->private, false);
+	return 0;
+}
+
+static void throtl_update_blkio_group_read_bps(struct blkio_group *blkg,
+					       u64 read_bps)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->bps[READ] = read_bps;
-	throtl_update_blkio_group_common(q->td, tg);
+	throtl_update_blkio_group_common(blkg->q->td, tg);
 }
 
-static void throtl_update_blkio_group_write_bps(struct request_queue *q,
-				struct blkio_group *blkg, u64 write_bps)
+static void throtl_update_blkio_group_write_bps(struct blkio_group *blkg,
+						u64 write_bps)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->bps[WRITE] = write_bps;
-	throtl_update_blkio_group_common(q->td, tg);
+	throtl_update_blkio_group_common(blkg->q->td, tg);
 }
 
-static void throtl_update_blkio_group_read_iops(struct request_queue *q,
-				struct blkio_group *blkg, unsigned int read_iops)
+static void throtl_update_blkio_group_read_iops(struct blkio_group *blkg,
+						u64 read_iops)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->iops[READ] = read_iops;
-	throtl_update_blkio_group_common(q->td, tg);
+	throtl_update_blkio_group_common(blkg->q->td, tg);
 }
 
-static void throtl_update_blkio_group_write_iops(struct request_queue *q,
-				struct blkio_group *blkg, unsigned int write_iops)
+static void throtl_update_blkio_group_write_iops(struct blkio_group *blkg,
+						 u64 write_iops)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->iops[WRITE] = write_iops;
-	throtl_update_blkio_group_common(q->td, tg);
+	throtl_update_blkio_group_common(blkg->q->td, tg);
+}
+
+static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+			      const char *buf,
+			      void (*update)(struct blkio_group *, u64))
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+	struct blkg_policy_data *pd;
+	struct blkg_conf_ctx ctx;
+	int ret;
+
+	ret = blkg_conf_prep(blkcg, buf, &ctx);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
+	if (pd) {
+		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
+		update(ctx.blkg, ctx.v ?: -1);
+		ret = 0;
+	}
+
+	blkg_conf_finish(&ctx);
+	return ret;
 }
 
+static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
+				const char *buf)
+{
+	return blkcg_set_conf_u64(cgrp, cft, buf,
+				  throtl_update_blkio_group_read_bps);
+}
+
+static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
+				const char *buf)
+{
+	return blkcg_set_conf_u64(cgrp, cft, buf,
+				  throtl_update_blkio_group_write_bps);
+}
+
+static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
+				 const char *buf)
+{
+	return blkcg_set_conf_u64(cgrp, cft, buf,
+				  throtl_update_blkio_group_read_iops);
+}
+
+static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
+				 const char *buf)
+{
+	return blkcg_set_conf_u64(cgrp, cft, buf,
+				  throtl_update_blkio_group_write_iops);
+}
+
+static struct cftype throtl_files[] = {
+	{
+		.name = "throttle.read_bps_device",
+		.private = offsetof(struct blkio_group_conf, bps[READ]),
+		.read_seq_string = blkcg_print_conf_u64,
+		.write_string = blkcg_set_conf_bps_r,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.write_bps_device",
+		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
+		.read_seq_string = blkcg_print_conf_u64,
+		.write_string = blkcg_set_conf_bps_w,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.read_iops_device",
+		.private = offsetof(struct blkio_group_conf, iops[READ]),
+		.read_seq_string = blkcg_print_conf_u64,
+		.write_string = blkcg_set_conf_iops_r,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.write_iops_device",
+		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
+		.read_seq_string = blkcg_print_conf_u64,
+		.write_string = blkcg_set_conf_iops_w,
+		.max_write_len = 256,
+	},
+	{
+		.name = "throttle.io_service_bytes",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
+				offsetof(struct blkio_group_stats_cpu, service_bytes)),
+		.read_seq_string = blkcg_print_cpu_rwstat,
+	},
+	{
+		.name = "throttle.io_serviced",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
+				offsetof(struct blkio_group_stats_cpu, serviced)),
+		.read_seq_string = blkcg_print_cpu_rwstat,
+	},
+	{ }	/* terminate */
+};
+
 static void throtl_shutdown_wq(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
@@ -868,17 +980,10 @@ static void throtl_shutdown_wq(struct request_queue *q)
 static struct blkio_policy_type blkio_policy_throtl = {
 	.ops = {
 		.blkio_init_group_fn = throtl_init_blkio_group,
-		.blkio_update_group_read_bps_fn =
-					throtl_update_blkio_group_read_bps,
-		.blkio_update_group_write_bps_fn =
-					throtl_update_blkio_group_write_bps,
-		.blkio_update_group_read_iops_fn =
-					throtl_update_blkio_group_read_iops,
-		.blkio_update_group_write_iops_fn =
-					throtl_update_blkio_group_write_iops,
 	},
 	.plid = BLKIO_POLICY_THROTL,
 	.pdata_size = sizeof(struct throtl_grp),
+	.cftypes = throtl_files,
 };
 
 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8cca6161d0bc..119e061a7675 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1058,8 +1058,7 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void cfq_update_blkio_group_weight(struct request_queue *q,
-					  struct blkio_group *blkg,
+static void cfq_update_blkio_group_weight(struct blkio_group *blkg,
 					  unsigned int weight)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1111,6 +1110,203 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 	cfqg_get(cfqg);
 }
 
+static u64 blkg_prfill_weight_device(struct seq_file *sf,
+				     struct blkg_policy_data *pd, int off)
+{
+	if (!pd->conf.weight)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
+}
+
+static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				     struct seq_file *sf)
+{
+	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
+			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
+			  false);
+	return 0;
+}
+
+static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
+			      struct seq_file *sf)
+{
+	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
+	return 0;
+}
+
+static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				   const char *buf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+	struct blkg_policy_data *pd;
+	struct blkg_conf_ctx ctx;
+	int ret;
+
+	ret = blkg_conf_prep(blkcg, buf, &ctx);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
+	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
+			      ctx.v <= BLKIO_WEIGHT_MAX))) {
+		pd->conf.weight = ctx.v;
+		cfq_update_blkio_group_weight(ctx.blkg, ctx.v ?: blkcg->weight);
+		ret = 0;
+	}
+
+	blkg_conf_finish(&ctx);
+	return ret;
+}
+
+static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+	struct blkio_group *blkg;
+	struct hlist_node *n;
+
+	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+		return -EINVAL;
+
+	spin_lock_irq(&blkcg->lock);
+	blkcg->weight = (unsigned int)val;
+
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
+
+		if (pd && !pd->conf.weight)
+			cfq_update_blkio_group_weight(blkg, blkcg->weight);
+	}
+
+	spin_unlock_irq(&blkcg->lock);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
+				      struct blkg_policy_data *pd, int off)
+{
+	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
+	u64 v = 0;
+
+	if (samples) {
+		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
+		do_div(v, samples);
+	}
+	__blkg_prfill_u64(sf, pd, v);
+	return 0;
+}
+
+/* print avg_queue_size */
+static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+				      struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
+			  BLKIO_POLICY_PROP, 0, false);
+	return 0;
+}
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+	{
+		.name = "weight_device",
+		.read_seq_string = blkcg_print_weight_device,
+		.write_string = blkcg_set_weight_device,
+		.max_write_len = 256,
+	},
+	{
+		.name = "weight",
+		.read_seq_string = blkcg_print_weight,
+		.write_u64 = blkcg_set_weight,
+	},
+	{
+		.name = "time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, time)),
+		.read_seq_string = blkcg_print_stat,
+	},
+	{
+		.name = "sectors",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats_cpu, sectors)),
+		.read_seq_string = blkcg_print_cpu_stat,
+	},
+	{
+		.name = "io_service_bytes",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats_cpu, service_bytes)),
+		.read_seq_string = blkcg_print_cpu_rwstat,
+	},
+	{
+		.name = "io_serviced",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats_cpu, serviced)),
+		.read_seq_string = blkcg_print_cpu_rwstat,
+	},
+	{
+		.name = "io_service_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, service_time)),
+		.read_seq_string = blkcg_print_rwstat,
+	},
+	{
+		.name = "io_wait_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, wait_time)),
+		.read_seq_string = blkcg_print_rwstat,
+	},
+	{
+		.name = "io_merged",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, merged)),
+		.read_seq_string = blkcg_print_rwstat,
+	},
+	{
+		.name = "io_queued",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, queued)),
+		.read_seq_string = blkcg_print_rwstat,
+	},
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	{
+		.name = "avg_queue_size",
+		.read_seq_string = blkcg_print_avg_queue_size,
+	},
+	{
+		.name = "group_wait_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, group_wait_time)),
+		.read_seq_string = blkcg_print_stat,
+	},
+	{
+		.name = "idle_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, idle_time)),
+		.read_seq_string = blkcg_print_stat,
+	},
+	{
+		.name = "empty_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, empty_time)),
+		.read_seq_string = blkcg_print_stat,
+	},
+	{
+		.name = "dequeue",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, dequeue)),
+		.read_seq_string = blkcg_print_stat,
+	},
+	{
+		.name = "unaccounted_time",
+		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
+				offsetof(struct blkio_group_stats, unaccounted_time)),
+		.read_seq_string = blkcg_print_stat,
+	},
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+	{ }	/* terminate */
+};
 #else	/* GROUP_IOSCHED */
 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 						struct blkio_cgroup *blkcg)
@@ -3715,10 +3911,10 @@ static struct elevator_type iosched_cfq = {
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
 		.blkio_init_group_fn = cfq_init_blkio_group,
-		.blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
 	},
 	.plid = BLKIO_POLICY_PROP,
 	.pdata_size = sizeof(struct cfq_group),
+	.cftypes = cfq_blkcg_files,
 };
 #endif
 