path: root/block/blk-cgroup.c
author     Tejun Heo <tj@kernel.org>   2012-04-01 17:38:43 -0400
committer  Tejun Heo <tj@kernel.org>   2012-04-01 17:38:43 -0400
commit     3a8b31d396b296df4b8594429d86d415d3409432 (patch)
tree       fe75761c55f91e558b5899cc284fa9b4134d8167 /block/blk-cgroup.c
parent     c4682aec9caaca1fcfd1dd4b59cef47af22cbdc6 (diff)
blkcg: restructure blkio_group configuration setting
As part of userland interface restructuring, this patch updates
per-blkio_group configuration setting.  Instead of funneling everything
through a master function which has hard-coded cases for each config file
it may handle, the common part is factored into blkg_conf_prep() and
blkg_conf_finish() and different configuration setters are implemented
using the helpers.

While this doesn't result in immediate LOC reduction, this enables further
cleanups and more modular implementation.

Signed-off-by: Tejun Heo <tj@kernel.org>
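The new flow is easiest to see in the weight_device setter.  The sketch
below is condensed from the blkcg_set_weight_device() hunk in the diff
that follows (the weight range check is dropped here for brevity); every
other setter follows the same prep / update / finish shape:

static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
                                   const char *buf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
        struct blkg_conf_ctx ctx;
        int ret;

        /* parse "MAJ:MIN VAL", look up the blkg; returns with RCU read locked */
        ret = blkg_conf_prep(blkcg, buf, &ctx);
        if (ret)
                return ret;

        /* apply the parsed value to this policy's per-group data */
        ret = -EINVAL;
        if (ctx.blkg->pd[BLKIO_POLICY_PROP]) {
                ctx.blkg->pd[BLKIO_POLICY_PROP]->conf.weight = ctx.v;
                blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
                                          ctx.v ?: blkcg->weight);
                ret = 0;
        }

        /* drop the RCU read lock and the disk ref taken by blkg_conf_prep() */
        blkg_conf_finish(&ctx);
        return ret;
}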
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--   block/blk-cgroup.c   274
1 file changed, 147 insertions(+), 127 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a9723a8dc983..1e1ee2af7b5f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -43,12 +43,6 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
 
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val)       (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val)           (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val)             ((val) & 0xffff)
-
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
         return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -86,7 +80,7 @@ static inline void blkio_update_group_weight(struct blkio_group *blkg,
 }
 
 static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
-                                          u64 bps, int fileid)
+                                          u64 bps, int rw)
 {
         struct blkio_policy_type *blkiop;
 
@@ -96,21 +90,18 @@ static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
                 if (blkiop->plid != plid)
                         continue;
 
-                if (fileid == BLKIO_THROTL_read_bps_device
-                    && blkiop->ops.blkio_update_group_read_bps_fn)
+                if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
                         blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
                                                                    blkg, bps);
 
-                if (fileid == BLKIO_THROTL_write_bps_device
-                    && blkiop->ops.blkio_update_group_write_bps_fn)
+                if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
                         blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
                                                                     blkg, bps);
         }
 }
 
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
-                                           int plid, unsigned int iops,
-                                           int fileid)
+static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
+                                           u64 iops, int rw)
 {
         struct blkio_policy_type *blkiop;
 
@@ -120,13 +111,11 @@ static inline void blkio_update_group_iops(struct blkio_group *blkg,
                 if (blkiop->plid != plid)
                         continue;
 
-                if (fileid == BLKIO_THROTL_read_iops_device
-                    && blkiop->ops.blkio_update_group_read_iops_fn)
+                if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
                         blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
                                                                     blkg, iops);
 
-                if (fileid == BLKIO_THROTL_write_iops_device
-                    && blkiop->ops.blkio_update_group_write_iops_fn)
+                if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
                         blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
                                                                      blkg,iops);
         }
@@ -975,19 +964,40 @@ static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 }
 #endif /* CONFIG_DEBUG_BLK_CGROUP */
 
-static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
-                                      int fileid, struct blkio_cgroup *blkcg)
+struct blkg_conf_ctx {
+        struct gendisk          *disk;
+        struct blkio_group      *blkg;
+        u64                     v;
+};
+
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value.  This function returns with RCU read locked and must be paired
+ * with blkg_conf_finish().
+ */
+static int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
+                          struct blkg_conf_ctx *ctx)
+        __acquires(rcu)
 {
-        struct gendisk *disk = NULL;
-        struct blkio_group *blkg = NULL;
-        struct blkg_policy_data *pd;
-        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
+        struct gendisk *disk;
+        struct blkio_group *blkg;
+        char *buf, *s[4], *p, *major_s, *minor_s;
         unsigned long major, minor;
         int i = 0, ret = -EINVAL;
         int part;
         dev_t dev;
         u64 temp;
 
+        buf = kstrdup(input, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
+
         memset(s, 0, sizeof(s));
 
         while ((p = strsep(&buf, " ")) != NULL) {
@@ -1037,82 +1047,42 @@ static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
 
         if (IS_ERR(blkg)) {
                 ret = PTR_ERR(blkg);
-                goto out_unlock;
-        }
-
-        pd = blkg->pd[plid];
-
-        switch (plid) {
-        case BLKIO_POLICY_PROP:
-                if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-                     temp > BLKIO_WEIGHT_MAX)
-                        goto out_unlock;
-
-                pd->conf.weight = temp;
-                blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
-                break;
-        case BLKIO_POLICY_THROTL:
-                switch(fileid) {
-                case BLKIO_THROTL_read_bps_device:
-                        pd->conf.bps[READ] = temp;
-                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
-                        break;
-                case BLKIO_THROTL_write_bps_device:
-                        pd->conf.bps[WRITE] = temp;
-                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
-                        break;
-                case BLKIO_THROTL_read_iops_device:
-                        if (temp > THROTL_IOPS_MAX)
-                                goto out_unlock;
-                        pd->conf.iops[READ] = temp;
-                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
-                        break;
-                case BLKIO_THROTL_write_iops_device:
-                        if (temp > THROTL_IOPS_MAX)
-                                goto out_unlock;
-                        pd->conf.iops[WRITE] = temp;
-                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
-                        break;
+                rcu_read_unlock();
+                put_disk(disk);
+                /*
+                 * If queue was bypassing, we should retry.  Do so after a
+                 * short msleep().  It isn't strictly necessary but queue
+                 * can be bypassing for some time and it's always nice to
+                 * avoid busy looping.
+                 */
+                if (ret == -EBUSY) {
+                        msleep(10);
+                        ret = restart_syscall();
                 }
-                break;
-        default:
-                BUG();
+                goto out;
         }
+
+        ctx->disk = disk;
+        ctx->blkg = blkg;
+        ctx->v = temp;
         ret = 0;
-out_unlock:
-        rcu_read_unlock();
 out:
-        put_disk(disk);
-
-        /*
-         * If queue was bypassing, we should retry.  Do so after a short
-         * msleep().  It isn't strictly necessary but queue can be
-         * bypassing for some time and it's always nice to avoid busy
-         * looping.
-         */
-        if (ret == -EBUSY) {
-                msleep(10);
-                return restart_syscall();
-        }
+        kfree(buf);
         return ret;
 }
 
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
-                              const char *buffer)
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx intiailized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update.  This function must be paired
+ * with blkg_conf_prep().
+ */
+static void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+        __releases(rcu)
 {
-        int ret = 0;
-        char *buf;
-        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-        int fileid = BLKIOFILE_ATTR(cft->private);
-
-        buf = kstrdup(buffer, GFP_KERNEL);
-        if (!buf)
-                return -ENOMEM;
-
-        ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
-        kfree(buf);
-        return ret;
+        rcu_read_unlock();
+        put_disk(ctx->disk);
 }
 
 /* for propio conf */
@@ -1140,6 +1110,32 @@ static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
         return 0;
 }
 
+static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                   const char *buf)
+{
+        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+        struct blkg_policy_data *pd;
+        struct blkg_conf_ctx ctx;
+        int ret;
+
+        ret = blkg_conf_prep(blkcg, buf, &ctx);
+        if (ret)
+                return ret;
+
+        ret = -EINVAL;
+        pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
+        if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
+                              ctx.v <= BLKIO_WEIGHT_MAX))) {
+                pd->conf.weight = ctx.v;
+                blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
+                                          ctx.v ?: blkcg->weight);
+                ret = 0;
+        }
+
+        blkg_conf_finish(&ctx);
+        return ret;
+}
+
 static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
@@ -1181,39 +1177,67 @@ static u64 blkg_prfill_conf_u64(struct seq_file *sf,
 static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                                 struct seq_file *sf)
 {
-        int off;
-
-        switch (BLKIOFILE_ATTR(cft->private)) {
-        case BLKIO_THROTL_read_bps_device:
-                off = offsetof(struct blkio_group_conf, bps[READ]);
-                break;
-        case BLKIO_THROTL_write_bps_device:
-                off = offsetof(struct blkio_group_conf, bps[WRITE]);
-                break;
-        case BLKIO_THROTL_read_iops_device:
-                off = offsetof(struct blkio_group_conf, iops[READ]);
-                break;
-        case BLKIO_THROTL_write_iops_device:
-                off = offsetof(struct blkio_group_conf, iops[WRITE]);
-                break;
-        default:
-                return -EINVAL;
-        }
-
         blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
                           blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
-                          off, false);
+                          cft->private, false);
         return 0;
 }
+
+static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+                              const char *buf, int rw,
+                              void (*update)(struct blkio_group *, int, u64, int))
+{
+        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+        struct blkg_policy_data *pd;
+        struct blkg_conf_ctx ctx;
+        int ret;
+
+        ret = blkg_conf_prep(blkcg, buf, &ctx);
+        if (ret)
+                return ret;
+
+        ret = -EINVAL;
+        pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
+        if (pd) {
+                *(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
+                update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
+                ret = 0;
+        }
+
+        blkg_conf_finish(&ctx);
+        return ret;
+}
+
+static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
+                                const char *buf)
+{
+        return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
+}
+
+static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
+                                const char *buf)
+{
+        return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
+}
+
+static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
+                                 const char *buf)
+{
+        return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
+}
+
+static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
+                                 const char *buf)
+{
+        return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
+}
 #endif
 
 struct cftype blkio_files[] = {
         {
                 .name = "weight_device",
-                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                                BLKIO_PROP_weight_device),
                 .read_seq_string = blkcg_print_weight_device,
-                .write_string = blkiocg_file_write,
+                .write_string = blkcg_set_weight_device,
                 .max_write_len = 256,
         },
         {
@@ -1276,37 +1300,33 @@ struct cftype blkio_files[] = {
 #ifdef CONFIG_BLK_DEV_THROTTLING
         {
                 .name = "throttle.read_bps_device",
-                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                                BLKIO_THROTL_read_bps_device),
+                .private = offsetof(struct blkio_group_conf, bps[READ]),
                 .read_seq_string = blkcg_print_conf_u64,
-                .write_string = blkiocg_file_write,
+                .write_string = blkcg_set_conf_bps_r,
                 .max_write_len = 256,
         },
 
         {
                 .name = "throttle.write_bps_device",
-                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                                BLKIO_THROTL_write_bps_device),
+                .private = offsetof(struct blkio_group_conf, bps[WRITE]),
                 .read_seq_string = blkcg_print_conf_u64,
-                .write_string = blkiocg_file_write,
+                .write_string = blkcg_set_conf_bps_w,
                 .max_write_len = 256,
         },
 
         {
                 .name = "throttle.read_iops_device",
-                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                                BLKIO_THROTL_read_iops_device),
+                .private = offsetof(struct blkio_group_conf, iops[READ]),
                 .read_seq_string = blkcg_print_conf_u64,
-                .write_string = blkiocg_file_write,
+                .write_string = blkcg_set_conf_iops_r,
                 .max_write_len = 256,
         },
 
         {
                 .name = "throttle.write_iops_device",
-                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                                BLKIO_THROTL_write_iops_device),
+                .private = offsetof(struct blkio_group_conf, iops[WRITE]),
                 .read_seq_string = blkcg_print_conf_u64,
-                .write_string = blkiocg_file_write,
+                .write_string = blkcg_set_conf_iops_w,
                 .max_write_len = 256,
         },
         {