path: root/block/blk-cgroup.c
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c | 373 -
1 file changed, 0 insertions(+), 373 deletions(-)
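
Note: this commit removes the proportional-weight (blkio.weight, blkio.weight_device) and blk-throttle (blkio.throttle.*) configuration and statistics file handling from the common blkcg core, leaving blkio_files[] with only reset_stats and the terminator; the per-policy update helpers go with it, apparently so that each policy can manage its own files.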
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e714f8ddcd2..b963fb4b3995 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -63,63 +63,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
-static inline void blkio_update_group_weight(struct blkio_group *blkg,
-					     int plid, unsigned int weight)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-		if (blkiop->ops.blkio_update_group_weight_fn)
-			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
-							blkg, weight);
-	}
-}
-
-static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
-					  u64 bps, int rw)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-
-		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
-			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
-								blkg, bps);
-
-		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
-			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
-								blkg, bps);
-	}
-}
-
-static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
-					   u64 iops, int rw)
-{
-	struct blkio_policy_type *blkiop;
-
-	list_for_each_entry(blkiop, &blkio_list, list) {
-
-		/* If this policy does not own the blkg, do not send updates */
-		if (blkiop->plid != plid)
-			continue;
-
-		if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
-			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
-								blkg, iops);
-
-		if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
-			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
-								blkg, iops);
-	}
-}
-
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 /* This should be called with the queue_lock held. */
 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
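
The three helpers removed above share one dispatch pattern: walk the global blkio_list of registered policies, skip any policy whose plid does not own the blkg, and call the policy's update hook only if it is set. A minimal user-space sketch of that pattern follows (simplified stand-in types and a plain singly linked list instead of the kernel's list_head; these names are illustrative, not the kernel API):

#include <stdio.h>

struct blkio_group;			/* opaque stand-in for the kernel's blkg */

struct blkio_policy_ops {
	void (*update_group_weight_fn)(struct blkio_group *blkg,
				       unsigned int weight);
};

struct blkio_policy_type {
	int plid;			/* id of the policy owning certain blkgs */
	struct blkio_policy_ops ops;
	struct blkio_policy_type *next;	/* simplified list linkage */
};

static struct blkio_policy_type *blkio_list;

/* Broadcast a weight change to whichever registered policy owns the blkg. */
static void blkio_update_group_weight(struct blkio_group *blkg,
				      int plid, unsigned int weight)
{
	struct blkio_policy_type *pol;

	for (pol = blkio_list; pol; pol = pol->next) {
		/* If this policy does not own the blkg, do not send updates */
		if (pol->plid != plid)
			continue;
		/* Hooks are optional; a policy may implement only some. */
		if (pol->ops.update_group_weight_fn)
			pol->ops.update_group_weight_fn(blkg, weight);
	}
}

static void prop_update_weight(struct blkio_group *blkg, unsigned int weight)
{
	(void)blkg;
	printf("proportional policy: weight -> %u\n", weight);
}

int main(void)
{
	struct blkio_policy_type prop = {
		.plid = 0,
		.ops = { .update_group_weight_fn = prop_update_weight },
	};

	blkio_list = &prop;			/* "register" the policy */
	blkio_update_group_weight(NULL, 0, 500);
	return 0;
}

The NULL test on each hook matters because a policy may implement only a subset of the update callbacks, as the separate READ/WRITE function pointers in the bps and iops helpers show.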
@@ -939,33 +882,6 @@ int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
 }
 EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
-				      struct blkg_policy_data *pd, int off)
-{
-	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
-	u64 v = 0;
-
-	if (samples) {
-		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
-		do_div(v, samples);
-	}
-	__blkg_prfill_u64(sf, pd, v);
-	return 0;
-}
-
-/* print avg_queue_size */
-static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
-				      struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
-			  BLKIO_POLICY_PROP, 0, false);
-	return 0;
-}
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
-
 /**
  * blkg_conf_prep - parse and prepare for per-blkg config update
  * @blkcg: target block cgroup
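
The debug helper removed above computed the average lazily at read time: the accumulated avg_queue_size_sum divided by avg_queue_size_samples via do_div(), guarded against a zero sample count. A user-space sketch of that computation (ordinary 64-bit division stands in for the kernel's do_div()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t avg_queue_size_sum = 1234;	/* accumulated queue depths */
	uint64_t avg_queue_size_samples = 100;	/* number of observations */
	uint64_t v = 0;

	/* Divide only when samples exist, as the removed helper did. */
	if (avg_queue_size_samples)
		v = avg_queue_size_sum / avg_queue_size_samples;

	printf("avg_queue_size = %llu\n", (unsigned long long)v);
	return 0;
}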
@@ -1039,300 +955,11 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
-/* for propio conf */
-static u64 blkg_prfill_weight_device(struct seq_file *sf,
-				     struct blkg_policy_data *pd, int off)
-{
-	if (!pd->conf.weight)
-		return 0;
-	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
-}
-
-static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
-{
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
-			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
-			  false);
-	return 0;
-}
-
-static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
-			      struct seq_file *sf)
-{
-	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
-	return 0;
-}
-
-static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				   const char *buf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkg_policy_data *pd;
-	struct blkg_conf_ctx ctx;
-	int ret;
-
-	ret = blkg_conf_prep(blkcg, buf, &ctx);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
-	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
-			      ctx.v <= BLKIO_WEIGHT_MAX))) {
-		pd->conf.weight = ctx.v;
-		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
-					  ctx.v ?: blkcg->weight);
-		ret = 0;
-	}
-
-	blkg_conf_finish(&ctx);
-	return ret;
-}
-
-static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkio_group *blkg;
-	struct hlist_node *n;
-
-	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
-		return -EINVAL;
-
-	spin_lock(&blkio_list_lock);
-	spin_lock_irq(&blkcg->lock);
-	blkcg->weight = (unsigned int)val;
-
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
-
-		if (pd && !pd->conf.weight)
-			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
-						  blkcg->weight);
-	}
-
-	spin_unlock_irq(&blkcg->lock);
-	spin_unlock(&blkio_list_lock);
-	return 0;
-}
-
-/* for blk-throttle conf */
-#ifdef CONFIG_BLK_DEV_THROTTLING
-static u64 blkg_prfill_conf_u64(struct seq_file *sf,
-				struct blkg_policy_data *pd, int off)
-{
-	u64 v = *(u64 *)((void *)&pd->conf + off);
-
-	if (!v)
-		return 0;
-	return __blkg_prfill_u64(sf, pd, v);
-}
-
-static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-				struct seq_file *sf)
-{
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
-			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
-			  cft->private, false);
-	return 0;
-}
-
-static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-			      const char *buf, int rw,
-			      void (*update)(struct blkio_group *, int, u64, int))
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-	struct blkg_policy_data *pd;
-	struct blkg_conf_ctx ctx;
-	int ret;
-
-	ret = blkg_conf_prep(blkcg, buf, &ctx);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
-	if (pd) {
-		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
-		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
-		ret = 0;
-	}
-
-	blkg_conf_finish(&ctx);
-	return ret;
-}
-
-static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
-				const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
-}
-
-static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
-				const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
-}
-
-static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
-				 const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
-}
-
-static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
-				 const char *buf)
-{
-	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
-}
-#endif
-
 struct cftype blkio_files[] = {
 	{
-		.name = "weight_device",
-		.read_seq_string = blkcg_print_weight_device,
-		.write_string = blkcg_set_weight_device,
-		.max_write_len = 256,
-	},
-	{
-		.name = "weight",
-		.read_seq_string = blkcg_print_weight,
-		.write_u64 = blkcg_set_weight,
-	},
-	{
-		.name = "time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "sectors",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, sectors)),
-		.read_seq_string = blkcg_print_cpu_stat,
-	},
-	{
-		.name = "io_service_bytes",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "io_serviced",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "io_service_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, service_time)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, wait_time)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_merged",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, merged)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
-		.name = "io_queued",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, queued)),
-		.read_seq_string = blkcg_print_rwstat,
-	},
-	{
 		.name = "reset_stats",
 		.write_u64 = blkiocg_reset_stats,
 	},
-#ifdef CONFIG_BLK_DEV_THROTTLING
-	{
-		.name = "throttle.read_bps_device",
-		.private = offsetof(struct blkio_group_conf, bps[READ]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_bps_r,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_bps_device",
-		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_bps_w,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.read_iops_device",
-		.private = offsetof(struct blkio_group_conf, iops[READ]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_iops_r,
-		.max_write_len = 256,
-	},
-
-	{
-		.name = "throttle.write_iops_device",
-		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
-		.read_seq_string = blkcg_print_conf_u64,
-		.write_string = blkcg_set_conf_iops_w,
-		.max_write_len = 256,
-	},
-	{
-		.name = "throttle.io_service_bytes",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-	{
-		.name = "throttle.io_serviced",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
-	},
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	{
-		.name = "avg_queue_size",
-		.read_seq_string = blkcg_print_avg_queue_size,
-	},
-	{
-		.name = "group_wait_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, group_wait_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "idle_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, idle_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "empty_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, empty_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "dequeue",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, dequeue)),
-		.read_seq_string = blkcg_print_stat,
-	},
-	{
-		.name = "unaccounted_time",
-		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, unaccounted_time)),
-		.read_seq_string = blkcg_print_stat,
-	},
-#endif
 	{ }	/* terminate */
 };
 
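The removed throttle files shared one pair of print/set helpers by stashing each field's byte offset in cftype->private and dereferencing pd->conf at that offset. A user-space sketch of that offsetof() dispatch (blkio_group_conf below is a reduced illustration, not the kernel's definition):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { R = 0, W = 1 };		/* stand-ins for the kernel's READ/WRITE */

/* Reduced illustration of the per-group conf block. */
struct blkio_group_conf {
	uint64_t bps[2];	/* bytes-per-second limits, [R]/[W] */
	uint64_t iops[2];	/* ios-per-second limits, [R]/[W] */
};

/* Generic reader: fetch the u64 living 'off' bytes into the conf block. */
static uint64_t conf_read_u64(const struct blkio_group_conf *conf, size_t off)
{
	return *(const uint64_t *)((const char *)conf + off);
}

int main(void)
{
	struct blkio_group_conf conf = { .bps = { 1048576, 2097152 } };

	/* The same offsets the removed cftype table stored in .private. */
	size_t read_off = offsetof(struct blkio_group_conf, bps[R]);
	size_t write_off = offsetof(struct blkio_group_conf, bps[W]);

	printf("read_bps  = %llu\n",
	       (unsigned long long)conf_read_u64(&conf, read_off));
	printf("write_bps = %llu\n",
	       (unsigned long long)conf_read_u64(&conf, write_off));
	return 0;
}

Userspace drove these files with "MAJ:MIN value" writes parsed by blkg_conf_prep(); writing a value of 0 cleared the limit, which is why the setters pass ctx.v ?: -1 to the update callback.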