author      Tejun Heo <tj@kernel.org>          2012-04-16 16:57:25 -0400
committer   Jens Axboe <axboe@kernel.dk>       2012-04-20 04:06:17 -0400
commit      3c798398e393e5f9502dbab2b51e6c25e2e8f2ac (patch)
tree        d6f638e6a25dec4887e64bcc35b98bc394cb974f /block/blk-throttle.c
parent      36558c8a30e121f97b5852ae33e28081af21bdbf (diff)
blkcg: mass rename of blkcg API
During the recent blkcg cleanup, most of blkcg API has changed to such extent that mass renaming wouldn't cause any noticeable pain. Take the chance and cleanup the naming.

* Rename blkio_cgroup to blkcg.

* Drop blkio / blkiocg prefixes and consistently use blkcg.

* Rename blkio_group to blkcg_gq, which is consistent with io_cq but keep the blkg prefix / variable name.

* Rename policy method type and field names to signify they're dealing with policy data.

* Rename blkio_policy_type to blkcg_policy.

This patch doesn't cause any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
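For orientation, a policy written against the renamed API has roughly the shape sketched below. This is an illustrative sketch only, not code from the patch: the example_* names, the nr_bios field, and the include lines are assumptions, while the blkcg_* identifiers (struct blkcg_policy, struct blkcg_gq, pd_init_fn, blkg_to_pdata(), blkcg_policy_register()) are taken from the hunks further down.

/*
 * Sketch of a policy registering against the renamed blkcg API,
 * modeled on the blk-throttle hunks below.  The example_* names are
 * hypothetical; only the blkcg_* identifiers come from this patch.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

struct example_pd {			/* hypothetical per-blkg policy data */
	u64 nr_bios;
};

static struct blkcg_policy blkcg_policy_example;

/* pd_* callbacks now take a struct blkcg_gq (formerly struct blkio_group) */
static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_pd *pd = blkg_to_pdata(blkg, &blkcg_policy_example);

	pd->nr_bios = 0;
}

static struct blkcg_policy blkcg_policy_example = {
	.ops = {
		.pd_init_fn	= example_pd_init,	/* was blkio_init_group_fn */
	},
	.pdata_size	= sizeof(struct example_pd),
};

static int __init example_init(void)
{
	/* was blkio_policy_register(&blkio_policy_...) */
	return blkcg_policy_register(&blkcg_policy_example);
}
module_init(example_init);

The same pattern appears verbatim in the blkcg_policy_throtl definition near the end of this diff.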
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--    block/blk-throttle.c    72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e9b7a47f6da0..00c7eff66ecf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,7 +21,7 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
-static struct blkio_policy_type blkio_policy_throtl;
+static struct blkcg_policy blkcg_policy_throtl;
 
 /* A workqueue to queue throttle related work */
 static struct workqueue_struct *kthrotld_workqueue;
@@ -120,12 +120,12 @@ static LIST_HEAD(tg_stats_alloc_list);
 static void tg_stats_alloc_fn(struct work_struct *);
 static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
 
-static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 {
-	return blkg_to_pdata(blkg, &blkio_policy_throtl);
+	return blkg_to_pdata(blkg, &blkcg_policy_throtl);
 }
 
-static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 {
 	return pdata_to_blkg(tg);
 }
@@ -208,7 +208,7 @@ alloc_stats:
 		goto alloc_stats;
 }
 
-static void throtl_init_blkio_group(struct blkio_group *blkg)
+static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
@@ -233,7 +233,7 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 	spin_unlock(&tg_stats_alloc_lock);
 }
 
-static void throtl_exit_blkio_group(struct blkio_group *blkg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
@@ -244,7 +244,7 @@ static void throtl_exit_blkio_group(struct blkio_group *blkg)
 	free_percpu(tg->stats_cpu);
 }
 
-static void throtl_reset_group_stats(struct blkio_group *blkg)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 	int cpu;
@@ -260,33 +260,33 @@ static void throtl_reset_group_stats(struct blkio_group *blkg)
 	}
 }
 
-static struct
-throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+					   struct blkcg *blkcg)
 {
 	/*
-	 * This is the common case when there are no blkio cgroups.
-	 * Avoid lookup in this case
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
 	 */
-	if (blkcg == &blkio_root_cgroup)
+	if (blkcg == &blkcg_root)
 		return td_root_tg(td);
 
 	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
 
 static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
-						  struct blkio_cgroup *blkcg)
+						  struct blkcg *blkcg)
 {
 	struct request_queue *q = td->queue;
 	struct throtl_grp *tg = NULL;
 
 	/*
-	 * This is the common case when there are no blkio cgroups.
-	 * Avoid lookup in this case
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
 	 */
-	if (blkcg == &blkio_root_cgroup) {
+	if (blkcg == &blkcg_root) {
 		tg = td_root_tg(td);
 	} else {
-		struct blkio_group *blkg;
+		struct blkcg_gq *blkg;
 
 		blkg = blkg_lookup_create(blkcg, q);
 
@@ -665,7 +665,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	return 0;
 }
 
-static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 					 int rw)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -822,7 +822,7 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 static void throtl_process_limit_change(struct throtl_data *td)
 {
 	struct request_queue *q = td->queue;
-	struct blkio_group *blkg, *n;
+	struct blkcg_gq *blkg, *n;
 
 	if (!td->limits_changed)
 		return;
@@ -951,9 +951,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, void *pdata, int off)
 static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
 			       struct seq_file *sf)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
-	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkio_policy_throtl,
+	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
 			  cft->private, true);
 	return 0;
 }
@@ -979,29 +979,29 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, void *pdata, int off)
 static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
 			     struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_u64,
-			  &blkio_policy_throtl, cft->private, false);
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
 static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
 			      struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_uint,
-			  &blkio_policy_throtl, cft->private, false);
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
 static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 		       bool is_u64)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
 	struct throtl_data *td;
 	int ret;
 
-	ret = blkg_conf_prep(blkcg, &blkio_policy_throtl, buf, &ctx);
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
 	if (ret)
 		return ret;
 
@@ -1086,11 +1086,11 @@ static void throtl_shutdown_wq(struct request_queue *q)
 	cancel_delayed_work_sync(&td->throtl_work);
 }
 
-static struct blkio_policy_type blkio_policy_throtl = {
+static struct blkcg_policy blkcg_policy_throtl = {
 	.ops = {
-		.blkio_init_group_fn = throtl_init_blkio_group,
-		.blkio_exit_group_fn = throtl_exit_blkio_group,
-		.blkio_reset_group_stats_fn = throtl_reset_group_stats,
+		.pd_init_fn = throtl_pd_init,
+		.pd_exit_fn = throtl_pd_exit,
+		.pd_reset_stats_fn = throtl_pd_reset_stats,
 	},
 	.pdata_size = sizeof(struct throtl_grp),
 	.cftypes = throtl_files,
@@ -1101,7 +1101,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
 	bool rw = bio_data_dir(bio), update_disptime = true;
-	struct blkio_cgroup *blkcg;
+	struct blkcg *blkcg;
 	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
@@ -1118,7 +1118,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * just update the dispatch stats in lockless manner and return.
 	 */
 	rcu_read_lock();
-	blkcg = bio_blkio_cgroup(bio);
+	blkcg = bio_blkcg(bio);
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
 		if (tg_no_rule_group(tg, rw)) {
@@ -1243,7 +1243,7 @@ int blk_throtl_init(struct request_queue *q)
 	td->queue = q;
 
 	/* activate policy */
-	ret = blkcg_activate_policy(q, &blkio_policy_throtl);
+	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
 	if (ret)
 		kfree(td);
 	return ret;
@@ -1253,7 +1253,7 @@ void blk_throtl_exit(struct request_queue *q)
 {
 	BUG_ON(!q->td);
 	throtl_shutdown_wq(q);
-	blkcg_deactivate_policy(q, &blkio_policy_throtl);
+	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
 	kfree(q->td);
 }
 
@@ -1263,7 +1263,7 @@ static int __init throtl_init(void)
 	if (!kthrotld_workqueue)
 		panic("Failed to create kthrotld\n");
 
-	return blkio_policy_register(&blkio_policy_throtl);
+	return blkcg_policy_register(&blkcg_policy_throtl);
 }
 
 module_init(throtl_init);