path: root/block/blk-throttle.c
author		Tejun Heo <tj@kernel.org>	2012-04-13 16:11:33 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-04-20 04:06:06 -0400
commit		a2b1693bac45ea3fe3ba612fd22c45f17449f610 (patch)
tree		2e05859caab6453efbc85d584dd72dca7ef03cd0 /block/blk-throttle.c
parent		03d8e11142a893ad322285d3c8a08e88b570cda1 (diff)
blkcg: implement per-queue policy activation
All blkcg policies were assumed to be enabled on all request_queues. Due to various implementation obstacles, during the recent blkcg core updates, this was temporarily implemented as shooting down all !root blkgs on elevator switch and policy [de]registration, combined with half-broken in-place root blkg updates. In addition to being buggy and racy, this meant losing all blkcg configurations across those events.

Now that blkcg is cleaned up enough, this patch replaces the temporary implementation with proper per-queue policy activation. Each blkcg policy should call the new blkcg_[de]activate_policy() to enable and disable the policy on a specific queue. blkcg_activate_policy() allocates and installs policy data for the policy for all existing blkgs; blkcg_deactivate_policy() does the reverse. If a policy is not enabled for a given queue, blkg printing / config functions skip the respective blkg for that queue.

blkcg_activate_policy() also takes care of root blkg creation, and cfq_init_queue() and blk_throtl_init() are updated accordingly.

This makes blkcg_bypass_{start|end}() and update_root_blkg_pd() unnecessary. Dropped.

v2: cfq_init_queue() was returning uninitialized @ret on root_group alloc failure if !CONFIG_CFQ_GROUP_IOSCHED. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
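As a reading aid, a minimal sketch of the usage pattern the commit message describes, modeled on the blk_throtl_init()/blk_throtl_exit() hunks in the diff below: the policy names and data structures (blkio_policy_foo, foo_data, q->foo) are hypothetical placeholders and are not part of this patch; only blkcg_activate_policy() and blkcg_deactivate_policy() come from it.

	/* Illustrative only: a policy enables itself per queue on init
	 * and disables itself on exit. */
	static int foo_init_queue(struct request_queue *q)
	{
		struct foo_data *fd;		/* hypothetical per-queue policy data */
		int ret;

		fd = kzalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
		if (!fd)
			return -ENOMEM;
		q->foo = fd;			/* hypothetical queue field */

		/* enable the policy on this queue: allocates and installs
		 * policy data for every existing blkg of the queue,
		 * including the root blkg */
		ret = blkcg_activate_policy(q, &blkio_policy_foo);
		if (ret)
			kfree(fd);
		return ret;
	}

	static void foo_exit_queue(struct request_queue *q)
	{
		/* disable the policy: tears down its per-blkg data again */
		blkcg_deactivate_policy(q, &blkio_policy_foo);
		kfree(q->foo);
	}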
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	52
1 file changed, 19 insertions(+), 33 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8c520fad6885..2fc964e06ea4 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -995,35 +995,31 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
+	struct throtl_data *td;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkio_policy_throtl, buf, &ctx);
 	if (ret)
 		return ret;
 
-	ret = -EINVAL;
 	tg = blkg_to_tg(ctx.blkg);
-	if (tg) {
-		struct throtl_data *td = ctx.blkg->q->td;
-
-		if (!ctx.v)
-			ctx.v = -1;
+	td = ctx.blkg->q->td;
 
-		if (is_u64)
-			*(u64 *)((void *)tg + cft->private) = ctx.v;
-		else
-			*(unsigned int *)((void *)tg + cft->private) = ctx.v;
+	if (!ctx.v)
+		ctx.v = -1;
 
-		/* XXX: we don't need the following deferred processing */
-		xchg(&tg->limits_changed, true);
-		xchg(&td->limits_changed, true);
-		throtl_schedule_delayed_work(td, 0);
+	if (is_u64)
+		*(u64 *)((void *)tg + cft->private) = ctx.v;
+	else
+		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
 
-		ret = 0;
-	}
+	/* XXX: we don't need the following deferred processing */
+	xchg(&tg->limits_changed, true);
+	xchg(&td->limits_changed, true);
+	throtl_schedule_delayed_work(td, 0);
 
 	blkg_conf_finish(&ctx);
-	return ret;
+	return 0;
 }
 
 static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
@@ -1230,7 +1226,7 @@ void blk_throtl_drain(struct request_queue *q)
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
-	struct blkio_group *blkg;
+	int ret;
 
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
 	if (!td)
@@ -1243,28 +1239,18 @@ int blk_throtl_init(struct request_queue *q)
 	q->td = td;
 	td->queue = q;
 
-	/* alloc and init root group. */
-	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
-
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg))
-		q->root_blkg = blkg;
-
-	spin_unlock_irq(q->queue_lock);
-	rcu_read_unlock();
-
-	if (!q->root_blkg) {
+	/* activate policy */
+	ret = blkcg_activate_policy(q, &blkio_policy_throtl);
+	if (ret)
 		kfree(td);
-		return -ENOMEM;
-	}
-	return 0;
+	return ret;
 }
 
 void blk_throtl_exit(struct request_queue *q)
 {
 	BUG_ON(!q->td);
 	throtl_shutdown_wq(q);
+	blkcg_deactivate_policy(q, &blkio_policy_throtl);
 	kfree(q->td);
 }
 