path: root/block/cfq-iosched.c
author    Tejun Heo <tj@kernel.org>  2012-04-13 16:11:33 -0400
committer Jens Axboe <axboe@kernel.dk>  2012-04-20 04:06:06 -0400
commit    a2b1693bac45ea3fe3ba612fd22c45f17449f610 (patch)
tree      2e05859caab6453efbc85d584dd72dca7ef03cd0 /block/cfq-iosched.c
parent    03d8e11142a893ad322285d3c8a08e88b570cda1 (diff)
blkcg: implement per-queue policy activation
All blkcg policies were assumed to be enabled on all request_queues. Due to various implementation obstacles, during the recent blkcg core updates, this was temporarily implemented as shooting down all !root blkgs on elevator switch and policy [de]registration, combined with half-broken in-place root blkg updates. In addition to being buggy and racy, this meant losing all blkcg configurations across those events.

Now that blkcg is cleaned up enough, this patch replaces the temporary implementation with proper per-queue policy activation. Each blkcg policy should call the new blkcg_[de]activate_policy() to enable or disable the policy on a specific queue. blkcg_activate_policy() allocates and installs policy data for the policy on all existing blkgs; blkcg_deactivate_policy() does the reverse. If a policy is not enabled for a given queue, blkg printing / config functions skip the respective blkg for that queue.

blkcg_activate_policy() also takes care of root blkg creation, and cfq_init_queue() and blk_throtl_init() are updated accordingly. This makes blkcg_bypass_{start|end}() and update_root_blkg_pd() unnecessary; both are dropped.

v2: cfq_init_queue() was returning uninitialized @ret on root_group alloc failure if !CONFIG_CFQ_GROUP_IOSCHED. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
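To make the new contract concrete, here is a minimal sketch of how a policy is expected to use the per-queue activation API; example_init_queue()/example_exit_queue() are hypothetical stand-ins for an elevator's init/exit hooks, while blkcg_activate_policy(), blkcg_deactivate_policy(), blkio_policy_cfq, and q->root_blkg are taken from this patch:

static int example_init_queue(struct request_queue *q)
{
	int ret;

	/*
	 * Enable the policy on this queue only: allocates and installs
	 * per-policy data for all existing blkgs on @q and creates the
	 * root blkg, per the description above.
	 */
	ret = blkcg_activate_policy(q, &blkio_policy_cfq);
	if (ret)
		return ret;

	/* the root group is now reachable through q->root_blkg */
	return 0;
}

static void example_exit_queue(struct request_queue *q)
{
	/* Disable the policy on this queue and tear down its policy data. */
	blkcg_deactivate_policy(q, &blkio_policy_cfq);
}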
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86440e04f3ee..0203652e1f34 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1406,8 +1406,7 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 
 	ret = -EINVAL;
 	cfqg = blkg_to_cfqg(ctx.blkg);
-	if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
-				ctx.v <= CFQ_WEIGHT_MAX))) {
+	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
 		cfqg->dev_weight = ctx.v;
 		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 		ret = 0;
@@ -3938,7 +3937,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 #ifndef CONFIG_CFQ_GROUP_IOSCHED
 	kfree(cfqd->root_group);
 #endif
-	update_root_blkg_pd(q, &blkio_policy_cfq);
+	blkcg_deactivate_policy(q, &blkio_policy_cfq);
 	kfree(cfqd);
 }
 
@@ -3946,7 +3945,7 @@ static int cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
 	struct blkio_group *blkg __maybe_unused;
-	int i;
+	int i, ret;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
@@ -3960,28 +3959,20 @@ static int cfq_init_queue(struct request_queue *q)
 
 	/* Init root group and prefer root group over other groups by default */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
-
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg)) {
-		q->root_blkg = blkg;
-		cfqd->root_group = blkg_to_cfqg(blkg);
-	}
+	ret = blkcg_activate_policy(q, &blkio_policy_cfq);
+	if (ret)
+		goto out_free;
 
-	spin_unlock_irq(q->queue_lock);
-	rcu_read_unlock();
+	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
 #else
+	ret = -ENOMEM;
 	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
 					GFP_KERNEL, cfqd->queue->node);
-	if (cfqd->root_group)
-		cfq_init_cfqg_base(cfqd->root_group);
-#endif
-	if (!cfqd->root_group) {
-		kfree(cfqd);
-		return -ENOMEM;
-	}
+	if (!cfqd->root_group)
+		goto out_free;
 
+	cfq_init_cfqg_base(cfqd->root_group);
+#endif
 	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
 
 	/*
@@ -4031,6 +4022,10 @@ static int cfq_init_queue(struct request_queue *q)
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
 	return 0;
+
+out_free:
+	kfree(cfqd);
+	return ret;
 }
 
 /*
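For reference, a condensed sketch of the control flow cfq_init_queue() ends up with after the hunks above; this is an illustration assembled from the diff, not the verbatim kernel code, and sketch_init_queue() is a hypothetical name. It also shows the v2 fix: the !CONFIG_CFQ_GROUP_IOSCHED branch seeds @ret with -ENOMEM so the out_free path never returns an uninitialized value.

static int sketch_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int ret;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* per-queue activation can fail; unwind through out_free */
	ret = blkcg_activate_policy(q, &blkio_policy_cfq);
	if (ret)
		goto out_free;
	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;	/* seed @ret: the only failure below is the alloc */
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, q->node);
	if (!cfqd->root_group)
		goto out_free;
	cfq_init_cfqg_base(cfqd->root_group);
#endif
	/* ... remaining initialization elided ... */
	return 0;

out_free:
	kfree(cfqd);	/* single exit point frees the partially set up cfqd */
	return ret;
}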