about summary refs log tree commit diff stats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2012-03-05 16:15:05 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:22 -0500
commit	f51b802c17e2a21926b29911493f5e7ddf6eee87 (patch)
tree	c32d9ea2a61201b0c6bf59b349300af04dbc3686 /block/blk-throttle.c
parent	035d10b2fa7e5f7e9bf9465dbc39c35affd5ac32 (diff)
blkcg: use the usual get blkg path for root blkio_group
For root blkg, blk_throtl_init() was using throtl_alloc_tg() explicitly and cfq_init_queue() was manually initializing embedded cfqd->root_group, adding unnecessarily different code paths to blkg handling.

Make both use the usual blkio_group get functions - throtl_get_tg() and cfq_get_cfqg() - for the root blkio_group too. Note that blk_throtl_init() callsite is pushed downwards in blk_alloc_queue_node() so that @q is sufficiently initialized for throtl_get_tg().

This simplifies root blkg handling noticeably for cfq and will allow further modularization of blkcg API.

-v2: Vivek pointed out that using cfq_get_cfqg() won't work if CONFIG_CFQ_GROUP_IOSCHED is disabled. Fix it by factoring out initialization of base part of cfqg into cfq_init_cfqg_base() and alloc/init/free explicitly if !CONFIG_CFQ_GROUP_IOSCHED.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	| 18
1 files changed, 9 insertions, 9 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6613de78e364..aeeb798d1cda 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1252,7 +1252,6 @@ void blk_throtl_drain(struct request_queue *q)
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
-	struct throtl_grp *tg;
 
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
 	if (!td)
@@ -1265,19 +1264,20 @@ int blk_throtl_init(struct request_queue *q)
 
 	/* alloc and Init root group. */
 	td->queue = q;
-	tg = throtl_alloc_tg(td);
 
-	if (!tg) {
-		kfree(td);
-		return -ENOMEM;
-	}
-
-	td->root_tg = tg;
-
 	rcu_read_lock();
-	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
+	spin_lock_irq(q->queue_lock);
+
+	td->root_tg = throtl_get_tg(td, &blkio_root_cgroup);
+
+	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
+	if (!td->root_tg) {
+		kfree(td);
+		return -ENOMEM;
+	}
+
 	/* Attach throtl data to request queue */
 	q->td = td;
 	return 0;