author		Tejun Heo <tj@kernel.org>	2012-04-13 16:11:32 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-04-20 04:06:06 -0400
commit		03d8e11142a893ad322285d3c8a08e88b570cda1
tree		0f6117ffdb0ad9565a165a98a8d4985ced094eb1 /block/blk-throttle.c
parent		b82d4b197c782ced82a8b7b76664125d2d3c156c
blkcg: add request_queue->root_blkg
With per-queue policy activation, root blkg creation will be moved to
blkcg core. Add q->root_blkg in preparation. For blk-throtl, this
replaces throtl_data->root_tg; however, cfq needs to keep
cfqd->root_group for !CONFIG_CFQ_GROUP_IOSCHED.

This is to prepare for per-queue policy activation and doesn't cause
any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
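[Note: the diff below is limited to block/blk-throttle.c, so the
request_queue side of the change — the new field itself — is not shown
in this view. A minimal sketch of what that side presumably looks like;
the field name comes from the commit, while the exact placement and the
CONFIG_BLK_CGROUP guard are assumptions:

	/*
	 * Sketch only: q->root_blkg as referenced by this commit.
	 * Placement among the other members is an assumption, not
	 * the actual include/linux/blkdev.h hunk.
	 */
	struct request_queue {
		/* ... existing fields ... */
	#ifdef CONFIG_BLK_CGROUP
		/* root blkio_group of this queue, set up at init time */
		struct blkio_group	*root_blkg;
	#endif
		/* ... */
	};

Deriving the root throtl_grp from q->root_blkg via the new td_root_tg()
helper, instead of caching it in throtl_data, means blk-throtl no longer
owns the pointer — which is what allows root blkg creation to move into
blkcg core later in the series.]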
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6f1bfdf9a1b7..8c520fad6885 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -97,7 +97,6 @@ struct throtl_data
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
-	struct throtl_grp *root_tg;
 	struct request_queue *queue;
 
 	/* Total Number of queued bios on READ and WRITE lists */
@@ -131,6 +130,11 @@ static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
 	return pdata_to_blkg(tg);
 }
 
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+	return blkg_to_tg(td->queue->root_blkg);
+}
+
 enum tg_state_flags {
 	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 };
@@ -261,7 +265,7 @@ throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup)
-		return td->root_tg;
+		return td_root_tg(td);
 
 	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
@@ -277,7 +281,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup) {
-		tg = td->root_tg;
+		tg = td_root_tg(td);
 	} else {
 		struct blkio_group *blkg;
 
@@ -287,7 +291,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		if (!IS_ERR(blkg))
 			tg = blkg_to_tg(blkg);
 		else if (!blk_queue_dead(q))
-			tg = td->root_tg;
+			tg = td_root_tg(td);
 	}
 
 	return tg;
@@ -1245,12 +1249,12 @@ int blk_throtl_init(struct request_queue *q)
 
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
 	if (!IS_ERR(blkg))
-		td->root_tg = blkg_to_tg(blkg);
+		q->root_blkg = blkg;
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
-	if (!td->root_tg) {
+	if (!q->root_blkg) {
 		kfree(td);
 		return -ENOMEM;
 	}
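
[Note: one detail worth noting in the blk_throtl_init() hunk above:
blkg_lookup_create() reports failure through the kernel's ERR_PTR
convention rather than by returning NULL, so q->root_blkg is only
assigned on success, and the later !q->root_blkg test is what catches
the failure. A self-contained userspace sketch of that pattern — all
names here are illustrative stand-ins, not the kernel API:

	#include <stdio.h>
	#include <errno.h>

	/* Userspace re-creation of the kernel's ERR_PTR/IS_ERR idiom:
	 * errno values are encoded into the top page of pointer space. */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct blkio_group { int dummy; };

	/* Illustrative stand-in for blkg_lookup_create(): either a valid
	 * pointer or an ERR_PTR-encoded errno, never NULL. */
	static struct blkio_group *fake_lookup_create(int fail)
	{
		static struct blkio_group root;
		return fail ? ERR_PTR(-ENOMEM) : &root;
	}

	int main(void)
	{
		struct blkio_group *root_blkg = NULL;
		struct blkio_group *blkg = fake_lookup_create(0);

		/* mirrors blk_throtl_init(): store the blkg only on success */
		if (!IS_ERR(blkg))
			root_blkg = blkg;

		/* mirrors the -ENOMEM bail-out when root_blkg was never set */
		if (!root_blkg) {
			fprintf(stderr, "root blkg creation failed\n");
			return 1;
		}
		printf("root blkg set up\n");
		return 0;
	}

Keeping the failure check on q->root_blkg (rather than on the local
blkg) works because the field starts out NULL and is written only on
the success path.]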