author     Vivek Goyal <vgoyal@redhat.com>     2011-05-19 15:38:24 -0400
committer  Jens Axboe <jaxboe@fusionio.com>    2011-05-20 14:34:52 -0400
commit     29b125892f3317ada86b662e0b6ebc0f79be9037 (patch)
tree       a8caa5a8d521c0d667bbae956ff4d8b160ad02d9 /block/blk-throttle.c
parent     f469a7b4d5b1d1d053200a9015fd25d59c057f49 (diff)
blk-throttle: Dynamically allocate root group
Currently we allocate the root throtl_grp statically. But as we will be introducing per-cpu stat pointers, which will be allocated dynamically even for the root group, we might as well make the whole root throtl_grp allocation dynamic and treat it in the same manner as other groups.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
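In miniature, the change swaps an embedded struct member for a pointer that is allocated at init time, which forces the init path to grow an error leg. A minimal userspace sketch of that pattern (illustrative only: calloc/free stand in for the kernel's allocation primitives, the structs are stubs, and alloc_tg()/init_td() are hypothetical names, not kernel functions):

	#include <stdlib.h>

	struct throtl_grp { int ref; };

	struct throtl_data {
		struct throtl_grp *root_tg;	/* was: struct throtl_grp root_tg; */
	};

	/* Stand-in for throtl_alloc_tg(): allocate and initialize one group. */
	static struct throtl_grp *alloc_tg(void)
	{
		struct throtl_grp *tg = calloc(1, sizeof(*tg));

		if (tg)
			tg->ref = 1;	/* single reference, dropped at teardown */
		return tg;
	}

	/* Mirrors the new blk_throtl_init() flow: the root group allocation
	 * can now fail, so the error path must free throtl_data as well. */
	static int init_td(struct throtl_data **out)
	{
		struct throtl_data *td = calloc(1, sizeof(*td));
		struct throtl_grp *tg;

		if (!td)
			return -1;

		tg = alloc_tg();
		if (!tg) {
			free(td);
			return -1;	/* kernel code returns -ENOMEM here */
		}

		td->root_tg = tg;
		*out = td;
		return 0;
	}

One consequence visible in the diff below: the old code pinned the static root group with an extra reference (atomic_inc(&tg->ref)) so it would never be freed along with the other groups on tg_list; once the root group is allocated like any other group, that special case disappears.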
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  27
1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c201967b33c..68f2ac3f3b0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -88,7 +88,7 @@ struct throtl_data
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
-	struct throtl_grp root_tg;
+	struct throtl_grp *root_tg;
 	struct request_queue *queue;
 
 	/* Total Number of queued bios on READ and WRITE lists */
@@ -233,7 +233,7 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup)
-		tg = &td->root_tg;
+		tg = td->root_tg;
 	else
 		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
@@ -313,7 +313,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 
 	/* Group allocation failed. Account the IO to root group */
 	if (!tg) {
-		tg = &td->root_tg;
+		tg = td->root_tg;
 		return tg;
 	}
 
@@ -1153,18 +1153,16 @@ int blk_throtl_init(struct request_queue *q)
 	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
-	/* Init root group */
-	tg = &td->root_tg;
-	throtl_init_group(tg);
+	/* alloc and Init root group. */
+	td->queue = q;
+	tg = throtl_alloc_tg(td);
 
-	/*
-	 * Set root group reference to 2. One reference will be dropped when
-	 * all groups on tg_list are being deleted during queue exit. Other
-	 * reference will remain there as we don't want to delete this group
-	 * as it is statically allocated and gets destroyed when throtl_data
-	 * goes away.
-	 */
-	atomic_inc(&tg->ref);
+	if (!tg) {
+		kfree(td);
+		return -ENOMEM;
+	}
+
+	td->root_tg = tg;
 
 	rcu_read_lock();
 	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
@@ -1173,7 +1171,6 @@ int blk_throtl_init(struct request_queue *q)
 	throtl_add_group_to_td_list(td, tg);
 
 	/* Attach throtl data to request queue */
-	td->queue = q;
 	q->td = td;
 	return 0;
 }