author    Vivek Goyal <vgoyal@redhat.com>    2011-05-19 15:38:19 -0400
committer Jens Axboe <jaxboe@fusionio.com>   2011-05-20 14:34:51 -0400
commit    a29a171e7c46c60842b85729280e2f5690372683 (patch)
tree      83d9be7f8801c04feeac0256e677da7240239046 /block
parent    698567f3fa790fea37509a54dea855302dd88331 (diff)
blk-throttle: Do the new group initialization with the help of a function
Group initialization code currently lives in two places: root group initialization in blk_throtl_init() and dynamically allocated group initialization in throtl_find_alloc_tg(). Create a common function and use it at both places.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-throttle.c  64
1 file changed, 35 insertions(+), 29 deletions(-)
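In outline, both call sites now reduce to the same pair of helpers. A condensed sketch of the call pattern introduced by the change below (allocation, error handling, locking and the per-cgroup limit lookups are elided):

        /* throtl_find_alloc_tg(): dynamically allocated group */
        throtl_init_group(tg);
        /* ... look up per-cgroup bps/iops limits ... */
        throtl_add_group_to_td_list(td, tg);

        /* blk_throtl_init(): root group embedded in throtl_data */
        tg = &td->root_tg;
        throtl_init_group(tg);
        /* ... register tg->blkg with the root blkio cgroup ... */
        throtl_add_group_to_td_list(td, tg);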
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 252a81a306f7..fa9a900c1254 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -159,6 +159,35 @@ static void throtl_put_tg(struct throtl_grp *tg)
 	kfree(tg);
 }
 
+static void throtl_init_group(struct throtl_grp *tg)
+{
+	INIT_HLIST_NODE(&tg->tg_node);
+	RB_CLEAR_NODE(&tg->rb_node);
+	bio_list_init(&tg->bio_lists[0]);
+	bio_list_init(&tg->bio_lists[1]);
+	tg->limits_changed = false;
+
+	/* Practically unlimited BW */
+	tg->bps[0] = tg->bps[1] = -1;
+	tg->iops[0] = tg->iops[1] = -1;
+
+	/*
+	 * Take the initial reference that will be released on destroy
+	 * This can be thought of a joint reference by cgroup and
+	 * request queue which will be dropped by either request queue
+	 * exit or cgroup deletion path depending on who is exiting first.
+	 */
+	atomic_set(&tg->ref, 1);
+}
+
+/* Should be called with rcu read lock held (needed for blkcg) */
+static void
+throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+{
+	hlist_add_head(&tg->tg_node, &td->tg_list);
+	td->nr_undestroyed_grps++;
+}
+
 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 			struct blkio_cgroup *blkcg)
 {
@@ -196,19 +225,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 	if (!tg)
 		goto done;
 
-	INIT_HLIST_NODE(&tg->tg_node);
-	RB_CLEAR_NODE(&tg->rb_node);
-	bio_list_init(&tg->bio_lists[0]);
-	bio_list_init(&tg->bio_lists[1]);
-	td->limits_changed = false;
-
-	/*
-	 * Take the initial reference that will be released on destroy
-	 * This can be thought of a joint reference by cgroup and
-	 * request queue which will be dropped by either request queue
-	 * exit or cgroup deletion path depending on who is exiting first.
-	 */
-	atomic_set(&tg->ref, 1);
+	throtl_init_group(tg);
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
@@ -220,8 +237,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
 	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
 
-	hlist_add_head(&tg->tg_node, &td->tg_list);
-	td->nr_undestroyed_grps++;
+	throtl_add_group_to_td_list(td, tg);
 done:
 	return tg;
 }
@@ -1060,18 +1076,11 @@ int blk_throtl_init(struct request_queue *q)
 	INIT_HLIST_HEAD(&td->tg_list);
 	td->tg_service_tree = THROTL_RB_ROOT;
 	td->limits_changed = false;
+	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
 	/* Init root group */
 	tg = &td->root_tg;
-	INIT_HLIST_NODE(&tg->tg_node);
-	RB_CLEAR_NODE(&tg->rb_node);
-	bio_list_init(&tg->bio_lists[0]);
-	bio_list_init(&tg->bio_lists[1]);
-
-	/* Practically unlimited BW */
-	tg->bps[0] = tg->bps[1] = -1;
-	tg->iops[0] = tg->iops[1] = -1;
-	td->limits_changed = false;
+	throtl_init_group(tg);
 
 	/*
 	 * Set root group reference to 2. One reference will be dropped when
@@ -1080,16 +1089,13 @@ int blk_throtl_init(struct request_queue *q)
 	 * as it is statically allocated and gets destroyed when throtl_data
 	 * goes away.
 	 */
-	atomic_set(&tg->ref, 2);
-	hlist_add_head(&tg->tg_node, &td->tg_list);
-	td->nr_undestroyed_grps++;
-
-	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+	atomic_inc(&tg->ref);
 
 	rcu_read_lock();
 	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
 					0, BLKIO_POLICY_THROTL);
 	rcu_read_unlock();
+	throtl_add_group_to_td_list(td, tg);
 
 	/* Attach throtl data to request queue */
 	td->queue = q;
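One detail worth calling out: the root group must still end up with a reference count of 2, as the comment in blk_throtl_init() explains. throtl_init_group() now takes the first reference via atomic_set(&tg->ref, 1), so the old open-coded atomic_set(&tg->ref, 2) becomes an atomic_inc(&tg->ref). A minimal sketch of the resulting sequence for the root group:

        tg = &td->root_tg;
        throtl_init_group(tg);   /* tg->ref == 1: joint cgroup/request-queue reference */
        atomic_inc(&tg->ref);    /* tg->ref == 2: extra reference for the statically allocated root group */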