author     Tejun Heo <tj@kernel.org>      2012-03-05 16:15:09 -0500
committer  Jens Axboe <axboe@kernel.dk>   2012-03-06 15:27:22 -0500
commit     7a4dd281ec66224f802093962d1d903d86b09560 (patch)
tree       c38c5c40464d4d7eb429cc14348a435176832ef2 /block/blk-throttle.c
parent     4bfd482e73b30284cb21e10834ce729fa81aa256 (diff)
blkcg: kill the mind-bending blkg->dev
blkg->dev is a dev_t recording the device number of the block device for the
associated request_queue. It is used to identify the associated block device
when printing out configuration or stats.

This is redundant to begin with. A blkg is an association between a cgroup
and a request_queue, it is of course possible to reach the request_queue from
the blkg, and synchronization conventions are in place for safe q
dereferencing, so this shouldn't have been necessary in the first place.
Furthermore, it's initialized by sscanf()ing the device name of
backing_dev_info. The mind boggles.

Anyways, if a blkg is visible under rcu lock, we *know* that the associated
request_queue hasn't gone away yet and its bdi is registered and alive - a
blkg can't be created for a request_queue which hasn't been fully
initialized, and the queue can't go away before the blkg is removed.

Let the stat and conf read functions get the device name from
blkg->q->backing_dev_info.dev, pass it down to the printing functions, and
remove blkg->dev.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
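For context, the companion change on the blk-cgroup.c side (outside this
diffstat-limited view) replaces the cached dev_t with an on-demand lookup. A
minimal sketch of such a helper, assuming the blkg->q back-pointer described
above; the helper name and the major-0 check are illustrative, not quoted
from the patch:

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/*
	 * Sketch: a bdi may be registered before the driver assigns a
	 * real device number, so treat major 0 as "no name yet".
	 */
	if (blkg->q->backing_dev_info.dev &&
	    MAJOR(blkg->q->backing_dev_info.dev->devt))
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}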
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  51
1 file changed, 2 insertions, 49 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 791b10719e4..52a429397d3 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -212,50 +212,12 @@ static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
 	return &tg->blkg;
 }
 
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
-	unsigned int major, minor;
-
-	if (!tg || tg->blkg.dev)
-		return;
-
-	/*
-	 * Fill in device details for a group which might not have been
-	 * filled at group creation time as queue was being instantiated
-	 * and driver had not attached a device yet
-	 */
-	if (bdi->dev && dev_name(bdi->dev)) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		tg->blkg.dev = MKDEV(major, minor);
-	}
-}
-
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-	if (!tg || tg->blkg.dev)
-		return;
-
-	spin_lock_irq(td->queue->queue_lock);
-	__throtl_tg_fill_dev_details(td, tg);
-	spin_unlock_irq(td->queue->queue_lock);
-}
-
 static void throtl_link_blkio_group(struct request_queue *q,
 				    struct blkio_group *blkg)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg = tg_of_blkg(blkg);
 
-	__throtl_tg_fill_dev_details(td, tg);
-
 	hlist_add_head(&tg->tg_node, &td->tg_list);
 	td->nr_undestroyed_grps++;
 }
@@ -263,20 +225,14 @@ static void throtl_link_blkio_group(struct request_queue *q,
 static struct
 throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 {
-	struct throtl_grp *tg = NULL;
-
 	/*
 	 * This is the common case when there are no blkio cgroups.
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup)
-		tg = td->root_tg;
-	else
-		tg = tg_of_blkg(blkg_lookup(blkcg, td->queue,
-					    BLKIO_POLICY_THROTL));
+		return td->root_tg;
 
-	__throtl_tg_fill_dev_details(td, tg);
-	return tg;
+	return tg_of_blkg(blkg_lookup(blkcg, td->queue, BLKIO_POLICY_THROTL));
 }
 
 static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
@@ -303,7 +259,6 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		tg = td->root_tg;
 	}
 
-	__throtl_tg_fill_dev_details(td, tg);
 	return tg;
 }
 
@@ -1090,8 +1045,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
-		throtl_tg_fill_dev_details(td, tg);
-
 		if (tg_no_rule_group(tg, rw)) {
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 						      rw, rw_is_sync(bio->bi_rw));
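To make the lifetime argument in the commit message concrete, here is a
hypothetical read-side caller; the function name, parameters, and seq_file
plumbing are invented for illustration and only the blkg_dev_name() lookup
mirrors the scheme the patch adopts. Under rcu_read_lock(), a visible blkg
pins blkg->q and therefore its registered bdi:

static void tg_print_conf(struct blkio_group *blkg, struct seq_file *sf,
			  u64 bps)
{
	const char *dname;

	rcu_read_lock();
	/* safe: an RCU-visible blkg keeps blkg->q and its bdi alive */
	dname = blkg_dev_name(blkg);
	if (dname)
		seq_printf(sf, "%s %llu\n", dname, (unsigned long long)bps);
	rcu_read_unlock();
}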