author     Tejun Heo <tj@kernel.org>        2012-03-05 16:15:11 -0500
committer  Jens Axboe <axboe@kernel.dk>     2012-03-06 15:27:23 -0500
commit     7ee9c5620504906e98451dc9a1945b2b9e892cb8
tree       1daf4d9fc8d03ebbc88ceedb7abeec46d8c71df3
parent     92616b5b3a7c7fa8148df82e7ff6183056f2bfc8
blkcg: let blkio_group point to blkio_cgroup directly

Currently, blkg points to the associated blkcg via its css_id.  This
unnecessarily complicates dereferencing blkcg.  Let blkg hold a
reference to the associated blkcg and point directly to it, and disable
css_id on blkio_subsys.

This change requires splitting blkiocg_destroy() into
blkiocg_pre_destroy() and blkiocg_destroy() so that all blkg's can be
destroyed and all the blkcg references held by them dropped during
cgroup removal.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
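(Editorial note, not part of the patch: the ownership model the change moves to can be sketched in a few lines. The code below is a simplified, self-contained userspace illustration under stated assumptions: a plain atomic counter and the hypothetical helpers blkcg_tryget()/blkcg_put() stand in for the kernel's css_tryget()/css_put() on blkcg->css, and blkg_create()/blkg_release() are stand-ins for blkg_lookup_create() and the throtl_put_tg()/cfq_put_cfqg() release paths. It is a sketch of the refcounting pattern, not the kernel implementation.)

/* Simplified sketch: a blkg pins its blkcg with a reference at creation
 * and carries a direct pointer, instead of a css_id looked up on every
 * dereference.  Compiles as standalone C11. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct blkcg {
        atomic_int refcnt;              /* > 0 while the cgroup is alive */
};

static bool blkcg_tryget(struct blkcg *cg)
{
        int old = atomic_load(&cg->refcnt);

        while (old > 0) {
                if (atomic_compare_exchange_weak(&cg->refcnt, &old, old + 1))
                        return true;    /* pinned: cgroup cannot go away */
        }
        return false;                   /* cgroup already being torn down */
}

static void blkcg_put(struct blkcg *cg)
{
        atomic_fetch_sub(&cg->refcnt, 1);
}

struct blkg {
        struct blkcg *blkcg;            /* direct pointer, replaces css_id */
};

static struct blkg *blkg_create(struct blkcg *cg)
{
        struct blkg *blkg;

        if (!blkcg_tryget(cg))          /* pin the blkcg first */
                return NULL;

        blkg = calloc(1, sizeof(*blkg));
        if (!blkg) {
                blkcg_put(cg);          /* allocation failed: drop the pin */
                return NULL;
        }
        blkg->blkcg = cg;
        return blkg;
}

static void blkg_release(struct blkg *blkg)
{
        blkcg_put(blkg->blkcg);         /* drop the reference taken at creation */
        free(blkg);
}

int main(void)
{
        struct blkcg cg = { .refcnt = 1 };
        struct blkg *g = blkg_create(&cg);

        printf("refcnt after create:  %d\n", atomic_load(&cg.refcnt)); /* 2 */
        blkg_release(g);
        printf("refcnt after release: %d\n", atomic_load(&cg.refcnt)); /* 1 */
        return 0;
}

Because every blkg holds a reference, the cgroup cannot be freed while any blkg still points at it; that is why cgroup removal needs a pre_destroy step that tears down the blkgs (dropping their references) before the blkcg itself is freed in destroy.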
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c    43
-rw-r--r--  block/blk-cgroup.h     2
-rw-r--r--  block/blk-throttle.c   3
-rw-r--r--  block/cfq-iosched.c    4
4 files changed, 32 insertions, 20 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 76942360872b..d42d826ece39 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -37,6 +37,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
 			      struct cgroup_taskset *);
 static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
 			   struct cgroup_taskset *);
+static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -51,10 +52,10 @@ struct cgroup_subsys blkio_subsys = {
 	.create = blkiocg_create,
 	.can_attach = blkiocg_can_attach,
 	.attach = blkiocg_attach,
+	.pre_destroy = blkiocg_pre_destroy,
 	.destroy = blkiocg_destroy,
 	.populate = blkiocg_populate,
 	.subsys_id = blkio_subsys_id,
-	.use_id = 1,
 	.module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -442,6 +443,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	if (blkg)
 		return blkg;
 
+	/* blkg holds a reference to blkcg */
 	if (!css_tryget(&blkcg->css))
 		return ERR_PTR(-EINVAL);
 
@@ -463,15 +465,16 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 
 		spin_lock_init(&new_blkg->stats_lock);
 		rcu_assign_pointer(new_blkg->q, q);
-		new_blkg->blkcg_id = css_id(&blkcg->css);
+		new_blkg->blkcg = blkcg;
 		new_blkg->plid = plid;
 		cgroup_path(blkcg->css.cgroup, new_blkg->path,
 			    sizeof(new_blkg->path));
+	} else {
+		css_put(&blkcg->css);
 	}
 
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
-	css_put(&blkcg->css);
 
 	/* did bypass get turned on inbetween? */
 	if (unlikely(blk_queue_bypass(q)) && !for_root) {
@@ -500,6 +503,7 @@ out:
 	if (new_blkg) {
 		free_percpu(new_blkg->stats_cpu);
 		kfree(new_blkg);
+		css_put(&blkcg->css);
 	}
 	return blkg;
 }
@@ -508,7 +512,6 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
 	hlist_del_init_rcu(&blkg->blkcg_node);
-	blkg->blkcg_id = 0;
 }
 
 /*
@@ -517,24 +520,17 @@ static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
  */
 int blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
-	struct blkio_cgroup *blkcg;
+	struct blkio_cgroup *blkcg = blkg->blkcg;
 	unsigned long flags;
-	struct cgroup_subsys_state *css;
 	int ret = 1;
 
-	rcu_read_lock();
-	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-	if (css) {
-		blkcg = container_of(css, struct blkio_cgroup, css);
-		spin_lock_irqsave(&blkcg->lock, flags);
-		if (!hlist_unhashed(&blkg->blkcg_node)) {
-			__blkiocg_del_blkio_group(blkg);
-			ret = 0;
-		}
-		spin_unlock_irqrestore(&blkcg->lock, flags);
+	spin_lock_irqsave(&blkcg->lock, flags);
+	if (!hlist_unhashed(&blkg->blkcg_node)) {
+		__blkiocg_del_blkio_group(blkg);
+		ret = 0;
 	}
+	spin_unlock_irqrestore(&blkcg->lock, flags);
 
-	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
@@ -1387,7 +1383,8 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 					ARRAY_SIZE(blkio_files));
 }
 
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
+			       struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 	unsigned long flags;
@@ -1396,6 +1393,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	struct blkio_policy_type *blkiop;
 
 	rcu_read_lock();
+
 	do {
 		spin_lock_irqsave(&blkcg->lock, flags);
 
@@ -1425,8 +1423,15 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 		spin_unlock(&blkio_list_lock);
 	} while (1);
 
-	free_css_id(&blkio_subsys, &blkcg->css);
 	rcu_read_unlock();
+
+	return 0;
+}
+
+static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+
 	if (blkcg != &blkio_root_cgroup)
 		kfree(blkcg);
 }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 7ebecf6ea8f1..ca1fc637bd6e 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -163,7 +163,7 @@ struct blkio_group {
 	/* Pointer to the associated request_queue, RCU protected */
 	struct request_queue __rcu *q;
 	struct hlist_node blkcg_node;
-	unsigned short blkcg_id;
+	struct blkio_cgroup *blkcg;
 	/* Store cgroup path */
 	char path[128];
 	/* policy which owns this blk group */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 52a429397d3b..fe6a442b8482 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -169,6 +169,9 @@ static void throtl_put_tg(struct throtl_grp *tg)
 	if (!atomic_dec_and_test(&tg->ref))
 		return;
 
+	/* release the extra blkcg reference this blkg has been holding */
+	css_put(&tg->blkg.blkcg->css);
+
 	/*
 	 * A group is freed in rcu manner. But having an rcu lock does not
 	 * mean that one can access all the fields of blkg and assume these
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f67d109eb974..9ef86fbfc9ae 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1133,6 +1133,10 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	cfqg->ref--;
 	if (cfqg->ref)
 		return;
+
+	/* release the extra blkcg reference this blkg has been holding */
+	css_put(&cfqg->blkg.blkcg->css);
+
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	free_percpu(cfqg->blkg.stats_cpu);