about summary refs log tree commit diff stats
path: root/block/blk-cgroup.c
diff options
context:
space:
mode:
author    Dennis Zhou <dennis@kernel.org>  2018-12-05 12:10:38 -0500
committer Jens Axboe <axboe@kernel.dk>     2018-12-08 00:26:38 -0500
commit7fcf2b033b84e261dca283bc2911aaea4b07b525 (patch)
tree87573c8336fd2fb9063dc554262d5b293877aa84 /block/blk-cgroup.c
parent6f70fb66182b02e50deea65e9a3a86b7bf659a39 (diff)
blkcg: change blkg reference counting to use percpu_ref
Every bio is now associated with a blkg, putting blkg_get, blkg_try_get, and blkg_put on the hot path. Switch over the refcnt in blkg to use percpu_ref.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--block/blk-cgroup.c41
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 120f2e2835fb..2ca7611fe274 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -81,6 +81,37 @@ static void blkg_free(struct blkcg_gq *blkg)
81 kfree(blkg); 81 kfree(blkg);
82} 82}
83 83
84static void __blkg_release(struct rcu_head *rcu)
85{
86 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
87
88 percpu_ref_exit(&blkg->refcnt);
89
90 /* release the blkcg and parent blkg refs this blkg has been holding */
91 css_put(&blkg->blkcg->css);
92 if (blkg->parent)
93 blkg_put(blkg->parent);
94
95 wb_congested_put(blkg->wb_congested);
96
97 blkg_free(blkg);
98}
99
100/*
101 * A group is RCU protected, but having an rcu lock does not mean that one
102 * can access all the fields of blkg and assume these are valid. For
103 * example, don't try to follow throtl_data and request queue links.
104 *
105 * Having a reference to blkg under an rcu allows accesses to only values
106 * local to groups like group stats and group rate limits.
107 */
108static void blkg_release(struct percpu_ref *ref)
109{
110 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
111
112 call_rcu(&blkg->rcu_head, __blkg_release);
113}
114
84/** 115/**
85 * blkg_alloc - allocate a blkg 116 * blkg_alloc - allocate a blkg
86 * @blkcg: block cgroup the new blkg is associated with 117 * @blkcg: block cgroup the new blkg is associated with
@@ -107,7 +138,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
107 blkg->q = q; 138 blkg->q = q;
108 INIT_LIST_HEAD(&blkg->q_node); 139 INIT_LIST_HEAD(&blkg->q_node);
109 blkg->blkcg = blkcg; 140 blkg->blkcg = blkcg;
110 atomic_set(&blkg->refcnt, 1);
111 141
112 for (i = 0; i < BLKCG_MAX_POLS; i++) { 142 for (i = 0; i < BLKCG_MAX_POLS; i++) {
113 struct blkcg_policy *pol = blkcg_policy[i]; 143 struct blkcg_policy *pol = blkcg_policy[i];
@@ -207,6 +237,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
207 blkg_get(blkg->parent); 237 blkg_get(blkg->parent);
208 } 238 }
209 239
240 ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
241 GFP_NOWAIT | __GFP_NOWARN);
242 if (ret)
243 goto err_cancel_ref;
244
210 /* invoke per-policy init */ 245 /* invoke per-policy init */
211 for (i = 0; i < BLKCG_MAX_POLS; i++) { 246 for (i = 0; i < BLKCG_MAX_POLS; i++) {
212 struct blkcg_policy *pol = blkcg_policy[i]; 247 struct blkcg_policy *pol = blkcg_policy[i];
@@ -239,6 +274,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
239 blkg_put(blkg); 274 blkg_put(blkg);
240 return ERR_PTR(ret); 275 return ERR_PTR(ret);
241 276
277err_cancel_ref:
278 percpu_ref_exit(&blkg->refcnt);
242err_put_congested: 279err_put_congested:
243 wb_congested_put(wb_congested); 280 wb_congested_put(wb_congested);
244err_put_css: 281err_put_css:
@@ -367,7 +404,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
367 * Put the reference taken at the time of creation so that when all 404 * Put the reference taken at the time of creation so that when all
368 * queues are gone, group can be destroyed. 405 * queues are gone, group can be destroyed.
369 */ 406 */
370 blkg_put(blkg); 407 percpu_ref_kill(&blkg->refcnt);
371} 408}
372 409
373/** 410/**