author     Tejun Heo <tj@kernel.org>       2012-03-05 16:15:15 -0500
committer  Jens Axboe <axboe@kernel.dk>    2012-03-06 15:27:23 -0500
commit     1adaf3dde37a8b9b59ea59c5f58fed7761178383 (patch)
tree       e4a46485b1bf0370aa41a5b9a8f138fba34c9d23 /block/blk-throttle.c
parent     0381411e4b1a52cee134eb73750e5e3cc1155d09 (diff)
blkcg: move refcnt to blkcg core
Currently, blkcg policy implementations manage the blkg refcnt by duplicating
mostly identical code in both policies. This patch moves the refcnt to blkg
and lets the blkcg core handle refcounting and freeing of blkgs.

* cfq blkgs now also get freed via RCU.

* cfq blkgs lose the RB_EMPTY_ROOT() sanity check on blkg free. If
  necessary, we can add blkio_exit_group_fn() to resurrect this.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
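Note: after this change the callers in blk-throttle.c take and drop their references through blkg_get() and blkg_put() on tg_to_blkg(tg) rather than on the throtl_grp itself. The core-side implementation is not part of this diff; the following is a minimal sketch of what it plausibly looks like if the deleted throtl_ref_get_tg()/throtl_free_tg()/throtl_put_tg() logic is carried over to struct blkio_group unchanged. The blkg->refcnt and blkg->rcu_head members and the blkg_free_rcu() name are illustrative assumptions, not taken from this patch.

/*
 * Sketch only: approximates the refcounting assumed to live in the blkcg
 * core after this commit, mirroring the helpers removed from
 * blk-throttle.c below.  The refcnt/rcu_head fields on struct blkio_group
 * and the blkg_free_rcu() name are assumptions for illustration.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	atomic_inc(&blkg->refcnt);
}

static void blkg_free_rcu(struct rcu_head *head)
{
	struct blkio_group *blkg = container_of(head, struct blkio_group, rcu_head);

	/* same teardown the throttle policy used to do on its own */
	free_percpu(blkg->stats_cpu);
	kfree(blkg->pd);
	kfree(blkg);
}

static void blkg_put(struct blkio_group *blkg)
{
	BUG_ON(atomic_read(&blkg->refcnt) <= 0);
	if (!atomic_dec_and_test(&blkg->refcnt))
		return;

	/* release the blkcg css reference the blkg has been holding */
	css_put(&blkg->blkcg->css);

	/* defer the actual free until after an RCU grace period */
	call_rcu(&blkg->rcu_head, blkg_free_rcu);
}

With this in the core, throttle and cfq share one refcount and one RCU-deferred free path instead of duplicating them per policy, which is what allows the 50-odd lines below to be deleted.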
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  58
1 file changed, 4 insertions(+), 54 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9c8a12477e13..153ba509446b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -54,7 +54,6 @@ struct throtl_grp {
 	 */
 	unsigned long disptime;
 
-	atomic_t ref;
 	unsigned int flags;
 
 	/* Two lists for READ and WRITE */
@@ -80,8 +79,6 @@ struct throtl_grp {
 
 	/* Some throttle limits got updated for the group */
 	int limits_changed;
-
-	struct rcu_head rcu_head;
 };
 
 struct throtl_data
@@ -151,45 +148,6 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 	return td->nr_queued[0] + td->nr_queued[1];
 }
 
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
-{
-	atomic_inc(&tg->ref);
-	return tg;
-}
-
-static void throtl_free_tg(struct rcu_head *head)
-{
-	struct throtl_grp *tg = container_of(head, struct throtl_grp, rcu_head);
-	struct blkio_group *blkg = tg_to_blkg(tg);
-
-	free_percpu(blkg->stats_cpu);
-	kfree(blkg->pd);
-	kfree(blkg);
-}
-
-static void throtl_put_tg(struct throtl_grp *tg)
-{
-	struct blkio_group *blkg = tg_to_blkg(tg);
-
-	BUG_ON(atomic_read(&tg->ref) <= 0);
-	if (!atomic_dec_and_test(&tg->ref))
-		return;
-
-	/* release the extra blkcg reference this blkg has been holding */
-	css_put(&blkg->blkcg->css);
-
-	/*
-	 * A group is freed in rcu manner. But having an rcu lock does not
-	 * mean that one can access all the fields of blkg and assume these
-	 * are valid. For example, don't try to follow throtl_data and
-	 * request queue links.
-	 *
-	 * Having a reference to blkg under an rcu allows acess to only
-	 * values local to groups like group stats and group rate limits
-	 */
-	call_rcu(&tg->rcu_head, throtl_free_tg);
-}
-
 static void throtl_init_blkio_group(struct blkio_group *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -204,14 +162,6 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 	tg->bps[WRITE] = -1;
 	tg->iops[READ] = -1;
 	tg->iops[WRITE] = -1;
-
-	/*
-	 * Take the initial reference that will be released on destroy
-	 * This can be thought of a joint reference by cgroup and
-	 * request queue which will be dropped by either request queue
-	 * exit or cgroup deletion path depending on who is exiting first.
-	 */
-	atomic_set(&tg->ref, 1);
 }
 
 static void throtl_link_blkio_group(struct request_queue *q,
@@ -648,7 +598,7 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
 
 	bio_list_add(&tg->bio_lists[rw], bio);
 	/* Take a bio reference on tg */
-	throtl_ref_get_tg(tg);
+	blkg_get(tg_to_blkg(tg));
 	tg->nr_queued[rw]++;
 	td->nr_queued[rw]++;
 	throtl_enqueue_tg(td, tg);
@@ -681,8 +631,8 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 
 	bio = bio_list_pop(&tg->bio_lists[rw]);
 	tg->nr_queued[rw]--;
-	/* Drop bio reference on tg */
-	throtl_put_tg(tg);
+	/* Drop bio reference on blkg */
+	blkg_put(tg_to_blkg(tg));
 
 	BUG_ON(td->nr_queued[rw] <= 0);
 	td->nr_queued[rw]--;
@@ -880,7 +830,7 @@ throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
 	 */
-	throtl_put_tg(tg);
+	blkg_put(tg_to_blkg(tg));
 	td->nr_undestroyed_grps--;
 }
 