| author | Tejun Heo <tj@kernel.org> | 2012-04-13 16:11:35 -0400 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@kernel.dk> | 2012-04-20 04:06:06 -0400 |
| commit | 6d18b008daf46bcd82b8ae250aae0785f9714096 (patch) | |
| tree | e70e66ce74220d5a0c7382acd20efbd4835c8f77 /block | |
| parent | 3c96cb32d318f323c1bf972a4c66821f8499e34d (diff) | |
blkcg: shoot down blkgs if all policies are deactivated
There's no reason to keep blkgs around if no policy is activated for
the queue. This patch moves queue locking out of blkg_destroy_all()
and calls it from blkcg_deactivate_policy() on deactivation of the
last policy on the queue.
This change was suggested by Vivek.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
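
For context on the pattern at the heart of this change: blkg_destroy_all() stops taking q->queue_lock itself and instead asserts that its caller holds it, which lets blkcg_deactivate_policy() (already inside the locked region) call it directly, while blkcg_exit_queue() now takes the lock around the call. Below is a minimal sketch of that caller-holds-the-lock convention, with hypothetical function names rather than the actual blk-cgroup code:

```c
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/blkdev.h>

/*
 * Callee no longer locks; it documents and (under CONFIG_PROVE_LOCKING)
 * enforces that the caller already holds the queue lock.
 */
static void destroy_all(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);	/* WARNs if the lock is not held */
	/* ... tear down per-queue objects ... */
}

/* A caller with no other locked work wraps the call itself ... */
static void exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	destroy_all(q);
	spin_unlock_irq(q->queue_lock);
}

/*
 * ... while a caller already under q->queue_lock calls it directly,
 * which a plain spin_lock_irq() in the callee would deadlock on.
 */
static void deactivate_last_policy(struct request_queue *q)
{
	destroy_all(q);		/* q->queue_lock held by our caller */
}
```

Pushing the lock out to the callers is what makes the new call site in blkcg_deactivate_policy() possible: that function already holds q->queue_lock when it clears the policy bit.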
Diffstat (limited to 'block')
-rw-r--r-- block/blk-cgroup.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
```diff
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 10f0d2fc0b23..b1807d4ecedb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -258,7 +258,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkio_group *blkg, *n;
 
-	spin_lock_irq(q->queue_lock);
+	lockdep_assert_held(q->queue_lock);
 
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkio_cgroup *blkcg = blkg->blkcg;
@@ -267,8 +267,6 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
-
-	spin_unlock_irq(q->queue_lock);
 }
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
@@ -646,7 +644,10 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
+	spin_lock_irq(q->queue_lock);
 	blkg_destroy_all(q);
+	spin_unlock_irq(q->queue_lock);
+
 	blk_throtl_exit(q);
 }
 
@@ -802,6 +803,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
 	__clear_bit(pol->plid, q->blkcg_pols);
 
+	/* if no policy is left, no need for blkgs - shoot them down */
+	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+		blkg_destroy_all(q);
+
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		/* grab blkcg lock too while removing @pd from @blkg */
 		spin_lock(&blkg->blkcg->lock);
```
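
The final hunk keys the teardown off the per-queue policy bitmap: once __clear_bit() drops the last policy, bitmap_empty() over the first BLKCG_MAX_POLS bits turns true and the blkgs are shot down while the locks are still held. A reduced, self-contained illustration of that bitmap test follows (toy sizes and names, not the kernel's actual definitions):

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define NR_POLS 3			/* stand-in for BLKCG_MAX_POLS */
static DECLARE_BITMAP(active_pols, NR_POLS);

/* Clear one policy bit; report whether it was the last one standing. */
static bool deactivate_pol(int plid)
{
	__clear_bit(plid, active_pols);
	/* true exactly when no policy bit remains set */
	return bitmap_empty(active_pols, NR_POLS);
}
```

Because the check sits before the existing list walk, a last-policy deactivation empties q->blkg_list first, and the per-policy-data loop that follows simply has nothing left to visit.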