aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-03-05 16:15:22 -0500
committerJens Axboe <axboe@kernel.dk>2012-03-06 15:27:24 -0500
commitc875f4d0250a1f070fa26087a73bdd8f54c48100 (patch)
tree4ed2bae2fc48e54ac712d28eaaae8217c8064c1d /block/blk-throttle.c
parent9f13ef678efd977487fc0c2e489f17c9a8c67a3e (diff)
blkcg: drop unnecessary RCU locking
Now that blkg additions / removals are always done under both q and blkcg locks, the only places RCU locking is necessary are blkg_lookup[_create]() for lookup w/o blkcg lock. This patch drops unnecessary RCU locking, replacing it with plain blkcg locking as necessary. * blkiocg_pre_destroy() already performs proper locking and doesn't need RCU. Dropped. * blkio_read_blkg_stats() now uses blkcg->lock instead of RCU read lock. This isn't a hot path. * The now-unnecessary synchronize_rcu() calls from queue exit paths are removed. This makes q->nr_blkgs unnecessary. Dropped. * RCU annotation on blkg->q removed. -v2: Vivek pointed out that blkg_lookup_create() still needs to be called under rcu_read_lock(). Updated. -v3: After the update, stats_lock locking in blkio_read_blkg_stats() shouldn't be using the _irq variant as it otherwise ends up enabling irqs while blkcg->lock is locked. Fixed. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--block/blk-throttle.c33
1 file changed, 1 insertion, 32 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e35ee7aeea69..bfa5168249eb 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1046,39 +1046,8 @@ int blk_throtl_init(struct request_queue *q)
1046 1046
1047void blk_throtl_exit(struct request_queue *q) 1047void blk_throtl_exit(struct request_queue *q)
1048{ 1048{
1049 struct throtl_data *td = q->td; 1049 BUG_ON(!q->td);
1050 bool wait;
1051
1052 BUG_ON(!td);
1053
1054 throtl_shutdown_wq(q); 1050 throtl_shutdown_wq(q);
1055
1056 /* If there are other groups */
1057 spin_lock_irq(q->queue_lock);
1058 wait = q->nr_blkgs;
1059 spin_unlock_irq(q->queue_lock);
1060
1061 /*
1062 * Wait for tg_to_blkg(tg)->q accessors to exit their grace periods.
1063 * Do this wait only if there are other undestroyed groups out
1064 * there (other than root group). This can happen if cgroup deletion
1065 * path claimed the responsibility of cleaning up a group before
1066 * queue cleanup code get to the group.
1067 *
1068 * Do not call synchronize_rcu() unconditionally as there are drivers
1069 * which create/delete request queue hundreds of times during scan/boot
1070 * and synchronize_rcu() can take significant time and slow down boot.
1071 */
1072 if (wait)
1073 synchronize_rcu();
1074
1075 /*
1076 * Just being safe to make sure after previous flush if some body did
1077 * update limits through cgroup and another work got queued, cancel
1078 * it.
1079 */
1080 throtl_shutdown_wq(q);
1081
1082 kfree(q->td); 1051 kfree(q->td);
1083} 1052}
1084 1053