diff options
author | Tejun Heo <tj@kernel.org> | 2012-03-05 16:15:22 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 15:27:24 -0500 |
commit | c875f4d0250a1f070fa26087a73bdd8f54c48100 (patch) | |
tree | 4ed2bae2fc48e54ac712d28eaaae8217c8064c1d /block/cfq-iosched.c | |
parent | 9f13ef678efd977487fc0c2e489f17c9a8c67a3e (diff) |
blkcg: drop unnecessary RCU locking
Now that blkg additions / removals are always done under both q and
blkcg locks, the only places RCU locking is necessary are
blkg_lookup[_create]() for lookup w/o blkcg lock. This patch drops
unnecessary RCU locking, replacing it with plain blkcg locking as
necessary.
* blkiocg_pre_destroy() already performs proper locking and doesn't need
RCU. Dropped.
* blkio_read_blkg_stats() now uses blkcg->lock instead of RCU read
lock. This isn't a hot path.
* Now-unnecessary synchronize_rcu() calls from queue exit paths are removed.
This makes q->nr_blkgs unnecessary. Dropped.
* RCU annotation on blkg->q removed.
-v2: Vivek pointed out that blkg_lookup_create() still needs to be
called under rcu_read_lock(). Updated.
-v3: After the update, stats_lock locking in blkio_read_blkg_stats()
shouldn't be using _irq variant as it otherwise ends up enabling
irq while blkcg->lock is locked. Fixed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 24 |
1 files changed, 0 insertions, 24 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 393eaa59913b..9e386d9bcb79 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -3449,7 +3449,6 @@ static void cfq_exit_queue(struct elevator_queue *e) | |||
3449 | { | 3449 | { |
3450 | struct cfq_data *cfqd = e->elevator_data; | 3450 | struct cfq_data *cfqd = e->elevator_data; |
3451 | struct request_queue *q = cfqd->queue; | 3451 | struct request_queue *q = cfqd->queue; |
3452 | bool wait = false; | ||
3453 | 3452 | ||
3454 | cfq_shutdown_timer_wq(cfqd); | 3453 | cfq_shutdown_timer_wq(cfqd); |
3455 | 3454 | ||
@@ -3462,31 +3461,8 @@ static void cfq_exit_queue(struct elevator_queue *e) | |||
3462 | 3461 | ||
3463 | spin_unlock_irq(q->queue_lock); | 3462 | spin_unlock_irq(q->queue_lock); |
3464 | 3463 | ||
3465 | #ifdef CONFIG_BLK_CGROUP | ||
3466 | /* | ||
3467 | * If there are groups which we could not unlink from blkcg list, | ||
3468 | * wait for a rcu period for them to be freed. | ||
3469 | */ | ||
3470 | spin_lock_irq(q->queue_lock); | ||
3471 | wait = q->nr_blkgs; | ||
3472 | spin_unlock_irq(q->queue_lock); | ||
3473 | #endif | ||
3474 | cfq_shutdown_timer_wq(cfqd); | 3464 | cfq_shutdown_timer_wq(cfqd); |
3475 | 3465 | ||
3476 | /* | ||
3477 | * Wait for cfqg->blkg->key accessors to exit their grace periods. | ||
3478 | * Do this wait only if there are other unlinked groups out | ||
3479 | * there. This can happen if cgroup deletion path claimed the | ||
3480 | * responsibility of cleaning up a group before queue cleanup code | ||
3481 | * get to the group. | ||
3482 | * | ||
3483 | * Do not call synchronize_rcu() unconditionally as there are drivers | ||
3484 | * which create/delete request queue hundreds of times during scan/boot | ||
3485 | * and synchronize_rcu() can take significant time and slow down boot. | ||
3486 | */ | ||
3487 | if (wait) | ||
3488 | synchronize_rcu(); | ||
3489 | |||
3490 | #ifndef CONFIG_CFQ_GROUP_IOSCHED | 3466 | #ifndef CONFIG_CFQ_GROUP_IOSCHED |
3491 | kfree(cfqd->root_group); | 3467 | kfree(cfqd->root_group); |
3492 | #endif | 3468 | #endif |