about summary refs log tree commit diff stats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorShaohua Li <shli@kernel.org>2014-04-08 23:27:42 -0400
committerNeilBrown <neilb@suse.de>2014-04-09 00:42:42 -0400
commite240c1839d11152b0355442f8ac6d2d2d921be36 (patch)
treebb2f80fd9a3be90a710e2e2053c246ff1dedf6f7 /drivers/md/raid5.c
parent27c0f68f0745218cec70f19ba7560c8c5fc3f817 (diff)
raid5: get_active_stripe avoids device_lock
For a sequential workload (or a workload with large request sizes), get_active_stripe can find a cached stripe. In this case, we always hold device_lock, which exposes a lot of lock contention for such workloads. If the stripe count isn't 0, we don't actually need to hold the lock, since we just increase its count. And this is the hot code path for such workloads. Unfortunately we must delete the BUG_ON. Signed-off-by: Shaohua Li <shli@fusionio.com> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c9
1 file changed, 2 insertions, 7 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a904a2c80fc8..25247a852912 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
679 init_stripe(sh, sector, previous); 679 init_stripe(sh, sector, previous);
680 atomic_inc(&sh->count); 680 atomic_inc(&sh->count);
681 } 681 }
682 } else { 682 } else if (!atomic_inc_not_zero(&sh->count)) {
683 spin_lock(&conf->device_lock); 683 spin_lock(&conf->device_lock);
684 if (atomic_read(&sh->count)) { 684 if (!atomic_read(&sh->count)) {
685 BUG_ON(!list_empty(&sh->lru)
686 && !test_bit(STRIPE_EXPANDING, &sh->state)
687 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
688 );
689 } else {
690 if (!test_bit(STRIPE_HANDLE, &sh->state)) 685 if (!test_bit(STRIPE_HANDLE, &sh->state))
691 atomic_inc(&conf->active_stripes); 686 atomic_inc(&conf->active_stripes);
692 BUG_ON(list_empty(&sh->lru) && 687 BUG_ON(list_empty(&sh->lru) &&