diff options
author | NeilBrown <neilb@suse.de> | 2014-01-21 19:45:03 -0500 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2014-01-21 19:45:03 -0500 |
commit | 7da9d450ab2843bf1db378c156acc6304dbc1c2b (patch) | |
tree | 66fd6c85cc6f11f6ce33d8360643e817e13d1b18 /drivers | |
parent | 9f97e4b128d2ea90a5f5063ea0ee3b0911f4c669 (diff) |
md/raid5: close recently introduced race in stripe_head management.
As release_stripe and __release_stripe decrement ->count and then
manipulate ->lru both under ->device_lock, it is important that
get_active_stripe() increments ->count and clears ->lru also under
->device_lock.
However we currently list_del_init ->lru under the lock, but increment
the ->count outside the lock. This can lead to races and list
corruption.
So move the atomic_inc(&sh->count) up inside the ->device_lock
protected region.
Note that we still increment ->count without device lock in the case
where get_free_stripe() was called, and in fact don't take
->device_lock at all in that path.
This is safe because if the stripe_head can be found by
get_free_stripe, then the hash lock assures us that no-one else could
possibly be calling release_stripe() at the same time.
Fixes: 566c09c53455d7c4f1130928ef8071da1a24ea65
Cc: stable@vger.kernel.org (3.13)
Reported-and-tested-by: Ian Kumlien <ian.kumlien@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/md/raid5.c | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3088d3af5a89..03f82ab87d9e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -675,8 +675,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
675 | || !conf->inactive_blocked), | 675 | || !conf->inactive_blocked), |
676 | *(conf->hash_locks + hash)); | 676 | *(conf->hash_locks + hash)); |
677 | conf->inactive_blocked = 0; | 677 | conf->inactive_blocked = 0; |
678 | } else | 678 | } else { |
679 | init_stripe(sh, sector, previous); | 679 | init_stripe(sh, sector, previous); |
680 | atomic_inc(&sh->count); | ||
681 | } | ||
680 | } else { | 682 | } else { |
681 | spin_lock(&conf->device_lock); | 683 | spin_lock(&conf->device_lock); |
682 | if (atomic_read(&sh->count)) { | 684 | if (atomic_read(&sh->count)) { |
@@ -695,13 +697,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
695 | sh->group = NULL; | 697 | sh->group = NULL; |
696 | } | 698 | } |
697 | } | 699 | } |
700 | atomic_inc(&sh->count); | ||
698 | spin_unlock(&conf->device_lock); | 701 | spin_unlock(&conf->device_lock); |
699 | } | 702 | } |
700 | } while (sh == NULL); | 703 | } while (sh == NULL); |
701 | 704 | ||
702 | if (sh) | ||
703 | atomic_inc(&sh->count); | ||
704 | |||
705 | spin_unlock_irq(conf->hash_locks + hash); | 705 | spin_unlock_irq(conf->hash_locks + hash); |
706 | return sh; | 706 | return sh; |
707 | } | 707 | } |