author     NeilBrown <neilb@suse.de>    2015-05-08 04:19:04 -0400
committer  NeilBrown <neilb@suse.de>    2015-05-08 04:40:01 -0400
commit     f18c1a35f62caccb527e8b0990c8801596e7c662
tree       237497fe32aed9fa9aed1b92d8fe7875a7b188b8 /drivers/md
parent     b6538fe32966e63ef38897860ef220980d904974
md/raid5: new alloc_stripe() to allocate and initialize a stripe.
The new batch_lock and batch_list fields are being initialized in
grow_one_stripe() but not in resize_stripes(). This causes a crash
on resize.
So separate the core initialization into a new function and call it
from both allocation sites.
Signed-off-by: NeilBrown <neilb@suse.de>
Fixes: 59fc630b8b5f ("RAID5: batch adjacent full stripe write")
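As an aside, the failure mode described above — a spinlock and list head that are set up on one allocation path but not on another — is easy to demonstrate outside the kernel. The stand-alone C sketch below is only an illustration of that pattern, not the raid5 code itself: the names stripe_like, alloc_stripe_like() and list_add_tail_like() are hypothetical. The point is that zeroed memory is not an initialized circular list head, so every allocation path has to go through one shared initializer, which is what the new alloc_stripe() helper provides for grow_one_stripe() and resize_stripes().

/* Stand-alone illustration (not kernel code); names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

/* Minimal circular doubly-linked list head, in the spirit of list_head. */
struct node { struct node *prev, *next; };

struct stripe_like {
	struct node batch_list;   /* must point at itself when empty */
	int data;
};

/* Shared initializer: the analogue of the new alloc_stripe() helper.
 * calloc() zeroes the struct, but a zeroed list head is NOT an empty
 * list, so prev/next still have to be pointed back at the head. */
static struct stripe_like *alloc_stripe_like(void)
{
	struct stripe_like *s = calloc(1, sizeof(*s));
	if (s) {
		s->batch_list.prev = &s->batch_list;
		s->batch_list.next = &s->batch_list;
	}
	return s;
}

/* Simplified list_add_tail(): dereferences head->prev, so it crashes
 * with a NULL pointer if the head was allocated but never initialized,
 * which mirrors the resize_stripes() crash the patch fixes. */
static void list_add_tail_like(struct node *item, struct node *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

int main(void)
{
	/* Both allocation paths now share the same initializer. */
	struct stripe_like *a = alloc_stripe_like();
	struct stripe_like *b = alloc_stripe_like();

	if (!a || !b)
		return 1;
	list_add_tail_like(&b->batch_list, &a->batch_list);
	printf("batch list linked without a crash\n");
	free(b);
	free(a);
	return 0;
}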
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid5.c  32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77dfd720aaa0..91a1e8b26b52 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1971,17 +1971,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	put_cpu();
 }
 
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+{
+	struct stripe_head *sh;
+
+	sh = kmem_cache_zalloc(sc, gfp);
+	if (sh) {
+		spin_lock_init(&sh->stripe_lock);
+		spin_lock_init(&sh->batch_lock);
+		INIT_LIST_HEAD(&sh->batch_list);
+		INIT_LIST_HEAD(&sh->lru);
+		atomic_set(&sh->count, 1);
+	}
+	return sh;
+}
 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
 	struct stripe_head *sh;
-	sh = kmem_cache_zalloc(conf->slab_cache, gfp);
+
+	sh = alloc_stripe(conf->slab_cache, gfp);
 	if (!sh)
 		return 0;
 
 	sh->raid_conf = conf;
 
-	spin_lock_init(&sh->stripe_lock);
-
 	if (grow_buffers(sh, gfp)) {
 		shrink_buffers(sh);
 		kmem_cache_free(conf->slab_cache, sh);
@@ -1990,13 +2003,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 	sh->hash_lock_index =
 		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
 	/* we just created an active stripe so... */
-	atomic_set(&sh->count, 1);
 	atomic_inc(&conf->active_stripes);
-	INIT_LIST_HEAD(&sh->lru);
 
-	spin_lock_init(&sh->batch_lock);
-	INIT_LIST_HEAD(&sh->batch_list);
-	sh->batch_head = NULL;
 	release_stripe(sh);
 	conf->max_nr_stripes++;
 	return 1;
@@ -2109,13 +2117,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		return -ENOMEM;
 
 	for (i = conf->max_nr_stripes; i; i--) {
-		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
+		nsh = alloc_stripe(sc, GFP_KERNEL);
 		if (!nsh)
 			break;
 
 		nsh->raid_conf = conf;
-		spin_lock_init(&nsh->stripe_lock);
-
 		list_add(&nsh->lru, &newstripes);
 	}
 	if (i) {
@@ -2142,13 +2148,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 				    lock_device_hash_lock(conf, hash));
 		osh = get_free_stripe(conf, hash);
 		unlock_device_hash_lock(conf, hash);
-		atomic_set(&nsh->count, 1);
+
 		for(i=0; i<conf->pool_size; i++) {
 			nsh->dev[i].page = osh->dev[i].page;
 			nsh->dev[i].orig_page = osh->dev[i].page;
 		}
-		for( ; i<newsize; i++)
-			nsh->dev[i].page = NULL;
 		nsh->hash_lock_index = hash;
 		kmem_cache_free(conf->slab_cache, osh);
 		cnt++;