author     NeilBrown <neilb@suse.de>  2010-06-16 02:45:16 -0400
committer  NeilBrown <neilb@suse.de>  2010-06-23 23:35:02 -0400
commit     e4e11e385d1e5516ac76c956d6c25e6c2fa1b8d0 (patch)
tree       978807084ae3136b20a12efab3720ecec861d0ff /drivers/md
parent     049d6c1ef983c9ac43aa423dfd752071a5b0002d (diff)
md/raid5: avoid oops when number of devices is reduced then increased.
The entries in the stripe_cache maintained by raid5 are enlarged when we increase the number of devices in the array, but not shrunk when we reduce the number of devices. So if entries are added after reducing the number of devices, we must ensure we initialise the whole entry, not just the part that is currently relevant. Otherwise, if we enlarge the array again, we will reference uninitialised values.

As grow_buffers/shrink_buffers now want to use a count that is stored explicitly in the raid_conf, they should get it from there rather than being passed it as a parameter.

Signed-off-by: NeilBrown <neilb@suse.de>
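To make the failure mode concrete, here is a minimal userspace sketch of the allocation pattern used by grow_one_stripe(). The struct and function names below are simplified stand-ins, not the kernel definitions: each cache entry ends in a variable-length tail of per-device slots, and the fix is to zero that tail out to pool_size (the largest device count the cache has ever been sized for) rather than the current number of devices, so slots that become live again after a later grow never hold stale values.

#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for struct r5dev and struct stripe_head. */
struct dev_slot {
	void *page;
};

struct cache_entry {
	int count;
	struct dev_slot dev[1];		/* really pool_size slots */
};

/*
 * 'pool_size' plays the role of conf->pool_size: the largest number of
 * device slots this cache has ever been sized for.  Zeroing all
 * pool_size slots, not just the currently active ones, mirrors the
 * memset change in the patch.
 */
struct cache_entry *alloc_entry(int pool_size)
{
	size_t sz = sizeof(struct cache_entry) +
		    (pool_size - 1) * sizeof(struct dev_slot);
	struct cache_entry *e = malloc(sz);

	if (!e)
		return NULL;
	memset(e, 0, sz);	/* whole entry, including inactive slots */
	return e;
}

With the count also read from the conf inside grow_buffers()/shrink_buffers(), every path that walks the entry uses the same pool_size.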
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid5.c | 19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d2c0f94fa37d..2c055dec8c68 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -277,12 +277,13 @@ out:
 	return sh;
 }
 
-static void shrink_buffers(struct stripe_head *sh, int num)
+static void shrink_buffers(struct stripe_head *sh)
 {
 	struct page *p;
 	int i;
+	int num = sh->raid_conf->pool_size;
 
-	for (i=0; i<num ; i++) {
+	for (i = 0; i < num ; i++) {
 		p = sh->dev[i].page;
 		if (!p)
 			continue;
@@ -291,11 +292,12 @@ static void shrink_buffers(struct stripe_head *sh, int num)
 	}
 }
 
-static int grow_buffers(struct stripe_head *sh, int num)
+static int grow_buffers(struct stripe_head *sh)
 {
 	int i;
+	int num = sh->raid_conf->pool_size;
 
-	for (i=0; i<num; i++) {
+	for (i = 0; i < num; i++) {
 		struct page *page;
 
 		if (!(page = alloc_page(GFP_KERNEL))) {
@@ -1240,19 +1242,18 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 static int grow_one_stripe(raid5_conf_t *conf)
 {
 	struct stripe_head *sh;
-	int disks = max(conf->raid_disks, conf->previous_raid_disks);
 	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
 	if (!sh)
 		return 0;
-	memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
+	memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
 	sh->raid_conf = conf;
 	spin_lock_init(&sh->lock);
 #ifdef CONFIG_MULTICORE_RAID456
 	init_waitqueue_head(&sh->ops.wait_for_ops);
 #endif
 
-	if (grow_buffers(sh, disks)) {
-		shrink_buffers(sh, disks);
+	if (grow_buffers(sh)) {
+		shrink_buffers(sh);
 		kmem_cache_free(conf->slab_cache, sh);
 		return 0;
 	}
@@ -1468,7 +1469,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
 	if (!sh)
 		return 0;
 	BUG_ON(atomic_read(&sh->count));
-	shrink_buffers(sh, conf->pool_size);
+	shrink_buffers(sh);
 	kmem_cache_free(conf->slab_cache, sh);
 	atomic_dec(&conf->active_stripes);
 	return 1;