about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2010-03-16 02:23:35 -0400
committerNeilBrown <neilb@suse.de>2010-05-18 01:27:47 -0400
commit84707f38e767ac470fd82af6c45a8cafe2bd1b9a (patch)
treebc480c9aeedccd735e144576284523da1406cb98 /drivers
parentc0cc75f84e0e413bce2dcabea74ef418da45c7c1 (diff)
md: don't use mddev->raid_disks in raid0 or raid10 while array is active.
In a subsequent patch we will make it possible to change mddev->raid_disks while a RAID0 or RAID10 array is active. This is part of the process of reshaping such an array. This means that we cannot use this value while processing requests (it is OK to use it during initialisation as we are locked against changes then). Both RAID0 and RAID10 have the same value stored in the private data structure, so use that value instead. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/raid0.c15
-rw-r--r--drivers/md/raid10.c16
2 files changed, 18 insertions, 13 deletions
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 377cf2a3c333..c2e0d1d28102 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -28,9 +28,10 @@ static void raid0_unplug(struct request_queue *q)
28 mddev_t *mddev = q->queuedata; 28 mddev_t *mddev = q->queuedata;
29 raid0_conf_t *conf = mddev->private; 29 raid0_conf_t *conf = mddev->private;
30 mdk_rdev_t **devlist = conf->devlist; 30 mdk_rdev_t **devlist = conf->devlist;
31 int raid_disks = conf->strip_zone[0].nb_dev;
31 int i; 32 int i;
32 33
33 for (i=0; i<mddev->raid_disks; i++) { 34 for (i=0; i < raid_disks; i++) {
34 struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); 35 struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
35 36
36 blk_unplug(r_queue); 37 blk_unplug(r_queue);
@@ -42,12 +43,13 @@ static int raid0_congested(void *data, int bits)
42 mddev_t *mddev = data; 43 mddev_t *mddev = data;
43 raid0_conf_t *conf = mddev->private; 44 raid0_conf_t *conf = mddev->private;
44 mdk_rdev_t **devlist = conf->devlist; 45 mdk_rdev_t **devlist = conf->devlist;
46 int raid_disks = conf->strip_zone[0].nb_dev;
45 int i, ret = 0; 47 int i, ret = 0;
46 48
47 if (mddev_congested(mddev, bits)) 49 if (mddev_congested(mddev, bits))
48 return 1; 50 return 1;
49 51
50 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 52 for (i = 0; i < raid_disks && !ret ; i++) {
51 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); 53 struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
52 54
53 ret |= bdi_congested(&q->backing_dev_info, bits); 55 ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -65,6 +67,7 @@ static void dump_zones(mddev_t *mddev)
65 sector_t zone_start = 0; 67 sector_t zone_start = 0;
66 char b[BDEVNAME_SIZE]; 68 char b[BDEVNAME_SIZE];
67 raid0_conf_t *conf = mddev->private; 69 raid0_conf_t *conf = mddev->private;
70 int raid_disks = conf->strip_zone[0].nb_dev;
68 printk(KERN_INFO "******* %s configuration *********\n", 71 printk(KERN_INFO "******* %s configuration *********\n",
69 mdname(mddev)); 72 mdname(mddev));
70 h = 0; 73 h = 0;
@@ -72,7 +75,7 @@ static void dump_zones(mddev_t *mddev)
72 printk(KERN_INFO "zone%d=[", j); 75 printk(KERN_INFO "zone%d=[", j);
73 for (k = 0; k < conf->strip_zone[j].nb_dev; k++) 76 for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
74 printk("%s/", 77 printk("%s/",
75 bdevname(conf->devlist[j*mddev->raid_disks 78 bdevname(conf->devlist[j*raid_disks
76 + k]->bdev, b)); 79 + k]->bdev, b));
77 printk("]\n"); 80 printk("]\n");
78 81
@@ -401,6 +404,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
401 unsigned int sect_in_chunk; 404 unsigned int sect_in_chunk;
402 sector_t chunk; 405 sector_t chunk;
403 raid0_conf_t *conf = mddev->private; 406 raid0_conf_t *conf = mddev->private;
407 int raid_disks = conf->strip_zone[0].nb_dev;
404 unsigned int chunk_sects = mddev->chunk_sectors; 408 unsigned int chunk_sects = mddev->chunk_sectors;
405 409
406 if (is_power_of_2(chunk_sects)) { 410 if (is_power_of_2(chunk_sects)) {
@@ -423,7 +427,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
423 * + the position in the chunk 427 * + the position in the chunk
424 */ 428 */
425 *sector_offset = (chunk * chunk_sects) + sect_in_chunk; 429 *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
426 return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks 430 return conf->devlist[(zone - conf->strip_zone)*raid_disks
427 + sector_div(sector, zone->nb_dev)]; 431 + sector_div(sector, zone->nb_dev)];
428} 432}
429 433
@@ -518,6 +522,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
518 int j, k, h; 522 int j, k, h;
519 char b[BDEVNAME_SIZE]; 523 char b[BDEVNAME_SIZE];
520 raid0_conf_t *conf = mddev->private; 524 raid0_conf_t *conf = mddev->private;
525 int raid_disks = conf->strip_zone[0].nb_dev;
521 526
522 sector_t zone_size; 527 sector_t zone_size;
523 sector_t zone_start = 0; 528 sector_t zone_start = 0;
@@ -528,7 +533,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
528 seq_printf(seq, "=["); 533 seq_printf(seq, "=[");
529 for (k = 0; k < conf->strip_zone[j].nb_dev; k++) 534 for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
530 seq_printf(seq, "%s/", bdevname( 535 seq_printf(seq, "%s/", bdevname(
531 conf->devlist[j*mddev->raid_disks + k] 536 conf->devlist[j*raid_disks + k]
532 ->bdev, b)); 537 ->bdev, b));
533 538
534 zone_size = conf->strip_zone[j].zone_end - zone_start; 539 zone_size = conf->strip_zone[j].zone_end - zone_start;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b90fef607f63..044c1157d98d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -600,7 +600,7 @@ static void unplug_slaves(mddev_t *mddev)
600 int i; 600 int i;
601 601
602 rcu_read_lock(); 602 rcu_read_lock();
603 for (i=0; i<mddev->raid_disks; i++) { 603 for (i=0; i < conf->raid_disks; i++) {
604 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 604 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
605 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 605 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
606 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 606 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -634,7 +634,7 @@ static int raid10_congested(void *data, int bits)
634 if (mddev_congested(mddev, bits)) 634 if (mddev_congested(mddev, bits))
635 return 1; 635 return 1;
636 rcu_read_lock(); 636 rcu_read_lock();
637 for (i = 0; i < mddev->raid_disks && ret == 0; i++) { 637 for (i = 0; i < conf->raid_disks && ret == 0; i++) {
638 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 638 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
639 if (rdev && !test_bit(Faulty, &rdev->flags)) { 639 if (rdev && !test_bit(Faulty, &rdev->flags)) {
640 struct request_queue *q = bdev_get_queue(rdev->bdev); 640 struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -1131,7 +1131,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1131 int mirror; 1131 int mirror;
1132 mirror_info_t *p; 1132 mirror_info_t *p;
1133 int first = 0; 1133 int first = 0;
1134 int last = mddev->raid_disks - 1; 1134 int last = conf->raid_disks - 1;
1135 1135
1136 if (mddev->recovery_cp < MaxSector) 1136 if (mddev->recovery_cp < MaxSector)
1137 /* only hot-add to in-sync arrays, as recovery is 1137 /* only hot-add to in-sync arrays, as recovery is
@@ -2139,7 +2139,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2139 conf_t *conf = mddev->private; 2139 conf_t *conf = mddev->private;
2140 2140
2141 if (!raid_disks) 2141 if (!raid_disks)
2142 raid_disks = mddev->raid_disks; 2142 raid_disks = conf->raid_disks;
2143 if (!sectors) 2143 if (!sectors)
2144 sectors = mddev->dev_sectors; 2144 sectors = mddev->dev_sectors;
2145 2145
@@ -2250,7 +2250,7 @@ static int run(mddev_t *mddev)
2250 2250
2251 list_for_each_entry(rdev, &mddev->disks, same_set) { 2251 list_for_each_entry(rdev, &mddev->disks, same_set) {
2252 disk_idx = rdev->raid_disk; 2252 disk_idx = rdev->raid_disk;
2253 if (disk_idx >= mddev->raid_disks 2253 if (disk_idx >= conf->raid_disks
2254 || disk_idx < 0) 2254 || disk_idx < 0)
2255 continue; 2255 continue;
2256 disk = conf->mirrors + disk_idx; 2256 disk = conf->mirrors + disk_idx;
@@ -2311,8 +2311,8 @@ static int run(mddev_t *mddev)
2311 mdname(mddev)); 2311 mdname(mddev));
2312 printk(KERN_INFO 2312 printk(KERN_INFO
2313 "raid10: raid set %s active with %d out of %d devices\n", 2313 "raid10: raid set %s active with %d out of %d devices\n",
2314 mdname(mddev), mddev->raid_disks - mddev->degraded, 2314 mdname(mddev), conf->raid_disks - mddev->degraded,
2315 mddev->raid_disks); 2315 conf->raid_disks);
2316 /* 2316 /*
2317 * Ok, everything is just fine now 2317 * Ok, everything is just fine now
2318 */ 2318 */
@@ -2335,7 +2335,7 @@ static int run(mddev_t *mddev)
2335 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2335 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2336 } 2336 }
2337 2337
2338 if (conf->near_copies < mddev->raid_disks) 2338 if (conf->near_copies < conf->raid_disks)
2339 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2339 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2340 md_integrity_register(mddev); 2340 md_integrity_register(mddev);
2341 return 0; 2341 return 0;