author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 20:08:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 20:08:40 -0400
commit     c80ddb526331a72c9e9d1480f85f6fd7c74e3d2d (patch)
tree       0212803a009f171990032abb94fad84156baa153 /drivers/md/dm-raid.c
parent     2c13bc0f8f0d3e13b42be70bf74fec8e56b58324 (diff)
parent     1dff2b87a34a1ac1d1898ea109bf97ed396aca53 (diff)
Merge tag 'md-3.5' of git://neil.brown.name/md
Pull md updates from NeilBrown:
 "It's been a busy cycle for md - lots of fun stuff here, if you like
  this kind of thing :-)

  Main features:
   - RAID10 arrays can be reshaped - adding and removing devices and
     changing chunks (not 'far' arrays though)
   - allow RAID5 arrays to be reshaped with a backup file (not tested
     yet, but the principle works fine for RAID10)
   - arrays can be reshaped while a bitmap is present - you no longer
     need to remove it first
   - SSSE3 support for RAID6 syndrome calculations

  and of course a number of minor fixes etc."

* tag 'md-3.5' of git://neil.brown.name/md: (56 commits)
  md/bitmap: record the space available for the bitmap in the superblock.
  md/raid10: Remove extras after reshape to smaller number of devices.
  md/raid5: improve removal of extra devices after reshape.
  md: check the return of mddev_find()
  MD RAID1: Further conditionalize 'fullsync'
  DM RAID: Use md_error() in place of simply setting Faulty bit
  DM RAID: Record and handle missing devices
  DM RAID: Set recovery flags on resume
  md/raid5: Allow reshape while a bitmap is present.
  md/raid10: resize bitmap when required during reshape.
  md: allow array to be resized while bitmap is present.
  md/bitmap: make sure reshape request are reflected in superblock.
  md/bitmap: add bitmap_resize function to allow bitmap resizing.
  md/bitmap: use DIV_ROUND_UP instead of open-code
  md/bitmap: create a 'struct bitmap_counts' substructure of 'struct bitmap'
  md/bitmap: make bitmap bitops atomic.
  md/bitmap: make _page_attr bitops atomic.
  md/bitmap: merge bitmap_file_unmap and bitmap_file_put.
  md/bitmap: remove async freeing of bitmap file.
  md/bitmap: convert some spin_lock_irqsave to spin_lock_irq
  ...
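One of the smaller cleanups in the shortlog above, "md/bitmap: use DIV_ROUND_UP
instead of open-code", replaces hand-rolled round-up division with the kernel's
DIV_ROUND_UP() macro from include/linux/kernel.h. The sketch below is only a
user-space illustration of that pattern, not code from the patch; the sector
and chunk values are made up.

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP() in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values only - not taken from the patch. */
	unsigned long sectors = 1000;
	unsigned long chunk_sectors = 64;

	/* Open-coded round-up division ... */
	unsigned long chunks_open = (sectors + chunk_sectors - 1) / chunk_sectors;

	/* ... and the equivalent spelled with the macro. */
	unsigned long chunks_macro = DIV_ROUND_UP(sectors, chunk_sectors);

	printf("%lu == %lu\n", chunks_open, chunks_macro);	/* prints "16 == 16" */
	return 0;
}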
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--  drivers/md/dm-raid.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 68965e663248..017c34d78d61 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -155,10 +155,7 @@ static void context_free(struct raid_set *rs)
 	for (i = 0; i < rs->md.raid_disks; i++) {
 		if (rs->dev[i].meta_dev)
 			dm_put_device(rs->ti, rs->dev[i].meta_dev);
-		if (rs->dev[i].rdev.sb_page)
-			put_page(rs->dev[i].rdev.sb_page);
-		rs->dev[i].rdev.sb_page = NULL;
-		rs->dev[i].rdev.sb_loaded = 0;
+		md_rdev_clear(&rs->dev[i].rdev);
 		if (rs->dev[i].data_dev)
 			dm_put_device(rs->ti, rs->dev[i].data_dev);
 	}
@@ -606,7 +603,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
 		DMERR("Failed to read superblock of device at position %d",
 		      rdev->raid_disk);
-		set_bit(Faulty, &rdev->flags);
+		md_error(rdev->mddev, rdev);
 		return -EINVAL;
 	}
 
@@ -617,16 +614,18 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
 
 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 {
-	struct md_rdev *r;
+	int i;
 	uint64_t failed_devices;
 	struct dm_raid_superblock *sb;
+	struct raid_set *rs = container_of(mddev, struct raid_set, md);
 
 	sb = page_address(rdev->sb_page);
 	failed_devices = le64_to_cpu(sb->failed_devices);
 
-	rdev_for_each(r, mddev)
-		if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
-			failed_devices |= (1ULL << r->raid_disk);
+	for (i = 0; i < mddev->raid_disks; i++)
+		if (!rs->dev[i].data_dev ||
+		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
+			failed_devices |= (1ULL << i);
 
 	memset(sb, 0, sizeof(*sb));
 
@@ -1252,12 +1251,13 @@ static void raid_resume(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;
 
+	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
 	if (!rs->bitmap_loaded) {
 		bitmap_load(&rs->md);
 		rs->bitmap_loaded = 1;
-	} else
-		md_wakeup_thread(rs->md.thread);
+	}
 
+	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 	mddev_resume(&rs->md);
 }
 
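For readers skimming the super_sync() hunk above, here is a minimal stand-alone
sketch of the failed-device bookkeeping it introduces, using simplified
placeholder structures instead of the real md/dm-raid types: each array slot
that is missing or marked Faulty contributes one bit to a 64-bit mask.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for rs->dev[i] in struct raid_set (hypothetical). */
struct example_dev {
	int present;	/* plays the role of rs->dev[i].data_dev != NULL */
	int faulty;	/* plays the role of test_bit(Faulty, &rdev.flags) */
};

/*
 * Fold every missing or Faulty slot into a 64-bit mask, one bit per
 * array position, as the new super_sync() loop does.
 */
static uint64_t record_failed_devices(const struct example_dev *dev, int raid_disks)
{
	uint64_t failed_devices = 0;
	int i;

	for (i = 0; i < raid_disks; i++)
		if (!dev[i].present || dev[i].faulty)
			failed_devices |= (1ULL << i);

	return failed_devices;
}

int main(void)
{
	struct example_dev devs[4] = {
		{ 1, 0 },	/* slot 0: healthy */
		{ 0, 0 },	/* slot 1: missing */
		{ 1, 1 },	/* slot 2: faulty  */
		{ 1, 0 },	/* slot 3: healthy */
	};

	/* Bits 1 and 2 set -> 0x6. */
	printf("failed_devices = 0x%llx\n",
	       (unsigned long long)record_failed_devices(devs, 4));
	return 0;
}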