path: root/drivers/md/md.c
Diffstat (limited to 'drivers/md/md.c')
 -rw-r--r--  drivers/md/md.c  108
 1 file changed, 102 insertions, 6 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7f5ffc51c28..e2ec8bd0fb9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -757,6 +757,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 		rdev->sb_start = 0;
 		rdev->sectors = 0;
 	}
+	if (rdev->bb_page) {
+		put_page(rdev->bb_page);
+		rdev->bb_page = NULL;
+	}
 }
 
 
@@ -1395,6 +1399,8 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
 	return cpu_to_le32(csum);
 }
 
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+			    int acknowledged);
 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 {
 	struct mdp_superblock_1 *sb;
@@ -1473,6 +1479,47 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	else
 		rdev->desc_nr = le32_to_cpu(sb->dev_number);
 
+	if (!rdev->bb_page) {
+		rdev->bb_page = alloc_page(GFP_KERNEL);
+		if (!rdev->bb_page)
+			return -ENOMEM;
+	}
+	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
+	    rdev->badblocks.count == 0) {
+		/* need to load the bad block list.
+		 * Currently we limit it to one page.
+		 */
+		s32 offset;
+		sector_t bb_sector;
+		u64 *bbp;
+		int i;
+		int sectors = le16_to_cpu(sb->bblog_size);
+		if (sectors > (PAGE_SIZE / 512))
+			return -EINVAL;
+		offset = le32_to_cpu(sb->bblog_offset);
+		if (offset == 0)
+			return -EINVAL;
+		bb_sector = (long long)offset;
+		if (!sync_page_io(rdev, bb_sector, sectors << 9,
+				  rdev->bb_page, READ, true))
+			return -EIO;
+		bbp = (u64 *)page_address(rdev->bb_page);
+		rdev->badblocks.shift = sb->bblog_shift;
+		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
+			u64 bb = le64_to_cpu(*bbp);
+			int count = bb & (0x3ff);
+			u64 sector = bb >> 10;
+			sector <<= sb->bblog_shift;
+			count <<= sb->bblog_shift;
+			if (bb + 1 == 0)
+				break;
+			if (md_set_badblocks(&rdev->badblocks,
+					     sector, count, 1) == 0)
+				return -EINVAL;
+		}
+	} else if (sb->bblog_offset == 0)
+		rdev->badblocks.shift = -1;
+
 	if (!refdev) {
 		ret = 1;
 	} else {
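
Each 64-bit word in the bad-block log read above packs the start sector in the
upper 54 bits and the range length (in sectors) in the low 10 bits, both scaled
by bblog_shift, with an all-ones word marking an unused slot. The following is
a minimal standalone userspace sketch of that decoding, under those assumptions
only; struct bb_entry and decode_bb() are hypothetical names, and the kernel
additionally byte-swaps each word with le64_to_cpu() before decoding.

#include <stdint.h>
#include <stdio.h>

struct bb_entry {		/* hypothetical helper type, not from md.c */
	uint64_t sector;	/* first bad sector */
	int count;		/* number of bad sectors */
};

/* Decode one log word; returns 0 for an empty (all-ones) slot. */
static int decode_bb(uint64_t word, int bblog_shift, struct bb_entry *out)
{
	if (word + 1 == 0)			/* 0xffff... : unused slot */
		return 0;
	out->count  = (int)(word & 0x3ff) << bblog_shift;
	out->sector = (word >> 10) << bblog_shift;
	return 1;
}

int main(void)
{
	/* example word: 8 bad sectors starting at sector 4096, shift 0 */
	uint64_t word = ((uint64_t)4096 << 10) | 8;
	struct bb_entry e;

	if (decode_bb(word, 0, &e))
		printf("bad range: sector %llu, %d sectors\n",
		       (unsigned long long)e.sector, e.count);
	return 0;
}
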
@@ -1624,7 +1671,6 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 	sb->pad0 = 0;
 	sb->recovery_offset = cpu_to_le64(0);
 	memset(sb->pad1, 0, sizeof(sb->pad1));
-	memset(sb->pad2, 0, sizeof(sb->pad2));
 	memset(sb->pad3, 0, sizeof(sb->pad3));
 
 	sb->utime = cpu_to_le64((__u64)mddev->utime);
@@ -1664,6 +1710,40 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
 	}
 
+	if (rdev->badblocks.count == 0)
+		/* Nothing to do for bad blocks*/ ;
+	else if (sb->bblog_offset == 0)
+		/* Cannot record bad blocks on this device */
+		md_error(mddev, rdev);
+	else {
+		struct badblocks *bb = &rdev->badblocks;
+		u64 *bbp = (u64 *)page_address(rdev->bb_page);
+		u64 *p = bb->page;
+		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
+		if (bb->changed) {
+			unsigned seq;
+
+retry:
+			seq = read_seqbegin(&bb->lock);
+
+			memset(bbp, 0xff, PAGE_SIZE);
+
+			for (i = 0 ; i < bb->count ; i++) {
+				u64 internal_bb = *p++;
+				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
+						| BB_LEN(internal_bb));
+				*bbp++ = cpu_to_le64(store_bb);
+			}
+			if (read_seqretry(&bb->lock, seq))
+				goto retry;
+
+			bb->sector = (rdev->sb_start +
+				      (int)le32_to_cpu(sb->bblog_offset));
+			bb->size = le16_to_cpu(sb->bblog_size);
+			bb->changed = 0;
+		}
+	}
+
 	max_dev = 0;
 	list_for_each_entry(rdev2, &mddev->disks, same_set)
 		if (rdev2->desc_nr+1 > max_dev)
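
The write-out path above pre-fills the page with 0xff (so any unused slot reads
back as a terminator), then stores each range as (start << 10) | length in
little-endian, retrying under the seqlock if the list changed mid-copy. Below
is a minimal userspace sketch of that serialization, assuming the ranges are
already available as plain start/length pairs; struct bb_range and
serialize_bb_page() are hypothetical names, and the endian conversion and
seqlock retry are only noted in comments.

#include <stdint.h>
#include <string.h>

#define LOG_PAGE_SIZE	4096		/* one page, as in the patch */

struct bb_range {			/* hypothetical: one bad range */
	uint64_t start;			/* first bad sector */
	uint32_t len;			/* length in sectors, <= 1023 */
};

/* Fill a one-page log image from an array of ranges. */
static void serialize_bb_page(const struct bb_range *bb, int count,
			      uint64_t *page)
{
	int i;

	/* all-ones words mark unused slots / the end of the list */
	memset(page, 0xff, LOG_PAGE_SIZE);

	for (i = 0; i < count && i < LOG_PAGE_SIZE / 8; i++)
		page[i] = (bb[i].start << 10) | (bb[i].len & 0x3ff);

	/* md.c additionally converts each word with cpu_to_le64() and
	 * repeats the copy if read_seqretry() reports a concurrent change */
}
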
@@ -2196,6 +2276,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
 	mdk_rdev_t *rdev;
 	int sync_req;
 	int nospares = 0;
+	int any_badblocks_changed = 0;
 
 repeat:
 	/* First make sure individual recovery_offsets are correct */
@@ -2267,6 +2348,11 @@ repeat:
 			MD_BUG();
 		mddev->events --;
 	}
+
+	list_for_each_entry(rdev, &mddev->disks, same_set)
+		if (rdev->badblocks.changed)
+			any_badblocks_changed++;
+
 	sync_sbs(mddev, nospares);
 	spin_unlock_irq(&mddev->write_lock);
 
@@ -2292,6 +2378,13 @@ repeat:
 				bdevname(rdev->bdev,b),
 				(unsigned long long)rdev->sb_start);
 			rdev->sb_events = mddev->events;
+			if (rdev->badblocks.size) {
+				md_super_write(mddev, rdev,
+					       rdev->badblocks.sector,
+					       rdev->badblocks.size << 9,
+					       rdev->bb_page);
+				rdev->badblocks.size = 0;
+			}
 
 		} else
 			dprintk(")\n");
@@ -2315,6 +2408,9 @@ repeat:
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
+	if (any_badblocks_changed)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			md_ack_all_badblocks(&rdev->badblocks);
 }
 
 /* words written to sysfs files may, or may not, be \n terminated.
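
md_update_sb() now samples which devices have newly recorded bad blocks while
still under the write lock, writes the superblocks together with the bad-block
page, and only then acknowledges the bad blocks via md_ack_all_badblocks(). A
rough userspace sketch of that record / write / acknowledge ordering follows;
struct dev_state, persist_metadata() and update_sb_sketch() are hypothetical
names, not kernel APIs.

#include <stdbool.h>

struct dev_state {		/* hypothetical stand-in for mdk_rdev_t */
	bool bb_changed;	/* new bad blocks recorded in memory */
	bool bb_acked;		/* bad blocks known to be on disk */
};

void persist_metadata(struct dev_state *dev)
{
	/* stand-in for md_super_write() of the superblock + bad-block page */
	(void)dev;
}

void update_sb_sketch(struct dev_state *devs, int n)
{
	bool any_changed = false;
	int i;

	for (i = 0; i < n; i++)			/* 1. sample before writing */
		if (devs[i].bb_changed)
			any_changed = true;

	for (i = 0; i < n; i++)			/* 2. write metadata */
		persist_metadata(&devs[i]);

	if (any_changed)			/* 3. acknowledge afterwards */
		for (i = 0; i < n; i++)
			devs[i].bb_acked = true;
}
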
@@ -2822,6 +2918,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
 	rdev->sb_events = 0;
 	rdev->last_read_error.tv_sec = 0;
 	rdev->last_read_error.tv_nsec = 0;
+	rdev->sb_loaded = 0;
+	rdev->bb_page = NULL;
 	atomic_set(&rdev->nr_pending, 0);
 	atomic_set(&rdev->read_errors, 0);
 	atomic_set(&rdev->corrected_errors, 0);
@@ -2910,11 +3008,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
 	return rdev;
 
 abort_free:
-	if (rdev->sb_page) {
-		if (rdev->bdev)
-			unlock_rdev(rdev);
-		free_disk_sb(rdev);
-	}
+	if (rdev->bdev)
+		unlock_rdev(rdev);
+	free_disk_sb(rdev);
 	kfree(rdev->badblocks.page);
 	kfree(rdev);
 	return ERR_PTR(err);