Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bitmap.c    |  75
-rw-r--r--   drivers/md/dm-raid1.c  |   2
-rw-r--r--   drivers/md/md.c        |   4
-rw-r--r--   drivers/md/raid1.c     |  29
-rw-r--r--   drivers/md/raid5.c     |   1
-rw-r--r--   drivers/md/raid6main.c |   1
6 files changed, 67 insertions, 45 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bca955e0de..41df4cda66e2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -818,8 +818,7 @@ int bitmap_unplug(struct bitmap *bitmap)
 	return 0;
 }
 
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
-				   unsigned long sectors, int in_sync);
+static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset);
 /* * bitmap_init_from_disk -- called at bitmap_create time to initialize
  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
  * memory mapping of the bitmap file
@@ -828,7 +827,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
  * previously kicked from the array, we mark all the bits as
  * 1's in order to cause a full resync.
  */
-static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
+static int bitmap_init_from_disk(struct bitmap *bitmap)
 {
 	unsigned long i, chunks, index, oldindex, bit;
 	struct page *page = NULL, *oldpage = NULL;
@@ -929,8 +928,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
 		}
 		if (test_bit(bit, page_address(page))) {
 			/* if the disk bit is set, set the memory bit */
-			bitmap_set_memory_bits(bitmap,
-				i << CHUNK_BLOCK_SHIFT(bitmap), 1, in_sync);
+			bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap));
 			bit_cnt++;
 		}
 	}
@@ -1426,35 +1424,53 @@ void bitmap_close_sync(struct bitmap *bitmap)
 	}
 }
 
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
-				   unsigned long sectors, int in_sync)
+static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset)
 {
 	/* For each chunk covered by any of these sectors, set the
-	 * counter to 1 and set resync_needed unless in_sync. They should all
+	 * counter to 1 and set resync_needed. They should all
 	 * be 0 at this point
 	 */
-	while (sectors) {
-		int secs;
-		bitmap_counter_t *bmc;
-		spin_lock_irq(&bitmap->lock);
-		bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
-		if (!bmc) {
-			spin_unlock_irq(&bitmap->lock);
-			return;
-		}
-		if (! *bmc) {
-			struct page *page;
-			*bmc = 1 | (in_sync? 0 : NEEDED_MASK);
-			bitmap_count_page(bitmap, offset, 1);
-			page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
-			set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
-		}
-		spin_unlock_irq(&bitmap->lock);
-		if (sectors > secs)
-			sectors -= secs;
-		else
-			sectors = 0;
-	}
+
+	int secs;
+	bitmap_counter_t *bmc;
+	spin_lock_irq(&bitmap->lock);
+	bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
+	if (!bmc) {
+		spin_unlock_irq(&bitmap->lock);
+		return;
+	}
+	if (! *bmc) {
+		struct page *page;
+		*bmc = 1 | NEEDED_MASK;
+		bitmap_count_page(bitmap, offset, 1);
+		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
+		set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
+	}
+	spin_unlock_irq(&bitmap->lock);
+
+}
+
+/*
+ * flush out any pending updates
+ */
+void bitmap_flush(mddev_t *mddev)
+{
+	struct bitmap *bitmap = mddev->bitmap;
+	int sleep;
+
+	if (!bitmap) /* there was no bitmap */
+		return;
+
+	/* run the daemon_work three time to ensure everything is flushed
+	 * that can be
+	 */
+	sleep = bitmap->daemon_sleep;
+	bitmap->daemon_sleep = 0;
+	bitmap_daemon_work(bitmap);
+	bitmap_daemon_work(bitmap);
+	bitmap_daemon_work(bitmap);
+	bitmap->daemon_sleep = sleep;
+	bitmap_update_sb(bitmap);
 }
 
 /*
@@ -1565,7 +1581,8 @@ int bitmap_create(mddev_t *mddev)
 
 	/* now that we have some pages available, initialize the in-memory
 	 * bitmap from the on-disk bitmap */
-	err = bitmap_init_from_disk(bitmap, mddev->recovery_cp == MaxSector);
+	err = bitmap_init_from_disk(bitmap);
+
 	if (err)
 		return err;
 
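The bitmap.c changes above drop the sectors/in_sync arguments from bitmap_set_memory_bits(), which now marks exactly one chunk and always sets the "needed" flag, and they add bitmap_flush() so an array being stopped can push out dirty bitmap pages by running the daemon with its sleep interval forced to zero. As a reading aid, the 16-bit per-chunk counter that `*bmc = 1 | NEEDED_MASK` manipulates is laid out roughly as in the sketch below; this is an assumption based on the bitmap.h of this kernel series, so the exact names and values should be checked against that header rather than taken from here.

/* Rough sketch of the per-chunk counter layout assumed by the hunks above
 * (based on drivers/md/bitmap.h of this era -- verify before relying on it).
 */
typedef unsigned short bitmap_counter_t;	/* one 16-bit word per chunk */

#define COUNTER_BITS 16
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))	/* chunk needs resync */
#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))	/* resync in progress */
#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)		/* writes-in-flight count */

/* "*bmc = 1 | NEEDED_MASK" therefore sets the count to 1 so the chunk reads
 * as dirty, and flags it as needing a resync before its bit may be cleared.
 */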
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 12031c9d3f1e..b08df8b9b2ca 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1230,7 +1230,7 @@ static int __init dm_mirror_init(void)
 	if (r)
 		return r;
 
-	_kmirrord_wq = create_workqueue("kmirrord");
+	_kmirrord_wq = create_singlethread_workqueue("kmirrord");
 	if (!_kmirrord_wq) {
 		DMERR("couldn't start kmirrord");
 		dm_dirty_log_exit();
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6580e0fa4a47..480f658db6f2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1798,6 +1798,8 @@ static int do_md_stop(mddev_t * mddev, int ro)
 				goto out;
 			mddev->ro = 1;
 		} else {
+			bitmap_flush(mddev);
+			wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
 			if (mddev->ro)
 				set_disk_ro(disk, 0);
 			blk_queue_make_request(mddev->queue, md_fail_request);
@@ -3484,7 +3486,6 @@ static void md_do_sync(mddev_t *mddev)
 		goto skip;
 	}
 	ITERATE_MDDEV(mddev2,tmp) {
-		printk(".");
 		if (mddev2 == mddev)
 			continue;
 		if (mddev2->curr_resync &&
@@ -4007,3 +4008,4 @@ EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_print_devices);
 EXPORT_SYMBOL(md_check_recovery);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("md");
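In do_md_stop(), the array is now quiesced in two steps before the queue is torn down: bitmap_flush() pushes out dirty bitmap pages, then the wait_event() blocks until pending_writes reaches zero. That wait only terminates because every metadata write decrements the counter and wakes sb_wait when it hits zero; the snippet below is a schematic of that pairing with hypothetical helper names (the real accounting lives in drivers/md/md.c), not code from this patch.

#include <linux/raid/md.h>	/* mddev_t, as used by md.c in this series */

/* Hypothetical helpers sketching the accounting that the new
 * wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0)
 * depends on.  Names are illustrative only.
 */
static void md_metadata_write_start(mddev_t *mddev)
{
	atomic_inc(&mddev->pending_writes);	/* taken before the bio is submitted */
}

static void md_metadata_write_done(mddev_t *mddev)
{
	/* called from the write's completion path */
	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);	/* lets do_md_stop() continue */
}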
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d3a64a04a6d8..51d9645ed09c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -893,7 +893,6 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 	if (!uptodate) {
 		md_error(r1_bio->mddev,
 			 conf->mirrors[r1_bio->read_disk].rdev);
-		set_bit(R1BIO_Degraded, &r1_bio->state);
 	} else
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
@@ -918,10 +917,9 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
 			mirror = i;
 			break;
 		}
-	if (!uptodate) {
+	if (!uptodate)
 		md_error(mddev, conf->mirrors[mirror].rdev);
-		set_bit(R1BIO_Degraded, &r1_bio->state);
-	}
+
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
@@ -1109,6 +1107,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	int i;
 	int write_targets = 0;
 	int sync_blocks;
+	int still_degraded = 0;
 
 	if (!conf->r1buf_pool)
 	{
@@ -1137,7 +1136,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		return 0;
 	}
 
-	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, mddev->degraded) &&
+	/* before building a request, check if we can skip these blocks..
+	 * This call the bitmap_start_sync doesn't actually record anything
+	 */
+	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
 	    !conf->fullsync) {
 		/* We can skip this block, and probably several more */
 		*skipped = 1;
@@ -1203,24 +1205,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		if (i == disk) {
 			bio->bi_rw = READ;
 			bio->bi_end_io = end_sync_read;
-		} else if (conf->mirrors[i].rdev &&
-			   !conf->mirrors[i].rdev->faulty &&
-			   (!conf->mirrors[i].rdev->in_sync ||
-			    sector_nr + RESYNC_SECTORS > mddev->recovery_cp)) {
+		} else if (conf->mirrors[i].rdev == NULL ||
+			   conf->mirrors[i].rdev->faulty) {
+			still_degraded = 1;
+			continue;
+		} else if (!conf->mirrors[i].rdev->in_sync ||
+			   sector_nr + RESYNC_SECTORS > mddev->recovery_cp) {
 			bio->bi_rw = WRITE;
 			bio->bi_end_io = end_sync_write;
 			write_targets ++;
 		} else
+			/* no need to read or write here */
 			continue;
 		bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset;
 		bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		bio->bi_private = r1_bio;
 	}
 
-	if (write_targets + 1 < conf->raid_disks)
-		/* array degraded, can't clear bitmap */
-		set_bit(R1BIO_Degraded, &r1_bio->state);
-
 	if (write_targets == 0) {
 		/* There is nowhere to write, so all non-sync
 		 * drives must be failed - so we are finished
@@ -1243,7 +1244,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 			break;
 		if (sync_blocks == 0) {
 			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
-					       &sync_blocks, mddev->degraded) &&
+					       &sync_blocks, still_degraded) &&
 			    !conf->fullsync)
 				break;
 			if (sync_blocks < (PAGE_SIZE>>9))
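The net effect of the raid1.c hunks is that the resync path no longer consults the array-wide mddev->degraded flag (and no longer marks whole r1_bios as R1BIO_Degraded); instead, still_degraded records whether any mirror covering the current resync window is absent or faulty, and that per-window answer is what gets passed to bitmap_start_sync() to decide whether the corresponding bits may eventually be cleared. The stand-alone sketch below restates that per-window test with hypothetical types and names; it is an illustration, not code from the patch.

#include <stdbool.h>

/* Hypothetical restatement of the per-window check used above: a resync
 * window is "still degraded" if any mirror slot lacks a working device,
 * in which case its bitmap bits must not be treated as cleanable even
 * though the surviving copies are in sync.
 */
struct mirror_slot {
	bool present;	/* corresponds to rdev != NULL */
	bool faulty;	/* corresponds to rdev->faulty */
};

static bool window_still_degraded(const struct mirror_slot *m, int raid_disks)
{
	for (int i = 0; i < raid_disks; i++)
		if (!m[i].present || m[i].faulty)
			return true;	/* a copy is missing for this window */
	return false;
}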
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4698d5f79575..43f231a467d5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1653,6 +1653,7 @@ static int run (mddev_t *mddev)
 
 	/* device size must be a multiple of chunk size */
 	mddev->size &= ~(mddev->chunk_size/1024 -1);
+	mddev->resync_max_sectors = mddev->size << 1;
 
 	if (!conf->chunk_size || conf->chunk_size % 4) {
 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index f5ee16805111..495dee1d1e83 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1813,6 +1813,7 @@ static int run (mddev_t *mddev)
 
 	/* device size must be a multiple of chunk size */
 	mddev->size &= ~(mddev->chunk_size/1024 -1);
+	mddev->resync_max_sectors = mddev->size << 1;
 
 	if (conf->raid_disks < 4) {
 		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
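The raid5 and raid6 hunks add the same single line: mddev->size is the per-device size in kibibytes, so shifting it left by one converts it to 512-byte sectors, and the result is published as resync_max_sectors for the sync and bitmap code to use as the upper bound of the resync range. A small worked example of the conversion (the device size is just an assumed figure):

#include <stdio.h>

/* Worked example of the "mddev->size << 1" conversion: KiB -> 512-byte
 * sectors.  The 4 GiB component size is an assumption for illustration.
 */
int main(void)
{
	unsigned long long size_kib = 4ULL * 1024 * 1024;	/* 4 GiB per device, in KiB */
	unsigned long long resync_max_sectors = size_kib << 1;	/* 2 sectors per KiB */

	printf("%llu KiB -> %llu sectors\n", size_kib, resync_max_sectors);
	return 0;
}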