path: root/drivers/md/md.c
author    Jens Axboe <axboe@kernel.dk>  2011-10-19 08:30:42 -0400
committer Jens Axboe <axboe@kernel.dk>  2011-10-19 08:30:42 -0400
commit    5c04b426f2e8b46cfc7969a35b2631063a3c646c (patch)
tree      2d27d9f5d2fe5d5e8fbc01a467ec58bcb50235c1 /drivers/md/md.c
parent    499337bb6511e665a236a6a947f819d98ea340c6 (diff)
parent    899e3ee404961a90b828ad527573aaaac39f0ab1 (diff)
Merge branch 'v3.1-rc10' into for-3.2/core
Conflicts:
        block/blk-core.c
        include/linux/blkdev.h

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c  50
1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5c2178562c96..8f52d4eb78a0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
 static void autostart_arrays(int part);
 #endif
 
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
@@ -735,7 +740,12 @@ static void mddev_unlock(mddev_t * mddev)
         } else
                 mutex_unlock(&mddev->reconfig_mutex);
 
+        /* As we've dropped the mutex we need a spinlock to
+         * make sure the thread doesn't disappear
+         */
+        spin_lock(&pers_lock);
         md_wakeup_thread(mddev->thread);
+        spin_unlock(&pers_lock);
 }
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -844,7 +854,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
         bio->bi_end_io = super_written;
 
         atomic_inc(&mddev->pending_writes);
-        submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+        submit_bio(WRITE_FLUSH_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -1134,8 +1144,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                 ret = 0;
         }
         rdev->sectors = rdev->sb_start;
+        /* Limit to 4TB as metadata cannot record more than that */
+        if (rdev->sectors >= (2ULL << 32))
+                rdev->sectors = (2ULL << 32) - 2;
 
-        if (rdev->sectors < sb->size * 2 && sb->level > 1)
+        if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                 /* "this cannot possibly happen" ... */
                 ret = -EINVAL;
 
@@ -1169,7 +1182,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
         mddev->clevel[0] = 0;
         mddev->layout = sb->layout;
         mddev->raid_disks = sb->raid_disks;
-        mddev->dev_sectors = sb->size * 2;
+        mddev->dev_sectors = ((sector_t)sb->size) * 2;
         mddev->events = ev1;
         mddev->bitmap_info.offset = 0;
         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
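
The ((sector_t)sb->size) casts above matter because the 0.90 superblock records a
component size as a 32-bit count of 1K blocks; multiplying that by 2 in 32-bit
arithmetic wraps for components of 2TB and larger, while widening to sector_t first
keeps the full sector count. A minimal standalone sketch of the overflow, with
hypothetical values rather than the kernel structures:

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t sector_t;                 /* 512-byte sectors, 64-bit build */

  int main(void)
  {
          uint32_t sb_size_kb = 3u << 30;    /* a 3TB component, in 1K blocks */

          uint32_t wrong = sb_size_kb * 2;               /* 32-bit multiply wraps */
          sector_t right = ((sector_t)sb_size_kb) * 2;   /* widen, then multiply  */

          printf("wrong: %u sectors\nright: %llu sectors\n",
                 wrong, (unsigned long long)right);
          return 0;
  }
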
@@ -1411,6 +1424,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
         rdev->sb_start = calc_dev_sboffset(rdev);
         if (!num_sectors || num_sectors > rdev->sb_start)
                 num_sectors = rdev->sb_start;
+        /* Limit to 4TB as metadata cannot record more than that.
+         * 4TB == 2^32 KB, or 2*2^32 sectors.
+         */
+        if (num_sectors >= (2ULL << 32))
+                num_sectors = (2ULL << 32) - 2;
         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                        rdev->sb_page);
         md_super_wait(rdev->mddev);
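
The 4TB figure follows from the field width: 0.90 metadata stores sizes as a 32-bit
number of 1K blocks, so the largest representable size is 2^32 KB, i.e. 2 * 2^32
sectors of 512 bytes. The clamp stops two sectors short of that, presumably so the
capped value stays an even sector count (a whole number of 1K blocks). A quick check
of the arithmetic in plain C, outside the kernel:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t max_sectors = 2ULL << 32;        /* 2 * 2^32 = 8589934592 sectors */
          uint64_t max_bytes   = max_sectors * 512; /* 4398046511104 bytes           */

          printf("cap = %llu sectors = %llu bytes = %llu TB\n",
                 (unsigned long long)max_sectors,
                 (unsigned long long)max_bytes,
                 (unsigned long long)(max_bytes >> 40));   /* prints 4 */
          return 0;
  }
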
@@ -1734,6 +1752,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
         sb->level = cpu_to_le32(mddev->level);
         sb->layout = cpu_to_le32(mddev->layout);
 
+        if (test_bit(WriteMostly, &rdev->flags))
+                sb->devflags |= WriteMostly1;
+        else
+                sb->devflags &= ~WriteMostly1;
+
         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2557,7 +2580,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
         int err = -EINVAL;
         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
                 md_error(rdev->mddev, rdev);
-                err = 0;
+                if (test_bit(Faulty, &rdev->flags))
+                        err = 0;
+                else
+                        err = -EBUSY;
         } else if (cmd_match(buf, "remove")) {
                 if (rdev->raid_disk >= 0)
                         err = -EBUSY;
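
md_error() is only a request: the personality's error handler can decline to mark the
member Faulty (typically when failing it would take out the last working device), and
previously state_store() reported success regardless. With the check above, a writer
of the sysfs state attribute sees the refusal as EBUSY. A hedged userspace sketch,
using the standard /sys/block/<md>/md/dev-<member>/state layout with example device
names:

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          const char *path = "/sys/block/md0/md/dev-sdb1/state"; /* example member */
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          if (write(fd, "faulty", 6) < 0) {
                  if (errno == EBUSY)
                          fprintf(stderr, "md declined to fail the device\n");
                  else
                          perror("write");
                  close(fd);
                  return 1;
          }
          close(fd);
          return 0;
  }
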
@@ -2580,7 +2606,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                 err = 0;
         } else if (cmd_match(buf, "-blocked")) {
                 if (!test_bit(Faulty, &rdev->flags) &&
-                    test_bit(BlockedBadBlocks, &rdev->flags)) {
+                    rdev->badblocks.unacked_exist) {
                         /* metadata handler doesn't understand badblocks,
                          * so we need to fail the device
                          */
@@ -5979,6 +6005,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
                 return -ENODEV;
 
         md_error(mddev, rdev);
+        if (!test_bit(Faulty, &rdev->flags))
+                return -EBUSY;
         return 0;
 }
 
@@ -6407,11 +6435,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
         return thread;
 }
 
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
 {
+        mdk_thread_t *thread = *threadp;
         if (!thread)
                 return;
         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+        /* Locking ensures that mddev_unlock does not wake_up a
+         * non-existent thread
+         */
+        spin_lock(&pers_lock);
+        *threadp = NULL;
+        spin_unlock(&pers_lock);
 
         kthread_stop(thread->tsk);
         kfree(thread);
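
Taken together with the mddev_unlock() change earlier in this diff, the locking closes
a use-after-free window: the waker reads mddev->thread and the unregister path clears
it, both under pers_lock, so a wakeup can never reach a thread that has already been
freed. A stripped-down sketch of the pattern with hypothetical names, using pthreads
in place of the kernel primitives:

  #include <pthread.h>
  #include <stdlib.h>

  struct worker {                           /* stand-in for mdk_thread_t */
          pthread_t tsk;
  };

  static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

  static void wakeup_worker(struct worker *w)
  {
          if (w) {
                  /* kick the worker; md does a wake_up() on the thread's wqueue */
          }
  }

  /* Analogue of mddev_unlock(): the shared pointer may be cleared
   * concurrently, so only dereference it while holding the lock. */
  static void unlock_and_wake(struct worker **wp)
  {
          pthread_mutex_lock(&pers_lock);
          wakeup_worker(*wp);
          pthread_mutex_unlock(&pers_lock);
  }

  /* Analogue of md_unregister_thread(): clear the shared pointer under the
   * lock first so no waker can still see the worker, then stop and free it. */
  static void unregister_worker(struct worker **wp)
  {
          struct worker *w;

          pthread_mutex_lock(&pers_lock);
          w = *wp;
          *wp = NULL;
          pthread_mutex_unlock(&pers_lock);

          if (!w)
                  return;
          pthread_join(w->tsk, NULL);
          free(w);
  }
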
@@ -7318,8 +7353,7 @@ static void reap_sync_thread(mddev_t *mddev)
         mdk_rdev_t *rdev;
 
         /* resync has finished, collect result */
-        md_unregister_thread(mddev->sync_thread);
-        mddev->sync_thread = NULL;
+        md_unregister_thread(&mddev->sync_thread);
         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                 /* success...*/