author     Jens Axboe <axboe@kernel.dk>    2011-10-19 08:30:42 -0400
committer  Jens Axboe <axboe@kernel.dk>    2011-10-19 08:30:42 -0400
commit     5c04b426f2e8b46cfc7969a35b2631063a3c646c (patch)
tree       2d27d9f5d2fe5d5e8fbc01a467ec58bcb50235c1 /drivers/md
parent     499337bb6511e665a236a6a947f819d98ea340c6 (diff)
parent     899e3ee404961a90b828ad527573aaaac39f0ab1 (diff)
Merge branch 'v3.1-rc10' into for-3.2/core

Conflicts:
	block/blk-core.c
	include/linux/blkdev.h

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c   |  2
-rw-r--r--  drivers/md/dm-flakey.c  |  4
-rw-r--r--  drivers/md/dm-raid.c    |  2
-rw-r--r--  drivers/md/dm-table.c   | 32
-rw-r--r--  drivers/md/linear.h     |  2
-rw-r--r--  drivers/md/md.c         | 50
-rw-r--r--  drivers/md/md.h         |  2
-rw-r--r--  drivers/md/multipath.c  |  3
-rw-r--r--  drivers/md/raid1.c      | 17
-rw-r--r--  drivers/md/raid10.c     | 52
-rw-r--r--  drivers/md/raid5.c      |  8
11 files changed, 116 insertions, 58 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528a..8c2a000cf3f5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->num_flush_requests = 1;
+	ti->discard_zeroes_data_unsupported = 1;
+
 	return 0;
 
 bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cfa..f84c08029b21 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
 		 */
 		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
-			if (!argc)
+			if (!argc) {
 				ti->error = "Feature corrupt_bio_byte requires parameters";
+				return -EINVAL;
+			}
 
 			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
 			if (r)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1e..86df8b2cf927 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				rs->ti->error = "write_mostly option is only valid for RAID1";
 				return -EINVAL;
 			}
-			if (value > rs->md.raid_disks) {
+			if (value >= rs->md.raid_disks) {
 				rs->ti->error = "Invalid write_mostly drive index given";
 				return -EINVAL;
 			}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb08..bc04518e9d8b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
 		return;
 
 	template_disk = dm_table_get_integrity_disk(t, true);
-	if (!template_disk &&
-	    blk_integrity_is_initialized(dm_disk(t->md))) {
+	if (template_disk)
+		blk_integrity_register(dm_disk(t->md),
+				       blk_get_integrity(template_disk));
+	else if (blk_integrity_is_initialized(dm_disk(t->md)))
 		DMWARN("%s: device no longer has a valid integrity profile",
 		       dm_device_name(t->md));
-		return;
-	}
-	blk_integrity_register(dm_disk(t->md),
-			       blk_get_integrity(template_disk));
+	else
+		DMWARN("%s: unable to establish an integrity profile",
+		       dm_device_name(t->md));
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
 	return 0;
 }
 
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	/* Ensure that all targets supports discard_zeroes_data. */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (ti->discard_zeroes_data_unsupported)
+			return 0;
+	}
+
+	return 1;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_flush(q, flush);
 
+	if (!dm_table_discard_zeroes_data(t))
+		q->limits.discard_zeroes_data = 0;
+
 	dm_table_set_integrity(t);
 
 	/*
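Taken together with the dm-crypt change at the top of this diffstat, these dm-table hunks implement a simple rule: if any one target in the table cannot guarantee that discarded blocks read back as zeroes, the whole mapped device stops advertising discard_zeroes_data. A standalone sketch of that rule follows (mock userspace types, not the kernel API):

```c
/* Mock illustration of the table-wide rule enforced above; these types
 * are stand-ins, not the real struct dm_target / struct dm_table. */
#include <stdbool.h>
#include <stdio.h>

struct mock_target {
	bool discard_zeroes_data_unsupported;	/* set by targets such as dm-crypt */
};

/* Mirrors dm_table_discard_zeroes_data(): true only if every target in
 * the table can guarantee that discarded blocks read back as zeroes. */
static bool table_discard_zeroes_data(const struct mock_target *targets, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		if (targets[i].discard_zeroes_data_unsupported)
			return false;	/* one opt-out disables it for the device */
	return true;
}

int main(void)
{
	struct mock_target table[] = {
		{ .discard_zeroes_data_unsupported = false },
		{ .discard_zeroes_data_unsupported = true },	/* e.g. a dm-crypt target */
	};

	/* dm_table_set_restrictions() would clear q->limits.discard_zeroes_data here. */
	printf("discard_zeroes_data = %d\n",
	       table_discard_zeroes_data(table, 2) ? 1 : 0);
	return 0;
}
```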
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 0ce29b61605a..2f2da05b2ce9 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
 
 struct linear_private_data
 {
+	struct rcu_head		rcu;
 	sector_t		array_sectors;
 	dev_info_t		disks[0];
-	struct rcu_head		rcu;
 };
 
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5c2178562c96..8f52d4eb78a0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
 static void autostart_arrays(int part);
 #endif
 
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
@@ -735,7 +740,12 @@ static void mddev_unlock(mddev_t * mddev)
 	} else
 		mutex_unlock(&mddev->reconfig_mutex);
 
+	/* As we've dropped the mutex we need a spinlock to
+	 * make sure the thread doesn't disappear
+	 */
+	spin_lock(&pers_lock);
 	md_wakeup_thread(mddev->thread);
+	spin_unlock(&pers_lock);
 }
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -844,7 +854,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	bio->bi_end_io = super_written;
 
 	atomic_inc(&mddev->pending_writes);
-	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+	submit_bio(WRITE_FLUSH_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -1134,8 +1144,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
 		ret = 0;
 	}
 	rdev->sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that */
+	if (rdev->sectors >= (2ULL << 32))
+		rdev->sectors = (2ULL << 32) - 2;
 
-	if (rdev->sectors < sb->size * 2 && sb->level > 1)
+	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
 		/* "this cannot possibly happen" ... */
 		ret = -EINVAL;
 
@@ -1169,7 +1182,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->clevel[0] = 0;
 		mddev->layout = sb->layout;
 		mddev->raid_disks = sb->raid_disks;
-		mddev->dev_sectors = sb->size * 2;
+		mddev->dev_sectors = ((sector_t)sb->size) * 2;
 		mddev->events = ev1;
 		mddev->bitmap_info.offset = 0;
 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1411,6 +1424,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	rdev->sb_start = calc_dev_sboffset(rdev);
 	if (!num_sectors || num_sectors > rdev->sb_start)
 		num_sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that.
+	 * 4TB == 2^32 KB, or 2*2^32 sectors.
+	 */
+	if (num_sectors >= (2ULL << 32))
+		num_sectors = (2ULL << 32) - 2;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
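As an aside, the arithmetic behind this cap can be checked directly. The following is a small standalone sketch (not part of the patch), assuming only what the comment above states: the v0.90 metadata records sizes in KB and cannot describe more than 4TB == 2^32 KB == 2*2^32 sectors.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The value the patch clamps to, in 512-byte sectors. */
	uint64_t cap_sectors = (2ULL << 32) - 2;
	uint64_t cap_kb      = cap_sectors / 2;		/* 2 sectors per KB */
	uint64_t cap_bytes   = cap_sectors * 512;

	/* Prints 8589934590 sectors = 4294967295 KB = 4398046510080 bytes,
	 * i.e. one KB short of the 4TB (2^32 KB) limit described above. */
	printf("%" PRIu64 " sectors = %" PRIu64 " KB = %" PRIu64 " bytes\n",
	       cap_sectors, cap_kb, cap_bytes);
	return 0;
}
```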
@@ -1734,6 +1752,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 	sb->level = cpu_to_le32(mddev->level);
 	sb->layout = cpu_to_le32(mddev->layout);
 
+	if (test_bit(WriteMostly, &rdev->flags))
+		sb->devflags |= WriteMostly1;
+	else
+		sb->devflags &= ~WriteMostly1;
+
 	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
 		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2557,7 +2580,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 	int err = -EINVAL;
 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
 		md_error(rdev->mddev, rdev);
-		err = 0;
+		if (test_bit(Faulty, &rdev->flags))
+			err = 0;
+		else
+			err = -EBUSY;
 	} else if (cmd_match(buf, "remove")) {
 		if (rdev->raid_disk >= 0)
 			err = -EBUSY;
@@ -2580,7 +2606,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		err = 0;
 	} else if (cmd_match(buf, "-blocked")) {
 		if (!test_bit(Faulty, &rdev->flags) &&
-		    test_bit(BlockedBadBlocks, &rdev->flags)) {
+		    rdev->badblocks.unacked_exist) {
 			/* metadata handler doesn't understand badblocks,
 			 * so we need to fail the device
 			 */
@@ -5979,6 +6005,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
 		return -ENODEV;
 
 	md_error(mddev, rdev);
+	if (!test_bit(Faulty, &rdev->flags))
+		return -EBUSY;
 	return 0;
 }
 
@@ -6407,11 +6435,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
 	return thread;
 }
 
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
 {
+	mdk_thread_t *thread = *threadp;
 	if (!thread)
 		return;
 	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+	/* Locking ensures that mddev_unlock does not wake_up a
+	 * non-existent thread
+	 */
+	spin_lock(&pers_lock);
+	*threadp = NULL;
+	spin_unlock(&pers_lock);
 
 	kthread_stop(thread->tsk);
 	kfree(thread);
@@ -7318,8 +7353,7 @@ static void reap_sync_thread(mddev_t *mddev)
 	mdk_rdev_t *rdev;
 
 	/* resync has finished, collect result */
-	md_unregister_thread(mddev->sync_thread);
-	mddev->sync_thread = NULL;
+	md_unregister_thread(&mddev->sync_thread);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		/* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index bd47847cf7ca..1509a3eb9ae1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
 extern int unregister_md_personality(struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
 				mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
 extern void md_wakeup_thread(mdk_thread_t *thread);
 extern void md_check_recovery(mddev_t *mddev);
 extern void md_write_start(mddev_t *mddev, struct bio *bi);
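This prototype change, together with the caller updates throughout this merge, changes the contract of md_unregister_thread(): the caller now passes the address of its thread pointer, and the function clears it under pers_lock before tearing the thread down, so the md_wakeup_thread() call in mddev_unlock() (also taken under pers_lock in this merge) never operates on a freed thread. A userspace sketch of that pattern with mock types, a pthread mutex standing in for pers_lock and free() for kthread_stop()/kfree():

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_thread { int wakeups; };		/* stand-in for mdk_thread_t */

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

static void mock_wakeup_thread(struct mock_thread *thread)
{
	if (thread)				/* NULL after unregister: nothing to wake */
		thread->wakeups++;
}

static void mock_unregister_thread(struct mock_thread **threadp)
{
	struct mock_thread *thread = *threadp;

	if (!thread)
		return;
	/* Clear the caller's pointer under the lock before freeing, so a
	 * concurrent locked wakeup sees either a live thread or NULL. */
	pthread_mutex_lock(&pers_lock);
	*threadp = NULL;
	pthread_mutex_unlock(&pers_lock);
	free(thread);
}

int main(void)
{
	struct mock_thread *thread = calloc(1, sizeof(*thread));

	/* mddev_unlock() path: the wakeup is done while holding pers_lock. */
	pthread_mutex_lock(&pers_lock);
	mock_wakeup_thread(thread);
	pthread_mutex_unlock(&pers_lock);

	mock_unregister_thread(&thread);	/* clears 'thread' for the caller */
	mock_wakeup_thread(thread);		/* now a harmless no-op */
	return 0;
}
```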
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 407cb5691425..618dd9e22513 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
 {
 	multipath_conf_t *conf = mddev->private;
 
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	mempool_destroy(conf->pool);
 	kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97f2a5f977b1..d4ddfa627301 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
-	r1_bio_write_done(r1_bio);
-
-	/* In case raid1d snuck in to freeze_array */
-	wake_up(&conf->wait_barrier);
-
+	/* Mustn't call r1_bio_write_done before this next test,
+	 * as it could result in the bio being freed.
+	 */
 	if (sectors_handled < (bio->bi_size >> 9)) {
+		r1_bio_write_done(r1_bio);
 		/* We need another r1_bio. It has already been counted
 		 * in bio->bi_phys_segments
 		 */
@@ -1117,6 +1116,11 @@ read_again:
 		goto retry_write;
 	}
 
+	r1_bio_write_done(r1_bio);
+
+	/* In case raid1d snuck in to freeze_array */
+	wake_up(&conf->wait_barrier);
+
 	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
 }
@@ -2556,8 +2560,7 @@ static int stop(mddev_t *mddev)
 	raise_barrier(conf);
 	lower_barrier(conf);
 
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 04b625e1cb60..ea5fc0b6a84c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
 	md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+	if (atomic_dec_and_test(&r10_bio->remaining)) {
+		if (test_bit(R10BIO_WriteError, &r10_bio->state))
+			reschedule_retry(r10_bio);
+		else {
+			close_write(r10_bio);
+			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+				reschedule_retry(r10_bio);
+			else
+				raid_end_bio_io(r10_bio);
+		}
+	}
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
 	 * Let's see if all mirrored write operations have finished
 	 * already.
 	 */
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		if (test_bit(R10BIO_WriteError, &r10_bio->state))
-			reschedule_retry(r10_bio);
-		else {
-			close_write(r10_bio);
-			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-				reschedule_retry(r10_bio);
-			else
-				raid_end_bio_io(r10_bio);
-		}
-	}
+	one_write_done(r10_bio);
 	if (dec_rdev)
 		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1125,20 +1130,12 @@ retry_write:
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		/* This matches the end of raid10_end_write_request() */
-		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-				r10_bio->sectors,
-				!test_bit(R10BIO_Degraded, &r10_bio->state),
-				0);
-		md_write_end(mddev);
-		raid_end_bio_io(r10_bio);
-	}
-
-	/* In case raid10d snuck in to freeze_array */
-	wake_up(&conf->wait_barrier);
+	/* Don't remove the bias on 'remaining' (one_write_done) until
+	 * after checking if we need to go around again.
+	 */
 
 	if (sectors_handled < (bio->bi_size >> 9)) {
+		one_write_done(r10_bio);
 		/* We need another r10_bio. It has already been counted
 		 * in bio->bi_phys_segments.
 		 */
@@ -1152,6 +1149,10 @@ retry_write:
 		r10_bio->state = 0;
 		goto retry_write;
 	}
+	one_write_done(r10_bio);
+
+	/* In case raid10d snuck in to freeze_array */
+	wake_up(&conf->wait_barrier);
 
 	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
@@ -2951,7 +2952,7 @@ static int run(mddev_t *mddev)
 	return 0;
 
 out_free_conf:
-	md_unregister_thread(mddev->thread);
+	md_unregister_thread(&mddev->thread);
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
@@ -2969,8 +2970,7 @@ static int stop(mddev_t *mddev)
 	raise_barrier(conf, 0);
 	lower_barrier(conf);
 
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96b7f6a1b6f2..83f2c44e170f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
 	/* wait for this device to become unblocked */
-	if (unlikely(s.blocked_rdev))
+	if (conf->mddev->external && unlikely(s.blocked_rdev))
 		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
 
 	if (s.handle_bad_blocks)
@@ -4939,8 +4939,7 @@ static int run(mddev_t *mddev)
 
 	return 0;
 abort:
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (conf) {
 		print_raid5_conf(conf);
 		free_conf(conf);
@@ -4954,8 +4953,7 @@ static int stop(mddev_t *mddev)
 {
 	raid5_conf_t *conf = mddev->private;
 
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (mddev->queue)
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 	free_conf(conf);