author	NeilBrown <neilb@suse.de>	2009-02-24 21:18:47 -0500
committer	NeilBrown <neilb@suse.de>	2009-02-24 21:18:47 -0500
commit	73d5c38a9536142e062c35997b044e89166e063b (patch)
tree	68fd385cbdee1fa75269974ef210d53a0ae5e311 /drivers/md
parent	78200d45cde2a79c0d0ae0407883bb264caa3c18 (diff)
md: avoid races when stopping resync.
There has been a race in raid10 and raid1 for a long time which has
only recently started showing up due to a scheduler change.

When a sync_read request finishes, as soon as reschedule_retry is
called, another thread can mark the resync request as having completed,
so md_do_sync can finish, ->stop can be called, and ->conf can be
freed.  So using conf after reschedule_retry is not safe.

Similarly, when finishing a sync_write, calling md_done_sync must be
the last thing we do, as it allows a chain of events which will free
conf and other data structures.

The first of these requires action in raid10.c
The second requires action in raid1.c and raid10.c

Cc: stable@kernel.org
Signed-off-by: NeilBrown <neilb@suse.de>
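Not part of the patch: a minimal user-space sketch of the same ordering rule,
with made-up names (pool, buf, release_buf, signal_done) standing in for conf,
r1_bio, put_buf() and md_done_sync(). It illustrates the constraint the patch
enforces: cache what you need from the request, return the buffer while the
shared state is still guaranteed to exist, and make the completion call the
very last step.

#include <stdlib.h>

struct pool {				/* stands in for conf */
	long done_sectors;
	int bufs_out;
};

struct buf {				/* stands in for r1_bio */
	long sectors;
	struct pool *pool;
};

/* Returning a buffer still dereferences the pool,
 * just as put_buf() dereferences conf. */
static void release_buf(struct buf *b)
{
	b->pool->bufs_out--;
	free(b);
}

/* Signalling completion can let another thread tear the pool down;
 * model that by freeing it immediately. */
static void signal_done(struct pool *p, long sectors)
{
	p->done_sectors += sectors;
	free(p);
}

static void end_write(struct buf *b)
{
	long s = b->sectors;		/* cache before the buffer is gone */
	struct pool *p = b->pool;

	release_buf(b);			/* needs the pool to still exist */
	signal_done(p, s);		/* must be strictly last */
}

int main(void)
{
	struct pool *p = calloc(1, sizeof(*p));
	struct buf *b = malloc(sizeof(*b));

	b->sectors = 8;
	b->pool = p;
	p->bufs_out = 1;
	end_write(b);			/* swapping the last two calls in end_write()
					 * would touch the pool after it was freed */
	return 0;
}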
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid1.c	3
-rw-r--r--	drivers/md/raid10.c	7
2 files changed, 6 insertions, 4 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 01e3cffd03b..e2466425d9c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		md_done_sync(mddev, r1_bio->sectors, uptodate);
+		sector_t s = r1_bio->sectors;
 		put_buf(r1_bio);
+		md_done_sync(mddev, s, uptodate);
 	}
 }
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e1feb87afc6..7301631abe0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
 	/* for reconstruct, we always reschedule after a read.
 	 * for resync, only after all reads
 	 */
+	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
 	    atomic_dec_and_test(&r10_bio->remaining)) {
 		/* we have read all the blocks,
@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
 		 */
 		reschedule_retry(r10_bio);
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
 
 	update_head_pos(i, r10_bio);
 
+	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 	while (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (r10_bio->master_bio == NULL) {
 			/* the primary of several recovery bios */
-			md_done_sync(mddev, r10_bio->sectors, 1);
+			sector_t s = r10_bio->sectors;
 			put_buf(r10_bio);
+			md_done_sync(mddev, s, 1);
 			break;
 		} else {
 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
 			r10_bio = r10_bio2;
 		}
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*