about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/md
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.com>2016-06-02 02:19:52 -0400
committerShaohua Li <shli@fb.com>2016-06-13 14:54:15 -0400
commitd094d6860b6678057f70dee27121ea4860c55e06 (patch)
treef15d56552c46374fea09de2818e49aec0437626f /drivers/md
parentf90145f317efad72e6552cecb09ab7a4e5d1e404 (diff)
md/raid10: add rcu protection to rdev access during reshape.
mirrors[].rdev can become NULL at any point unless:

  - a counted reference is held,
  - ->reconfig_mutex is held, or
  - rcu_read_lock() is held.

Reshape isn't always suitably careful, as in the past rdev couldn't be removed during reshape. It can now, so add protection.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/raid10.c30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cb997c63bfe0..e644f6f5c4a7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4361,15 +4361,16 @@ read_more:
4361 blist = read_bio; 4361 blist = read_bio;
4362 read_bio->bi_next = NULL; 4362 read_bio->bi_next = NULL;
4363 4363
4364 rcu_read_lock();
4364 for (s = 0; s < conf->copies*2; s++) { 4365 for (s = 0; s < conf->copies*2; s++) {
4365 struct bio *b; 4366 struct bio *b;
4366 int d = r10_bio->devs[s/2].devnum; 4367 int d = r10_bio->devs[s/2].devnum;
4367 struct md_rdev *rdev2; 4368 struct md_rdev *rdev2;
4368 if (s&1) { 4369 if (s&1) {
4369 rdev2 = conf->mirrors[d].replacement; 4370 rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4370 b = r10_bio->devs[s/2].repl_bio; 4371 b = r10_bio->devs[s/2].repl_bio;
4371 } else { 4372 } else {
4372 rdev2 = conf->mirrors[d].rdev; 4373 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4373 b = r10_bio->devs[s/2].bio; 4374 b = r10_bio->devs[s/2].bio;
4374 } 4375 }
4375 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4376 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
@@ -4414,6 +4415,7 @@ read_more:
4414 nr_sectors += len >> 9; 4415 nr_sectors += len >> 9;
4415 } 4416 }
4416bio_full: 4417bio_full:
4418 rcu_read_unlock();
4417 r10_bio->sectors = nr_sectors; 4419 r10_bio->sectors = nr_sectors;
4418 4420
4419 /* Now submit the read */ 4421 /* Now submit the read */
@@ -4465,16 +4467,20 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4465 struct bio *b; 4467 struct bio *b;
4466 int d = r10_bio->devs[s/2].devnum; 4468 int d = r10_bio->devs[s/2].devnum;
4467 struct md_rdev *rdev; 4469 struct md_rdev *rdev;
4470 rcu_read_lock();
4468 if (s&1) { 4471 if (s&1) {
4469 rdev = conf->mirrors[d].replacement; 4472 rdev = rcu_dereference(conf->mirrors[d].replacement);
4470 b = r10_bio->devs[s/2].repl_bio; 4473 b = r10_bio->devs[s/2].repl_bio;
4471 } else { 4474 } else {
4472 rdev = conf->mirrors[d].rdev; 4475 rdev = rcu_dereference(conf->mirrors[d].rdev);
4473 b = r10_bio->devs[s/2].bio; 4476 b = r10_bio->devs[s/2].bio;
4474 } 4477 }
4475 if (!rdev || test_bit(Faulty, &rdev->flags)) 4478 if (!rdev || test_bit(Faulty, &rdev->flags)) {
4479 rcu_read_unlock();
4476 continue; 4480 continue;
4481 }
4477 atomic_inc(&rdev->nr_pending); 4482 atomic_inc(&rdev->nr_pending);
4483 rcu_read_unlock();
4478 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4484 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4479 atomic_inc(&r10_bio->remaining); 4485 atomic_inc(&r10_bio->remaining);
4480 b->bi_next = NULL; 4486 b->bi_next = NULL;
@@ -4535,9 +4541,10 @@ static int handle_reshape_read_error(struct mddev *mddev,
4535 if (s > (PAGE_SIZE >> 9)) 4541 if (s > (PAGE_SIZE >> 9))
4536 s = PAGE_SIZE >> 9; 4542 s = PAGE_SIZE >> 9;
4537 4543
4544 rcu_read_lock();
4538 while (!success) { 4545 while (!success) {
4539 int d = r10b->devs[slot].devnum; 4546 int d = r10b->devs[slot].devnum;
4540 struct md_rdev *rdev = conf->mirrors[d].rdev; 4547 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4541 sector_t addr; 4548 sector_t addr;
4542 if (rdev == NULL || 4549 if (rdev == NULL ||
4543 test_bit(Faulty, &rdev->flags) || 4550 test_bit(Faulty, &rdev->flags) ||
@@ -4545,11 +4552,15 @@ static int handle_reshape_read_error(struct mddev *mddev,
4545 goto failed; 4552 goto failed;
4546 4553
4547 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4554 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4555 atomic_inc(&rdev->nr_pending);
4556 rcu_read_unlock();
4548 success = sync_page_io(rdev, 4557 success = sync_page_io(rdev,
4549 addr, 4558 addr,
4550 s << 9, 4559 s << 9,
4551 bvec[idx].bv_page, 4560 bvec[idx].bv_page,
4552 READ, false); 4561 READ, false);
4562 rdev_dec_pending(rdev, mddev);
4563 rcu_read_lock();
4553 if (success) 4564 if (success)
4554 break; 4565 break;
4555 failed: 4566 failed:
@@ -4559,6 +4570,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
4559 if (slot == first_slot) 4570 if (slot == first_slot)
4560 break; 4571 break;
4561 } 4572 }
4573 rcu_read_unlock();
4562 if (!success) { 4574 if (!success) {
4563 /* couldn't read this block, must give up */ 4575 /* couldn't read this block, must give up */
4564 set_bit(MD_RECOVERY_INTR, 4576 set_bit(MD_RECOVERY_INTR,
@@ -4628,16 +4640,18 @@ static void raid10_finish_reshape(struct mddev *mddev)
4628 } 4640 }
4629 } else { 4641 } else {
4630 int d; 4642 int d;
4643 rcu_read_lock();
4631 for (d = conf->geo.raid_disks ; 4644 for (d = conf->geo.raid_disks ;
4632 d < conf->geo.raid_disks - mddev->delta_disks; 4645 d < conf->geo.raid_disks - mddev->delta_disks;
4633 d++) { 4646 d++) {
4634 struct md_rdev *rdev = conf->mirrors[d].rdev; 4647 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4635 if (rdev) 4648 if (rdev)
4636 clear_bit(In_sync, &rdev->flags); 4649 clear_bit(In_sync, &rdev->flags);
4637 rdev = conf->mirrors[d].replacement; 4650 rdev = rcu_dereference(conf->mirrors[d].replacement);
4638 if (rdev) 4651 if (rdev)
4639 clear_bit(In_sync, &rdev->flags); 4652 clear_bit(In_sync, &rdev->flags);
4640 } 4653 }
4654 rcu_read_unlock();
4641 } 4655 }
4642 mddev->layout = mddev->new_layout; 4656 mddev->layout = mddev->new_layout;
4643 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4657 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;