path: root/drivers/md/raid1.c
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (r1_bio->mddev->degraded == conf->raid_disks ||
 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
 			uptodate = 1;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
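
Note: this hunk pairs with the error() and raid1_spare_active() hunks further down. A read completion on the last working device is now only treated as a success if that device is still In_sync (not merely "not yet Faulty"), and the test relies on the In_sync bits and ->degraded changing together under conf->device_lock. A minimal sketch of that invariant, with simplified names rather than the exact kernel code:

	/* writer side (failure / spare activation), simplified sketch */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;		/* flag and counter move together */
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* reader side (raid1_end_read_request), simplified sketch */
	spin_lock_irqsave(&conf->device_lock, flags);
	last_working = (mddev->degraded == conf->raid_disks - 1) &&
		       test_bit(In_sync, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);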
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
-	     md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 		    this_sector + sectors)))
 		choose_first = 1;
 	else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	    ((bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
 	    (mddev_is_clustered(mddev) &&
-	     md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+	     md_cluster_ops->area_resyncing(mddev, WRITE,
+		     bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
 			    (mddev_is_clustered(mddev) &&
-			     !md_cluster_ops->area_resyncing(mddev,
+			     !md_cluster_ops->area_resyncing(mddev, WRITE,
 				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
 				break;
 			schedule();
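
Note: the three hunks above add an explicit I/O direction to the clustered-md resync query: READ when read_balance() decides which mirror to read from, WRITE when make_request() waits for a remote resync window to clear. A rough sketch of the callback shape this implies; the real definition lives in the md-cluster code, not in this file, and may differ in detail:

	/* assumed shape of the updated md_cluster_operations member */
	struct md_cluster_operations {
		/* ... other callbacks ... */
		int (*area_resyncing)(struct mddev *mddev, int direction,
				      sector_t lo, sector_t hi);
	};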
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
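
Note: reconstructed from the two error() hunks above, the post-patch flow takes conf->device_lock once, before testing In_sync, so the In_sync-to-Faulty transition and the ->degraded increment become visible to raid1_end_read_request() as a single step:

	set_bit(Blocked, &rdev->flags);
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
	} else
		set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);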
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
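Note: taken together, the last two hunks move the device_lock acquisition in raid1_spare_active() from just around the final ->degraded update to before the per-disk loop, so the In_sync transitions and the degraded count are adjusted in one critical section. A reconstructed outline of the post-patch shape, with the loop body elided:

	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		/* ... mark recovered devices In_sync and count them ... */
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);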