author | Mark Brown <broonie@kernel.org> | 2014-10-20 12:55:07 -0400 |
---|---|---|
committer | Mark Brown <broonie@kernel.org> | 2014-10-20 13:27:32 -0400 |
commit | b7a40242c82cd73cfcea305f23e67d068dd8401a (patch) | |
tree | 251b49d19cd7c371847ae1f951e1b537ca0e1c15 /drivers/md/raid1.c | |
parent | d26833bfce5e56017bea9f1f50838f20e18e7b7e (diff) | |
parent | 9c6de47d53a3ce8df1642ae67823688eb98a190a (diff) | |
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts:
drivers/spi/spi-dw-mid.c
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r-- | drivers/md/raid1.c | 40
1 file changed, 22 insertions(+), 18 deletions(-)
```diff
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d7690f86fdb9..55de4f6f7eaf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
         has_nonrot_disk = 0;
         choose_next_idle = 0;
 
-        if (conf->mddev->recovery_cp < MaxSector &&
-            (this_sector + sectors >= conf->next_resync))
-                choose_first = 1;
-        else
-                choose_first = 0;
+        choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
 
         for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                 sector_t dist;
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
  * there is no normal IO happeing.  It must arrange to call
  * lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf)
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
         spin_lock_irq(&conf->resync_lock);
 
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
 
         /* block any new IO from starting */
         conf->barrier++;
+        conf->next_resync = sector_nr;
 
         /* For these conditions we must wait:
          * A: while the array is in frozen state
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
          * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
          *    next resync will reach to the window which normal bios are
          *    handling.
+         * D: while there are any active requests in the current window.
          */
         wait_event_lock_irq(conf->wait_barrier,
                             !conf->array_frozen &&
                             conf->barrier < RESYNC_DEPTH &&
+                            conf->current_window_requests == 0 &&
                             (conf->start_next_window >=
                              conf->next_resync + RESYNC_SECTORS),
                             conf->resync_lock);
 
+        conf->nr_pending++;
         spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
         BUG_ON(conf->barrier <= 0);
         spin_lock_irqsave(&conf->resync_lock, flags);
         conf->barrier--;
+        conf->nr_pending--;
         spin_unlock_irqrestore(&conf->resync_lock, flags);
         wake_up(&conf->wait_barrier);
 }
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
         if (conf->array_frozen || !bio)
                 wait = true;
         else if (conf->barrier && bio_data_dir(bio) == WRITE) {
-                if (conf->next_resync < RESYNC_WINDOW_SECTORS)
-                        wait = true;
-                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
-                          >= bio_end_sector(bio)) ||
-                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                          <= bio->bi_iter.bi_sector))
+                if ((conf->mddev->curr_resync_completed
+                     >= bio_end_sector(bio)) ||
+                    (conf->next_resync + NEXT_NORMALIO_DISTANCE
+                     <= bio->bi_iter.bi_sector))
                         wait = false;
                 else
                         wait = true;
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
         }
 
         if (bio && bio_data_dir(bio) == WRITE) {
-                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                    <= bio->bi_iter.bi_sector) {
+                if (bio->bi_iter.bi_sector >=
+                    conf->mddev->curr_resync_completed) {
                         if (conf->start_next_window == MaxSector)
                                 conf->start_next_window =
                                         conf->next_resync +
@@ -1186,6 +1185,7 @@ read_again:
                                    atomic_read(&bitmap->behind_writes) == 0);
                 }
                 r1_bio->read_disk = rdisk;
+                r1_bio->start_next_window = 0;
 
                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
         mempool_destroy(conf->r1buf_pool);
         conf->r1buf_pool = NULL;
 
+        spin_lock_irq(&conf->resync_lock);
         conf->next_resync = 0;
         conf->start_next_window = MaxSector;
+        conf->current_window_requests +=
+                conf->next_window_requests;
+        conf->next_window_requests = 0;
+        spin_unlock_irq(&conf->resync_lock);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                         d--;
                         rdev = conf->mirrors[d].rdev;
                         if (rdev &&
-                            test_bit(In_sync, &rdev->flags))
+                            !test_bit(Faulty, &rdev->flags))
                                 r1_sync_page_io(rdev, sect, s,
                                                 conf->tmppage, WRITE);
                 }
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                         d--;
                         rdev = conf->mirrors[d].rdev;
                         if (rdev &&
-                            test_bit(In_sync, &rdev->flags)) {
+                            !test_bit(Faulty, &rdev->flags)) {
                                 if (r1_sync_page_io(rdev, sect, s,
                                                     conf->tmppage, READ)) {
                                         atomic_add(s, &rdev->corrected_errors);
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 
         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
-        raise_barrier(conf);
 
-        conf->next_resync = sector_nr;
+        raise_barrier(conf, sector_nr);
 
         rcu_read_lock();
         /*
```
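
The raid1.c changes above centre on the resync barrier: raise_barrier() now receives the resync start sector, records it in conf->next_resync, and counts itself in nr_pending under conf->resync_lock, with lower_barrier() undoing both. Below is a minimal userspace sketch of just that accounting; the toy_conf type is a stand-in for struct r1conf, and the locking and wait_event_lock_irq() conditions are deliberately omitted, so treat it as an illustration rather than the kernel code:

```c
/* Illustrative sketch only: models the barrier/next_resync accounting that
 * the patch moves into raise_barrier(). Field names mirror struct r1conf,
 * but this is not kernel code; locking and the wait conditions are omitted. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct toy_conf {
	int barrier;            /* activities that preclude normal IO */
	int nr_pending;         /* requests counted against the barrier */
	sector_t next_resync;   /* start of the next resync window */
};

/* After the patch, the resync start sector is passed in and recorded here,
 * together with the nr_pending increment (in the kernel, under resync_lock). */
static void raise_barrier(struct toy_conf *conf, sector_t sector_nr)
{
	conf->barrier++;
	conf->next_resync = sector_nr;
	conf->nr_pending++;
}

static void lower_barrier(struct toy_conf *conf)
{
	conf->barrier--;
	conf->nr_pending--;
}

int main(void)
{
	struct toy_conf conf = { 0, 0, 0 };

	raise_barrier(&conf, 2048);   /* as sync_request() now calls it */
	printf("barrier=%d nr_pending=%d next_resync=%llu\n",
	       conf.barrier, conf.nr_pending, conf.next_resync);
	lower_barrier(&conf);
	printf("barrier=%d nr_pending=%d next_resync=%llu\n",
	       conf.barrier, conf.nr_pending, conf.next_resync);
	return 0;
}
```

Judging from the diff, the point of the reshuffle is that next_resync is now only updated together with the barrier count and under the same lock, rather than by the caller after the barrier has already been raised.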