author     Mark Brown <broonie@kernel.org>    2014-10-20 12:55:07 -0400
committer  Mark Brown <broonie@kernel.org>    2014-10-20 13:27:32 -0400
commit     b7a40242c82cd73cfcea305f23e67d068dd8401a (patch)
tree       251b49d19cd7c371847ae1f951e1b537ca0e1c15 /drivers/md
parent     d26833bfce5e56017bea9f1f50838f20e18e7b7e (diff)
parent     9c6de47d53a3ce8df1642ae67823688eb98a190a (diff)
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts: drivers/spi/spi-dw-mid.c
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-cache-target.c |  4
-rw-r--r--  drivers/md/dm-crypt.c        | 25
-rw-r--r--  drivers/md/raid1.c           | 40
-rw-r--r--  drivers/md/raid10.c          |  7
-rw-r--r--  drivers/md/raid5.c           |  4
5 files changed, 52 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af40ee209e2..7130505c2425 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
         struct cache *cache = mg->cache;
 
         if (mg->writeback) {
-                cell_defer(cache, mg->old_ocell, false);
                 clear_dirty(cache, mg->old_oblock, mg->cblock);
+                cell_defer(cache, mg->old_ocell, false);
                 cleanup_migration(mg);
                 return;
 
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                 }
 
         } else {
+                clear_dirty(cache, mg->new_oblock, mg->cblock);
                 if (mg->requeue_holder)
                         cell_defer(cache, mg->new_ocell, true);
                 else {
                         bio_endio(mg->new_ocell->holder, 0);
                         cell_defer(cache, mg->new_ocell, false);
                 }
-                clear_dirty(cache, mg->new_oblock, mg->cblock);
                 cleanup_migration(mg);
         }
 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2785007e0e46..cd15e0801228 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1688,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         unsigned int key_size, opt_params;
         unsigned long long tmpll;
         int ret;
+        size_t iv_size_padding;
         struct dm_arg_set as;
         const char *opt_string;
         char dummy;
@@ -1724,20 +1725,32 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
         cc->dmreq_start = sizeof(struct ablkcipher_request);
         cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-        cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-        cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-                           ~(crypto_tfm_ctx_alignment() - 1);
+        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+        if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+                /* Allocate the padding exactly */
+                iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+                                & crypto_ablkcipher_alignmask(any_tfm(cc));
+        } else {
+                /*
+                 * If the cipher requires greater alignment than kmalloc
+                 * alignment, we don't know the exact position of the
+                 * initialization vector. We must assume worst case.
+                 */
+                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+        }
 
         cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-                        sizeof(struct dm_crypt_request) + cc->iv_size);
+                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
         if (!cc->req_pool) {
                 ti->error = "Cannot allocate crypt request mempool";
                 goto bad;
         }
 
         cc->per_bio_data_size = ti->per_bio_data_size =
-                sizeof(struct dm_crypt_io) + cc->dmreq_start +
-                sizeof(struct dm_crypt_request) + cc->iv_size;
+                ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+                      ARCH_KMALLOC_MINALIGN);
 
         cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
         if (!cc->page_pool) {
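The exact-padding branch added in the dm-crypt hunk above relies on a standard alignment idiom: for a power-of-two alignment a with mask a - 1, the expression -offset & (a - 1) gives exactly the number of bytes needed to round offset up to the next a-byte boundary, which is where the per-request initialization vector is placed. Below is a minimal userspace sketch of that arithmetic only (plain C, not kernel code; the alignmask of 7 and the sample offsets are made-up values):

#include <stdio.h>
#include <stddef.h>

/*
 * Sketch only: demonstrates the "-offset & alignmask" rounding used in the
 * dm-crypt hunk above.  An alignmask of 7 models a hypothetical cipher that
 * wants its IV 8-byte aligned.
 */
int main(void)
{
        size_t alignmask = 7;
        size_t offsets[] = { 40, 41, 47, 48 };

        for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
                size_t off = offsets[i];
                size_t pad = -off & alignmask;  /* exact padding, as in the first branch */

                printf("offset %zu -> padding %zu -> IV at %zu\n", off, pad, off + pad);
        }
        return 0;
}

For offset 41 the sketch prints a padding of 7, placing the IV at byte 48. When the cipher's alignmask is not below CRYPTO_MINALIGN, kmalloc's alignment guarantee no longer pins down where the request structure starts relative to that boundary, so the patch pads by the full alignmask instead of the exact amount, as the in-diff comment explains.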
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d7690f86fdb9..55de4f6f7eaf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
         has_nonrot_disk = 0;
         choose_next_idle = 0;
 
-        if (conf->mddev->recovery_cp < MaxSector &&
-            (this_sector + sectors >= conf->next_resync))
-                choose_first = 1;
-        else
-                choose_first = 0;
+        choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
 
         for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                 sector_t dist;
@@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf)
  * there is no normal IO happeing.  It must arrange to call
  * lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf)
+static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
         spin_lock_irq(&conf->resync_lock);
 
@@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf)
 
         /* block any new IO from starting */
         conf->barrier++;
+        conf->next_resync = sector_nr;
 
         /* For these conditions we must wait:
          * A: while the array is in frozen state
@@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf)
          * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
          *    next resync will reach to the window which normal bios are
          *    handling.
+         * D: while there are any active requests in the current window.
          */
         wait_event_lock_irq(conf->wait_barrier,
                             !conf->array_frozen &&
                             conf->barrier < RESYNC_DEPTH &&
+                            conf->current_window_requests == 0 &&
                             (conf->start_next_window >=
                              conf->next_resync + RESYNC_SECTORS),
                             conf->resync_lock);
 
+        conf->nr_pending++;
         spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf)
         BUG_ON(conf->barrier <= 0);
         spin_lock_irqsave(&conf->resync_lock, flags);
         conf->barrier--;
+        conf->nr_pending--;
         spin_unlock_irqrestore(&conf->resync_lock, flags);
         wake_up(&conf->wait_barrier);
 }
@@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
         if (conf->array_frozen || !bio)
                 wait = true;
         else if (conf->barrier && bio_data_dir(bio) == WRITE) {
-                if (conf->next_resync < RESYNC_WINDOW_SECTORS)
-                        wait = true;
-                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
-                          >= bio_end_sector(bio)) ||
-                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                          <= bio->bi_iter.bi_sector))
+                if ((conf->mddev->curr_resync_completed
+                     >= bio_end_sector(bio)) ||
+                    (conf->next_resync + NEXT_NORMALIO_DISTANCE
+                     <= bio->bi_iter.bi_sector))
                         wait = false;
                 else
                         wait = true;
@@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
         }
 
         if (bio && bio_data_dir(bio) == WRITE) {
-                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                    <= bio->bi_iter.bi_sector) {
+                if (bio->bi_iter.bi_sector >=
+                    conf->mddev->curr_resync_completed) {
                         if (conf->start_next_window == MaxSector)
                                 conf->start_next_window =
                                         conf->next_resync +
@@ -1186,6 +1185,7 @@ read_again:
                                    atomic_read(&bitmap->behind_writes) == 0);
                 }
                 r1_bio->read_disk = rdisk;
+                r1_bio->start_next_window = 0;
 
                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
@@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf)
         mempool_destroy(conf->r1buf_pool);
         conf->r1buf_pool = NULL;
 
+        spin_lock_irq(&conf->resync_lock);
         conf->next_resync = 0;
         conf->start_next_window = MaxSector;
+        conf->current_window_requests +=
+                conf->next_window_requests;
+        conf->next_window_requests = 0;
+        spin_unlock_irq(&conf->resync_lock);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
@@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                         d--;
                         rdev = conf->mirrors[d].rdev;
                         if (rdev &&
-                            test_bit(In_sync, &rdev->flags))
+                            !test_bit(Faulty, &rdev->flags))
                                 r1_sync_page_io(rdev, sect, s,
                                                 conf->tmppage, WRITE);
                 }
@@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                         d--;
                         rdev = conf->mirrors[d].rdev;
                         if (rdev &&
-                            test_bit(In_sync, &rdev->flags)) {
+                            !test_bit(Faulty, &rdev->flags)) {
                                 if (r1_sync_page_io(rdev, sect, s,
                                                     conf->tmppage, READ)) {
                                         atomic_add(s, &rdev->corrected_errors);
@@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 
         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
-        raise_barrier(conf);
 
-        conf->next_resync = sector_nr;
+        raise_barrier(conf, sector_nr);
 
         rcu_read_lock();
         /*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b08c18871323..6703751d87d7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
          */
         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                 end_reshape(conf);
+                close_sync(conf);
                 return 0;
         }
 
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                         }
 
                         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                        r10_bio->state = 0;
                         raise_barrier(conf, rb2 != NULL);
                         atomic_set(&r10_bio->remaining, 0);
 
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                 if (sync_blocks < max_sync)
                         max_sync = sync_blocks;
                 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                r10_bio->state = 0;
 
                 r10_bio->mddev = mddev;
                 atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
         /* Now schedule reads for blocks from sector_nr to last */
         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+        r10_bio->state = 0;
         raise_barrier(conf, sectors_done != 0);
         atomic_set(&r10_bio->remaining, 0);
         r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
                  * on all the target devices.
                  */
                 // FIXME
+                mempool_free(r10_bio, conf->r10buf_pool);
                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                 return sectors_done;
         }
@@ -4410,7 +4415,7 @@ read_more:
         read_bio->bi_private = r10_bio;
         read_bio->bi_end_io = end_sync_read;
         read_bio->bi_rw = READ;
-        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+        read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
         read_bio->bi_flags |= 1 << BIO_UPTODATE;
         read_bio->bi_vcnt = 0;
         read_bio->bi_iter.bi_size = 0;
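The bi_flags change in the last raid10 hunk rewrites the reset mask as (~0UL << BIO_RESET_BITS): shifting an all-ones word left by n produces a mask whose low n bits are clear, so ANDing with it drops flag bits 0..n-1 and preserves every bit from n upward. A small sketch of that masking (plain C, not kernel code; the value 13 and the flag word are made-up stand-ins for BIO_RESET_BITS and a real bi_flags value):

#include <stdio.h>

/*
 * Sketch only: "flags & (~0UL << n)" clears flag bits 0..n-1 and keeps
 * bits n and above.  n = 13 and the flag word are invented for
 * illustration; they are not the kernel's actual values.
 */
int main(void)
{
        unsigned int n = 13;
        unsigned long flags = 0xA005UL;         /* bits 0, 2, 13 and 15 set */
        unsigned long kept = flags & (~0UL << n);

        printf("before: %#lx  after reset: %#lx\n", flags, kept);      /* 0xa005 -> 0xa000 */
        return 0;
}

The diff then re-sets BIO_UPTODATE explicitly on the next line, so only the low state bits are discarded while whatever the bio layer keeps in the higher bits survives the reset.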
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6234b2e84587..183588b11fc1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
               !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
              (sh->raid_conf->level == 6 && s->failed && s->to_write &&
-              s->to_write < sh->raid_conf->raid_disks - 2 &&
+              s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
               (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
                 /* we would like to get this block, possibly by computing it,
                  * otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
                                 set_bit(R5_Wantwrite, &dev->flags);
                                 if (prexor)
                                         continue;
+                                if (s.failed > 1)
+                                        continue;
                                 if (!test_bit(R5_Insync, &dev->flags) ||
                                     ((i == sh->pd_idx || i == sh->qd_idx) &&
                                       s.failed == 0))