path: root/drivers/md/raid1.c
author		NeilBrown <neilb@suse.com>	2015-09-05 05:07:04 -0400
committer	NeilBrown <neilb@suse.com>	2015-09-05 05:08:32 -0400
commit		e89c6fdf9e0eb1b5a03574d4ca73e83eae8deb91 (patch)
tree		f9df292ed03a5a3c4ddc658ae3646f02a1ffafce /drivers/md/raid1.c
parent		c3cce6cda162eb2b2960a85d9c8992f4f3be85d0 (diff)
parent		1081230b748de8f03f37f80c53dfa89feda9b8de (diff)
Merge linux-block/for-4.3/core into md/for-linux

There were a few conflicts that are fairly easy to resolve.

Signed-off-by: NeilBrown <neilb@suse.com>
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c | 115
1 file changed, 29 insertions(+), 86 deletions(-)
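Almost everything below is mechanical fallout from two block-layer changes pulled in from for-4.3/core: bio_endio() loses its error argument (completion status now travels in bio->bi_error, and bi_flags is touched through accessors such as bio_clear_flag()), and merge_bvec_fn is removed, which lets raid1 drop the Unmerged rdev bookkeeping. A minimal sketch of the endio migration, with hypothetical callback names rather than the actual raid1 functions:

/* Pre-4.3 style: completion status arrives as an argument plus a bio flag. */
static void my_end_io_old(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);

	if (!uptodate)
		pr_err("I/O error %d\n", error);
	bio_put(bio);
}

/* 4.3 style: one argument; any error is read from bio->bi_error. */
static void my_end_io_new(struct bio *bio)
{
	if (bio->bi_error)
		pr_err("I/O error %d\n", bio->bi_error);
	bio_put(bio);
}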
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3d9ca836247f..4517f06c41ba 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -255,9 +255,10 @@ static void call_bio_endio(struct r1bio *r1_bio)
 		done = 1;
 
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		bio->bi_error = -EIO;
+
 	if (done) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		/*
 		 * Wake up any possible resync thread that waits for the device
 		 * to go idle.
@@ -312,9 +313,9 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 	return mirror;
 }
 
-static void raid1_end_read_request(struct bio *bio, int error)
+static void raid1_end_read_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct r1bio *r1_bio = bio->bi_private;
 	int mirror;
 	struct r1conf *conf = r1_bio->mddev->private;
@@ -397,9 +398,8 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
 	}
 }
 
-static void raid1_end_write_request(struct bio *bio, int error)
+static void raid1_end_write_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r1bio *r1_bio = bio->bi_private;
 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 	struct r1conf *conf = r1_bio->mddev->private;
@@ -410,7 +410,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
 	/*
 	 * 'one mirror IO has finished' event handler:
 	 */
-	if (!uptodate) {
+	if (bio->bi_error) {
 		set_bit(WriteErrorSeen,
 			&conf->mirrors[mirror].rdev->flags);
 		if (!test_and_set_bit(WantReplacement,
@@ -557,7 +557,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (r1_bio->bios[disk] == IO_BLOCKED
 		    || rdev == NULL
-		    || test_bit(Unmerged, &rdev->flags)
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
 		if (!test_bit(In_sync, &rdev->flags) &&
@@ -708,38 +707,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 	return best_disk;
 }
 
-static int raid1_mergeable_bvec(struct mddev *mddev,
-				struct bvec_merge_data *bvm,
-				struct bio_vec *biovec)
-{
-	struct r1conf *conf = mddev->private;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int max = biovec->bv_len;
-
-	if (mddev->merge_check_needed) {
-		int disk;
-		rcu_read_lock();
-		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
-			struct md_rdev *rdev = rcu_dereference(
-				conf->mirrors[disk].rdev);
-			if (rdev && !test_bit(Faulty, &rdev->flags)) {
-				struct request_queue *q =
-					bdev_get_queue(rdev->bdev);
-				if (q->merge_bvec_fn) {
-					bvm->bi_sector = sector +
-						rdev->data_offset;
-					bvm->bi_bdev = rdev->bdev;
-					max = min(max, q->merge_bvec_fn(
-							q, bvm, biovec));
-				}
-			}
-		}
-		rcu_read_unlock();
-	}
-	return max;
-
-}
-
 static int raid1_congested(struct mddev *mddev, int bits)
 {
 	struct r1conf *conf = mddev->private;
@@ -793,7 +760,7 @@ static void flush_pending_writes(struct r1conf *conf)
 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			generic_make_request(bio);
 		bio = next;
@@ -1068,7 +1035,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			generic_make_request(bio);
 		bio = next;
@@ -1158,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 * non-zero, then it is the number of not-completed requests.
 	 */
 	bio->bi_phys_segments = 0;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	if (rw == READ) {
 		/*
@@ -1269,8 +1236,7 @@ read_again:
 			break;
 		}
 		r1_bio->bios[i] = NULL;
-		if (!rdev || test_bit(Faulty, &rdev->flags)
-		    || test_bit(Unmerged, &rdev->flags)) {
+		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 			if (i < conf->raid_disks)
 				set_bit(R1BIO_Degraded, &r1_bio->state);
 			continue;
@@ -1618,7 +1584,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	struct raid1_info *p;
 	int first = 0;
 	int last = conf->raid_disks - 1;
-	struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 	if (mddev->recovery_disabled == conf->recovery_disabled)
 		return -EBUSY;
@@ -1626,11 +1591,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
-	if (q->merge_bvec_fn) {
-		set_bit(Unmerged, &rdev->flags);
-		mddev->merge_check_needed = 1;
-	}
-
 	for (mirror = first; mirror <= last; mirror++) {
 		p = conf->mirrors+mirror;
 		if (!p->rdev) {
@@ -1662,19 +1622,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 		}
 	}
-	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
-		/* Some requests might not have seen this new
-		 * merge_bvec_fn. We must wait for them to complete
-		 * before merging the device fully.
-		 * First we make sure any code which has tested
-		 * our function has submitted the request, then
-		 * we wait for all outstanding requests to complete.
-		 */
-		synchronize_sched();
-		freeze_array(conf, 0);
-		unfreeze_array(conf);
-		clear_bit(Unmerged, &rdev->flags);
-	}
 	md_integrity_add_rdev(rdev, mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1738,7 +1685,7 @@ abort:
 	return err;
 }
 
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
 {
 	struct r1bio *r1_bio = bio->bi_private;
 
@@ -1749,16 +1696,16 @@ static void end_sync_read(struct bio *bio, int error)
 	 * or re-read if the read failed.
 	 * We don't do much here, just schedule handling by raid1d
 	 */
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (!bio->bi_error)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 
 	if (atomic_dec_and_test(&r1_bio->remaining))
 		reschedule_retry(r1_bio);
 }
 
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct r1bio *r1_bio = bio->bi_private;
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
@@ -1945,7 +1892,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 		idx ++;
 	}
 	set_bit(R1BIO_Uptodate, &r1_bio->state);
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
+	bio->bi_error = 0;
 	return 1;
 }
 
@@ -1969,15 +1916,14 @@ static void process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
-		int uptodate;
+		int error;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
-		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
+		/* fixup the bio for reuse, but preserve errno */
+		error = b->bi_error;
 		bio_reset(b);
-		if (!uptodate)
-			clear_bit(BIO_UPTODATE, &b->bi_flags);
+		b->bi_error = error;
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -2000,7 +1946,7 @@ static void process_checks(struct r1bio *r1_bio)
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+		    !r1_bio->bios[primary]->bi_error) {
 			r1_bio->bios[primary]->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
 			break;
@@ -2010,14 +1956,14 @@ static void process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
-		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
+		int error = sbio->bi_error;
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
-		/* Now we can 'fixup' the BIO_UPTODATE flag */
-		set_bit(BIO_UPTODATE, &sbio->bi_flags);
+		/* Now we can 'fixup' the error value */
+		sbio->bi_error = 0;
 
-		if (uptodate) {
+		if (!error) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2032,7 +1978,7 @@ static void process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && uptodate)) {
+			      && !error)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2273,11 +2219,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 		struct bio *bio = r1_bio->bios[m];
 		if (bio->bi_end_io == NULL)
 			continue;
-		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+		if (!bio->bi_error &&
 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
 		}
-		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+		if (bio->bi_error &&
 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
 				md_error(conf->mddev, rdev);
@@ -2741,7 +2687,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 				/* remove last page from this bio */
 				bio->bi_vcnt--;
 				bio->bi_iter.bi_size -= len;
-				__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+				bio_clear_flag(bio, BIO_SEG_VALID);
 			}
 			goto bio_full;
 		}
@@ -2836,8 +2782,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 			goto abort;
 		disk->rdev = rdev;
 		q = bdev_get_queue(rdev->bdev);
-		if (q->merge_bvec_fn)
-			mddev->merge_check_needed = 1;
 
 		disk->head_position = 0;
 		disk->seq_start = MaxSector;
@@ -3204,7 +3148,6 @@ static struct md_personality raid1_personality =
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
 	.congested	= raid1_congested,
-	.mergeable_bvec	= raid1_mergeable_bvec,
 };
 
 static int __init raid_init(void)
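
A note on the second half of the change: with merge_bvec_fn gone, the 4.3 block layer splits oversized bios itself instead of asking each member device how much it will accept, so the Unmerged flag and the raid1_mergeable_bvec() callback above become dead code. A rough sketch of the replacement pattern, assuming the 4.3-era blk_queue_split() API and a hypothetical driver entry point:

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	/* Let the core split any bio that exceeds this queue's limits;
	 * this replaces the per-driver merge_bvec_fn negotiation. */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... route the now suitably-sized bio to a member device ... */
}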