Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c | 72
1 file changed, 37 insertions(+), 35 deletions(-)
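
Every hunk in this diff is the same mechanical change: the bio's starting sector and remaining size are no longer read from bio->bi_sector and bio->bi_size but from the embedded bvec iterator, bio->bi_iter. A minimal before/after sketch of the access pattern (illustrative only, not taken from the diff; the two halves would not compile together):

	/* old layout: position and size are direct members of struct bio */
	sector_t     sector = bio->bi_sector;
	unsigned int bytes  = bio->bi_size;

	/* new layout: the same state lives in the embedded struct bvec_iter
	 * (bio->bi_iter), which is what every hunk below switches to */
	sector_t     sector = bio->bi_iter.bi_sector;
	unsigned int bytes  = bio->bi_iter.bi_size;
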
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 47da0af6322b..a5d9c0ee4d60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
 	int sectors = bio_sectors(bio);
-	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
 		return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_size = 0;
+		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi, 0);
@@ -854,10 +854,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->new_data_offset);
 			else
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
@@ -865,7 +865,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
+			bi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -901,15 +901,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				rbi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->new_data_offset);
 			else
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
-			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -944,10 +944,10 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
 
-	if (bio->bi_sector >= sector)
-		page_offset = (signed)(bio->bi_sector - sector) * 512;
+	if (bio->bi_iter.bi_sector >= sector)
+		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
 	else
-		page_offset = (signed)(sector - bio->bi_sector) * -512;
+		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
 	if (frombio)
 		flags |= ASYNC_TX_FENCE;
@@ -1014,7 +1014,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			BUG_ON(!dev->read);
 			rbi = dev->read;
 			dev->read = NULL;
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1050,7 +1050,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
 					dev->sector, tx);
@@ -1392,7 +1392,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
 
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
@@ -2616,7 +2616,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	int firstwrite=0;
 
 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
-		(unsigned long long)bi->bi_sector,
+		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
 	/*
@@ -2634,12 +2634,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		firstwrite = 1;
 	} else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if (bio_end_sector(*bip) > bi->bi_sector)
+	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2653,7 +2653,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		sector_t sector = sh->dev[dd_idx].sector;
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-			     bi && bi->bi_sector <= sector;
+			     bi && bi->bi_iter.bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bio_end_sector(bi) >= sector)
 				sector = bio_end_sector(bi);
@@ -2663,7 +2663,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	}
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-		(unsigned long long)(*bip)->bi_sector,
+		(unsigned long long)(*bip)->bi_iter.bi_sector,
 		(unsigned long long)sh->sector, dd_idx);
 	spin_unlock_irq(&sh->stripe_lock);
 
@@ -2738,7 +2738,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
 
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2757,7 +2757,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		if (bi) bitmap_end = 1;
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2781,7 +2781,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
-			while (bi && bi->bi_sector <
+			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
@@ -3005,7 +3005,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 				clear_bit(R5_UPTODATE, &dev->flags);
 				wbi = dev->written;
 				dev->written = NULL;
-				while (wbi && wbi->bi_sector <
+				while (wbi && wbi->bi_iter.bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
 					if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4097,7 +4097,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4234,9 +4234,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	/*
 	 * compute position
 	 */
-	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
-						   0,
-						   &dd_idx, NULL);
+	align_bi->bi_iter.bi_sector =
+		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+				     0, &dd_idx, NULL);
 
 	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
@@ -4261,7 +4261,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4270,7 +4271,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		}
 
 		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
+		align_bi->bi_iter.bi_sector += rdev->data_offset;
 
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -4282,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		if (mddev->gendisk)
 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
 					      align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_sector);
+					      raid_bio->bi_iter.bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4465,8 +4466,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		/* Skip discard while reshape is happening */
 		return;
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -4570,7 +4571,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		return;
 	}
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -5054,7 +5055,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	int remaining;
 	int handled = 0;
 
-	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = raid_bio->bi_iter.bi_sector &
+		~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
 	last_sector = bio_end_sector(raid_bio);