author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
commit	f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree	b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers/md/raid5.c
parent	d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent	675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vec series from Kent, the
  rest is fairly minor.  It was supposed to go in last round, but various
  issues pushed it to this release instead.

  The pull request contains:

   - Various smaller blk-mq fixes from different folks.  Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code from
     Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet.  This
     enables some nice future work on making arbitrarily sized bios
     possible, and makes splitting more efficient.

     Related fixes to immutable bio_vecs:

      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
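For readers following the raid5.c hunks below: the conversion is mechanical. Fields that used to live directly on struct bio (bi_sector, bi_size, bi_idx) moved into the embedded bio->bi_iter, a struct bvec_iter. A minimal before/after sketch of that field access, assuming only <linux/bio.h>; the helper name is illustrative and not part of this patch:

	#include <linux/bio.h>

	/*
	 * Pre-3.14 code read bio->bi_sector and bio->bi_size directly;
	 * after this series the same state lives in bio->bi_iter.
	 */
	static inline sector_t example_last_sector(struct bio *bio)
	{
		/* was: bio->bi_sector + (bio->bi_size >> 9) */
		return bio->bi_iter.bi_sector + (bio->bi_iter.bi_size >> 9);
		/* identical to bio_end_sector(bio) in 3.14+ */
	}

Every hunk in the diff below is an instance of that substitution, plus the iterator change in async_copy_data().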
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	84
1 file changed, 43 insertions(+), 41 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab87d9e..67ca9c3d2939 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
 	int sectors = bio_sectors(bio);
-	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
 		return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_size = 0;
+		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->new_data_offset);
 			else
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
+			bi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				rbi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->new_data_offset);
 			else
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
-			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
 	sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-	struct bio_vec *bvl;
+	struct bio_vec bvl;
+	struct bvec_iter iter;
 	struct page *bio_page;
-	int i;
 	int page_offset;
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
 
-	if (bio->bi_sector >= sector)
-		page_offset = (signed)(bio->bi_sector - sector) * 512;
+	if (bio->bi_iter.bi_sector >= sector)
+		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
 	else
-		page_offset = (signed)(sector - bio->bi_sector) * -512;
+		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
 	if (frombio)
 		flags |= ASYNC_TX_FENCE;
 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-	bio_for_each_segment(bvl, bio, i) {
-		int len = bvl->bv_len;
+	bio_for_each_segment(bvl, bio, iter) {
+		int len = bvl.bv_len;
 		int clen;
 		int b_offset = 0;
 
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			clen = len;
 
 		if (clen > 0) {
-			b_offset += bvl->bv_offset;
-			bio_page = bvl->bv_page;
+			b_offset += bvl.bv_offset;
+			bio_page = bvl.bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
 						  b_offset, clen, &submit);
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			BUG_ON(!dev->read);
 			rbi = dev->read;
 			dev->read = NULL;
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
 					dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
 
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	int firstwrite=0;
 
 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
-		(unsigned long long)bi->bi_sector,
+		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
 	/*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		firstwrite = 1;
 	} else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if (bio_end_sector(*bip) > bi->bi_sector)
+	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		sector_t sector = sh->dev[dd_idx].sector;
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-		     bi && bi->bi_sector <= sector;
+		     bi && bi->bi_iter.bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bio_end_sector(bi) >= sector)
 				sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	}
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-		(unsigned long long)(*bip)->bi_sector,
+		(unsigned long long)(*bip)->bi_iter.bi_sector,
 		(unsigned long long)sh->sector, dd_idx);
 	spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
 
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		if (bi) bitmap_end = 1;
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
-			while (bi && bi->bi_sector <
+			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 					clear_bit(R5_UPTODATE, &dev->flags);
 				wbi = dev->written;
 				dev->written = NULL;
-				while (wbi && wbi->bi_sector <
+				while (wbi && wbi->bi_iter.bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
 					if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	/*
 	 * compute position
 	 */
-	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
-						   0,
-						   &dd_idx, NULL);
+	align_bi->bi_iter.bi_sector =
+		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+				     0, &dd_idx, NULL);
 
 	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		}
 
 		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
+		align_bi->bi_iter.bi_sector += rdev->data_offset;
 
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		if (mddev->gendisk)
 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
 					      align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_sector);
+					      raid_bio->bi_iter.bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		/* Skip discard while reshape is happening */
 		return;
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		return;
 	}
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	int remaining;
 	int handled = 0;
 
-	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = raid_bio->bi_iter.bi_sector &
+		~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
 	last_sector = bio_end_sector(raid_bio);