author     Kent Overstreet <kmo@daterainc.com>        2013-10-11 18:44:27 -0400
committer  Kent Overstreet <kmo@daterainc.com>        2013-11-24 01:33:47 -0500
commit     4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch)
tree       3aedcab02d2ad723a189d01934d1e94fec7a54e1 /fs
parent     ed9c47bebeeea4a468b07cfd745c690190f8014c (diff)
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To
implement immutable bvecs, a later patch is going to add a bi_bvec_done
member to this struct; for now, this patch effectively just renames
things.
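For orientation, the iterator collects the three fields this patch stops using directly. A minimal sketch of its shape, stubbed so the fragment stands alone (the authoritative definition lives in include/linux/blk_types.h; this is an illustration, not the verbatim kernel code):

    /* Illustrative sketch only, not the verbatim kernel definition. */
    typedef unsigned long long sector_t;    /* stand-in for the kernel typedef */

    struct bvec_iter {
            sector_t        bi_sector;      /* device address in 512-byte sectors */
            unsigned int    bi_size;        /* residual I/O count, in bytes */
            unsigned int    bi_idx;         /* current index into bi_io_vec[] */
            /* bi_bvec_done lands here in the later immutable-biovec patch */
    };

Throughout the diff, every direct use of bio->bi_sector, bio->bi_size and bio->bi_idx becomes bio->bi_iter.bi_sector, bio->bi_iter.bi_size and bio->bi_iter.bi_idx; behaviour is unchanged.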
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio-integrity.c               |  8
-rw-r--r--  fs/bio.c                         | 56
-rw-r--r--  fs/btrfs/check-integrity.c       |  8
-rw-r--r--  fs/btrfs/compression.c           | 17
-rw-r--r--  fs/btrfs/extent_io.c             | 14
-rw-r--r--  fs/btrfs/file-item.c             | 19
-rw-r--r--  fs/btrfs/inode.c                 | 22
-rw-r--r--  fs/btrfs/raid56.c                | 22
-rw-r--r--  fs/btrfs/scrub.c                 | 12
-rw-r--r--  fs/btrfs/volumes.c               | 12
-rw-r--r--  fs/buffer.c                      | 12
-rw-r--r--  fs/direct-io.c                   |  4
-rw-r--r--  fs/ext4/page-io.c                |  4
-rw-r--r--  fs/f2fs/data.c                   |  2
-rw-r--r--  fs/f2fs/segment.c                |  2
-rw-r--r--  fs/gfs2/lops.c                   |  2
-rw-r--r--  fs/gfs2/ops_fstype.c             |  2
-rw-r--r--  fs/hfsplus/wrapper.c             |  2
-rw-r--r--  fs/jfs/jfs_logmgr.c              | 12
-rw-r--r--  fs/jfs/jfs_metapage.c            |  9
-rw-r--r--  fs/logfs/dev_bdev.c              | 20
-rw-r--r--  fs/mpage.c                       |  2
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c |  9
-rw-r--r--  fs/nilfs2/segbuf.c               |  3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c     |  2
-rw-r--r--  fs/xfs/xfs_aops.c                |  2
-rw-r--r--  fs/xfs/xfs_buf.c                 |  4
27 files changed, 146 insertions(+), 137 deletions(-)
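The per-file hunks below (the view here is limited to fs/) apply that rename mechanically. A self-contained illustration of the pattern, using stub types invented for this example rather than the real kernel structures:

    /* Stub types for illustration only; the real ones are in include/linux/blk_types.h. */
    struct bvec_iter_stub { unsigned long long bi_sector; unsigned int bi_size; unsigned int bi_idx; };
    struct bio_stub       { struct bvec_iter_stub bi_iter; };

    /* Callers that computed a byte offset as (u64)bio->bi_sector << 9
     * now read the same value through the embedded iterator: */
    unsigned long long bio_start_byte(const struct bio_stub *bio)
    {
            return (unsigned long long)bio->bi_iter.bi_sector << 9;
    }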
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fc60b31453ee..08e3d1388c65 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -215,9 +215,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio) | |||
215 | { | 215 | { |
216 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 216 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
217 | 217 | ||
218 | BUG_ON(bio->bi_size == 0); | 218 | BUG_ON(bio->bi_iter.bi_size == 0); |
219 | 219 | ||
220 | return bi->tag_size * (bio->bi_size / bi->sector_size); | 220 | return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size); |
221 | } | 221 | } |
222 | EXPORT_SYMBOL(bio_integrity_tag_size); | 222 | EXPORT_SYMBOL(bio_integrity_tag_size); |
223 | 223 | ||
@@ -300,7 +300,7 @@ static void bio_integrity_generate(struct bio *bio) | |||
300 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 300 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
301 | struct blk_integrity_exchg bix; | 301 | struct blk_integrity_exchg bix; |
302 | struct bio_vec *bv; | 302 | struct bio_vec *bv; |
303 | sector_t sector = bio->bi_sector; | 303 | sector_t sector = bio->bi_iter.bi_sector; |
304 | unsigned int i, sectors, total; | 304 | unsigned int i, sectors, total; |
305 | void *prot_buf = bio->bi_integrity->bip_buf; | 305 | void *prot_buf = bio->bi_integrity->bip_buf; |
306 | 306 | ||
@@ -387,7 +387,7 @@ int bio_integrity_prep(struct bio *bio) | |||
387 | bip->bip_owns_buf = 1; | 387 | bip->bip_owns_buf = 1; |
388 | bip->bip_buf = buf; | 388 | bip->bip_buf = buf; |
389 | bip->bip_size = len; | 389 | bip->bip_size = len; |
390 | bip->bip_sector = bio->bi_sector; | 390 | bip->bip_sector = bio->bi_iter.bi_sector; |
391 | 391 | ||
392 | /* Map it */ | 392 | /* Map it */ |
393 | offset = offset_in_page(buf); | 393 | offset = offset_in_page(buf); |
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -532,13 +532,13 @@ void __bio_clone(struct bio *bio, struct bio *bio_src) | |||
532 | * most users will be overriding ->bi_bdev with a new target, | 532 | * most users will be overriding ->bi_bdev with a new target, |
533 | * so we don't set nor calculate new physical/hw segment counts here | 533 | * so we don't set nor calculate new physical/hw segment counts here |
534 | */ | 534 | */ |
535 | bio->bi_sector = bio_src->bi_sector; | 535 | bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; |
536 | bio->bi_bdev = bio_src->bi_bdev; | 536 | bio->bi_bdev = bio_src->bi_bdev; |
537 | bio->bi_flags |= 1 << BIO_CLONED; | 537 | bio->bi_flags |= 1 << BIO_CLONED; |
538 | bio->bi_rw = bio_src->bi_rw; | 538 | bio->bi_rw = bio_src->bi_rw; |
539 | bio->bi_vcnt = bio_src->bi_vcnt; | 539 | bio->bi_vcnt = bio_src->bi_vcnt; |
540 | bio->bi_size = bio_src->bi_size; | 540 | bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; |
541 | bio->bi_idx = bio_src->bi_idx; | 541 | bio->bi_iter.bi_idx = bio_src->bi_iter.bi_idx; |
542 | } | 542 | } |
543 | EXPORT_SYMBOL(__bio_clone); | 543 | EXPORT_SYMBOL(__bio_clone); |
544 | 544 | ||
@@ -612,7 +612,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
612 | if (unlikely(bio_flagged(bio, BIO_CLONED))) | 612 | if (unlikely(bio_flagged(bio, BIO_CLONED))) |
613 | return 0; | 613 | return 0; |
614 | 614 | ||
615 | if (((bio->bi_size + len) >> 9) > max_sectors) | 615 | if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) |
616 | return 0; | 616 | return 0; |
617 | 617 | ||
618 | /* | 618 | /* |
@@ -635,8 +635,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
635 | simulate merging updated prev_bvec | 635 | simulate merging updated prev_bvec |
636 | as new bvec. */ | 636 | as new bvec. */ |
637 | .bi_bdev = bio->bi_bdev, | 637 | .bi_bdev = bio->bi_bdev, |
638 | .bi_sector = bio->bi_sector, | 638 | .bi_sector = bio->bi_iter.bi_sector, |
639 | .bi_size = bio->bi_size - prev_bv_len, | 639 | .bi_size = bio->bi_iter.bi_size - |
640 | prev_bv_len, | ||
640 | .bi_rw = bio->bi_rw, | 641 | .bi_rw = bio->bi_rw, |
641 | }; | 642 | }; |
642 | 643 | ||
@@ -684,8 +685,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
684 | if (q->merge_bvec_fn) { | 685 | if (q->merge_bvec_fn) { |
685 | struct bvec_merge_data bvm = { | 686 | struct bvec_merge_data bvm = { |
686 | .bi_bdev = bio->bi_bdev, | 687 | .bi_bdev = bio->bi_bdev, |
687 | .bi_sector = bio->bi_sector, | 688 | .bi_sector = bio->bi_iter.bi_sector, |
688 | .bi_size = bio->bi_size, | 689 | .bi_size = bio->bi_iter.bi_size, |
689 | .bi_rw = bio->bi_rw, | 690 | .bi_rw = bio->bi_rw, |
690 | }; | 691 | }; |
691 | 692 | ||
@@ -708,7 +709,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
708 | bio->bi_vcnt++; | 709 | bio->bi_vcnt++; |
709 | bio->bi_phys_segments++; | 710 | bio->bi_phys_segments++; |
710 | done: | 711 | done: |
711 | bio->bi_size += len; | 712 | bio->bi_iter.bi_size += len; |
712 | return len; | 713 | return len; |
713 | } | 714 | } |
714 | 715 | ||
@@ -807,22 +808,22 @@ void bio_advance(struct bio *bio, unsigned bytes) | |||
807 | if (bio_integrity(bio)) | 808 | if (bio_integrity(bio)) |
808 | bio_integrity_advance(bio, bytes); | 809 | bio_integrity_advance(bio, bytes); |
809 | 810 | ||
810 | bio->bi_sector += bytes >> 9; | 811 | bio->bi_iter.bi_sector += bytes >> 9; |
811 | bio->bi_size -= bytes; | 812 | bio->bi_iter.bi_size -= bytes; |
812 | 813 | ||
813 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) | 814 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) |
814 | return; | 815 | return; |
815 | 816 | ||
816 | while (bytes) { | 817 | while (bytes) { |
817 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | 818 | if (unlikely(bio->bi_iter.bi_idx >= bio->bi_vcnt)) { |
818 | WARN_ONCE(1, "bio idx %d >= vcnt %d\n", | 819 | WARN_ONCE(1, "bio idx %d >= vcnt %d\n", |
819 | bio->bi_idx, bio->bi_vcnt); | 820 | bio->bi_iter.bi_idx, bio->bi_vcnt); |
820 | break; | 821 | break; |
821 | } | 822 | } |
822 | 823 | ||
823 | if (bytes >= bio_iovec(bio)->bv_len) { | 824 | if (bytes >= bio_iovec(bio)->bv_len) { |
824 | bytes -= bio_iovec(bio)->bv_len; | 825 | bytes -= bio_iovec(bio)->bv_len; |
825 | bio->bi_idx++; | 826 | bio->bi_iter.bi_idx++; |
826 | } else { | 827 | } else { |
827 | bio_iovec(bio)->bv_len -= bytes; | 828 | bio_iovec(bio)->bv_len -= bytes; |
828 | bio_iovec(bio)->bv_offset += bytes; | 829 | bio_iovec(bio)->bv_offset += bytes; |
@@ -1485,7 +1486,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, | |||
1485 | if (IS_ERR(bio)) | 1486 | if (IS_ERR(bio)) |
1486 | return bio; | 1487 | return bio; |
1487 | 1488 | ||
1488 | if (bio->bi_size == len) | 1489 | if (bio->bi_iter.bi_size == len) |
1489 | return bio; | 1490 | return bio; |
1490 | 1491 | ||
1491 | /* | 1492 | /* |
@@ -1763,16 +1764,16 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
1763 | return bp; | 1764 | return bp; |
1764 | 1765 | ||
1765 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, | 1766 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, |
1766 | bi->bi_sector + first_sectors); | 1767 | bi->bi_iter.bi_sector + first_sectors); |
1767 | 1768 | ||
1768 | BUG_ON(bio_segments(bi) > 1); | 1769 | BUG_ON(bio_segments(bi) > 1); |
1769 | atomic_set(&bp->cnt, 3); | 1770 | atomic_set(&bp->cnt, 3); |
1770 | bp->error = 0; | 1771 | bp->error = 0; |
1771 | bp->bio1 = *bi; | 1772 | bp->bio1 = *bi; |
1772 | bp->bio2 = *bi; | 1773 | bp->bio2 = *bi; |
1773 | bp->bio2.bi_sector += first_sectors; | 1774 | bp->bio2.bi_iter.bi_sector += first_sectors; |
1774 | bp->bio2.bi_size -= first_sectors << 9; | 1775 | bp->bio2.bi_iter.bi_size -= first_sectors << 9; |
1775 | bp->bio1.bi_size = first_sectors << 9; | 1776 | bp->bio1.bi_iter.bi_size = first_sectors << 9; |
1776 | 1777 | ||
1777 | if (bi->bi_vcnt != 0) { | 1778 | if (bi->bi_vcnt != 0) { |
1778 | bp->bv1 = *bio_iovec(bi); | 1779 | bp->bv1 = *bio_iovec(bi); |
@@ -1821,21 +1822,22 @@ void bio_trim(struct bio *bio, int offset, int size) | |||
1821 | int sofar = 0; | 1822 | int sofar = 0; |
1822 | 1823 | ||
1823 | size <<= 9; | 1824 | size <<= 9; |
1824 | if (offset == 0 && size == bio->bi_size) | 1825 | if (offset == 0 && size == bio->bi_iter.bi_size) |
1825 | return; | 1826 | return; |
1826 | 1827 | ||
1827 | clear_bit(BIO_SEG_VALID, &bio->bi_flags); | 1828 | clear_bit(BIO_SEG_VALID, &bio->bi_flags); |
1828 | 1829 | ||
1829 | bio_advance(bio, offset << 9); | 1830 | bio_advance(bio, offset << 9); |
1830 | 1831 | ||
1831 | bio->bi_size = size; | 1832 | bio->bi_iter.bi_size = size; |
1832 | 1833 | ||
1833 | /* avoid any complications with bi_idx being non-zero*/ | 1834 | /* avoid any complications with bi_idx being non-zero*/ |
1834 | if (bio->bi_idx) { | 1835 | if (bio->bi_iter.bi_idx) { |
1835 | memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, | 1836 | memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_iter.bi_idx, |
1836 | (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); | 1837 | (bio->bi_vcnt - bio->bi_iter.bi_idx) * |
1837 | bio->bi_vcnt -= bio->bi_idx; | 1838 | sizeof(struct bio_vec)); |
1838 | bio->bi_idx = 0; | 1839 | bio->bi_vcnt -= bio->bi_iter.bi_idx; |
1840 | bio->bi_iter.bi_idx = 0; | ||
1839 | } | 1841 | } |
1840 | /* Make sure vcnt and last bv are not too big */ | 1842 | /* Make sure vcnt and last bv are not too big */ |
1841 | bio_for_each_segment(bvec, bio, i) { | 1843 | bio_for_each_segment(bvec, bio, i) { |
@@ -1871,7 +1873,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index, | |||
1871 | sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); | 1873 | sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); |
1872 | sectors = 0; | 1874 | sectors = 0; |
1873 | 1875 | ||
1874 | if (index >= bio->bi_idx) | 1876 | if (index >= bio->bi_iter.bi_idx) |
1875 | index = bio->bi_vcnt - 1; | 1877 | index = bio->bi_vcnt - 1; |
1876 | 1878 | ||
1877 | bio_for_each_segment_all(bv, bio, i) { | 1879 | bio_for_each_segment_all(bv, bio, i) { |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 131d82800b3a..cb05e1c842c5 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, | |||
1695 | return -1; | 1695 | return -1; |
1696 | } | 1696 | } |
1697 | bio->bi_bdev = block_ctx->dev->bdev; | 1697 | bio->bi_bdev = block_ctx->dev->bdev; |
1698 | bio->bi_sector = dev_bytenr >> 9; | 1698 | bio->bi_iter.bi_sector = dev_bytenr >> 9; |
1699 | 1699 | ||
1700 | for (j = i; j < num_pages; j++) { | 1700 | for (j = i; j < num_pages; j++) { |
1701 | ret = bio_add_page(bio, block_ctx->pagev[j], | 1701 | ret = bio_add_page(bio, block_ctx->pagev[j], |
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
3013 | int bio_is_patched; | 3013 | int bio_is_patched; |
3014 | char **mapped_datav; | 3014 | char **mapped_datav; |
3015 | 3015 | ||
3016 | dev_bytenr = 512 * bio->bi_sector; | 3016 | dev_bytenr = 512 * bio->bi_iter.bi_sector; |
3017 | bio_is_patched = 0; | 3017 | bio_is_patched = 0; |
3018 | if (dev_state->state->print_mask & | 3018 | if (dev_state->state->print_mask & |
3019 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 3019 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
3021 | "submit_bio(rw=0x%x, bi_vcnt=%u," | 3021 | "submit_bio(rw=0x%x, bi_vcnt=%u," |
3022 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", | 3022 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", |
3023 | rw, bio->bi_vcnt, | 3023 | rw, bio->bi_vcnt, |
3024 | (unsigned long long)bio->bi_sector, dev_bytenr, | 3024 | (unsigned long long)bio->bi_iter.bi_sector, |
3025 | bio->bi_bdev); | 3025 | dev_bytenr, bio->bi_bdev); |
3026 | 3026 | ||
3027 | mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, | 3027 | mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, |
3028 | GFP_NOFS); | 3028 | GFP_NOFS); |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index eac6784e43d7..f5cdeb4b5538 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err) | |||
172 | goto out; | 172 | goto out; |
173 | 173 | ||
174 | inode = cb->inode; | 174 | inode = cb->inode; |
175 | ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); | 175 | ret = check_compressed_csum(inode, cb, |
176 | (u64)bio->bi_iter.bi_sector << 9); | ||
176 | if (ret) | 177 | if (ret) |
177 | goto csum_failed; | 178 | goto csum_failed; |
178 | 179 | ||
@@ -370,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
370 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { | 371 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { |
371 | page = compressed_pages[pg_index]; | 372 | page = compressed_pages[pg_index]; |
372 | page->mapping = inode->i_mapping; | 373 | page->mapping = inode->i_mapping; |
373 | if (bio->bi_size) | 374 | if (bio->bi_iter.bi_size) |
374 | ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, | 375 | ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, |
375 | PAGE_CACHE_SIZE, | 376 | PAGE_CACHE_SIZE, |
376 | bio, 0); | 377 | bio, 0); |
@@ -504,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, | |||
504 | 505 | ||
505 | if (!em || last_offset < em->start || | 506 | if (!em || last_offset < em->start || |
506 | (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || | 507 | (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || |
507 | (em->block_start >> 9) != cb->orig_bio->bi_sector) { | 508 | (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { |
508 | free_extent_map(em); | 509 | free_extent_map(em); |
509 | unlock_extent(tree, last_offset, end); | 510 | unlock_extent(tree, last_offset, end); |
510 | unlock_page(page); | 511 | unlock_page(page); |
@@ -550,7 +551,7 @@ next: | |||
550 | * in it. We don't actually do IO on those pages but allocate new ones | 551 | * in it. We don't actually do IO on those pages but allocate new ones |
551 | * to hold the compressed pages on disk. | 552 | * to hold the compressed pages on disk. |
552 | * | 553 | * |
553 | * bio->bi_sector points to the compressed extent on disk | 554 | * bio->bi_iter.bi_sector points to the compressed extent on disk |
554 | * bio->bi_io_vec points to all of the inode pages | 555 | * bio->bi_io_vec points to all of the inode pages |
555 | * bio->bi_vcnt is a count of pages | 556 | * bio->bi_vcnt is a count of pages |
556 | * | 557 | * |
@@ -571,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
571 | struct page *page; | 572 | struct page *page; |
572 | struct block_device *bdev; | 573 | struct block_device *bdev; |
573 | struct bio *comp_bio; | 574 | struct bio *comp_bio; |
574 | u64 cur_disk_byte = (u64)bio->bi_sector << 9; | 575 | u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; |
575 | u64 em_len; | 576 | u64 em_len; |
576 | u64 em_start; | 577 | u64 em_start; |
577 | struct extent_map *em; | 578 | struct extent_map *em; |
@@ -657,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
657 | page->mapping = inode->i_mapping; | 658 | page->mapping = inode->i_mapping; |
658 | page->index = em_start >> PAGE_CACHE_SHIFT; | 659 | page->index = em_start >> PAGE_CACHE_SHIFT; |
659 | 660 | ||
660 | if (comp_bio->bi_size) | 661 | if (comp_bio->bi_iter.bi_size) |
661 | ret = tree->ops->merge_bio_hook(READ, page, 0, | 662 | ret = tree->ops->merge_bio_hook(READ, page, 0, |
662 | PAGE_CACHE_SIZE, | 663 | PAGE_CACHE_SIZE, |
663 | comp_bio, 0); | 664 | comp_bio, 0); |
@@ -685,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
685 | comp_bio, sums); | 686 | comp_bio, sums); |
686 | BUG_ON(ret); /* -ENOMEM */ | 687 | BUG_ON(ret); /* -ENOMEM */ |
687 | } | 688 | } |
688 | sums += (comp_bio->bi_size + root->sectorsize - 1) / | 689 | sums += (comp_bio->bi_iter.bi_size + |
689 | root->sectorsize; | 690 | root->sectorsize - 1) / root->sectorsize; |
690 | 691 | ||
691 | ret = btrfs_map_bio(root, READ, comp_bio, | 692 | ret = btrfs_map_bio(root, READ, comp_bio, |
692 | mirror_num, 0); | 693 | mirror_num, 0); |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8b5f9e1d1f0e..bcb6f1b780d6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, | |||
1984 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); | 1984 | bio = btrfs_io_bio_alloc(GFP_NOFS, 1); |
1985 | if (!bio) | 1985 | if (!bio) |
1986 | return -EIO; | 1986 | return -EIO; |
1987 | bio->bi_size = 0; | 1987 | bio->bi_iter.bi_size = 0; |
1988 | map_length = length; | 1988 | map_length = length; |
1989 | 1989 | ||
1990 | ret = btrfs_map_block(fs_info, WRITE, logical, | 1990 | ret = btrfs_map_block(fs_info, WRITE, logical, |
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, | |||
1995 | } | 1995 | } |
1996 | BUG_ON(mirror_num != bbio->mirror_num); | 1996 | BUG_ON(mirror_num != bbio->mirror_num); |
1997 | sector = bbio->stripes[mirror_num-1].physical >> 9; | 1997 | sector = bbio->stripes[mirror_num-1].physical >> 9; |
1998 | bio->bi_sector = sector; | 1998 | bio->bi_iter.bi_sector = sector; |
1999 | dev = bbio->stripes[mirror_num-1].dev; | 1999 | dev = bbio->stripes[mirror_num-1].dev; |
2000 | kfree(bbio); | 2000 | kfree(bbio); |
2001 | if (!dev || !dev->bdev || !dev->writeable) { | 2001 | if (!dev || !dev->bdev || !dev->writeable) { |
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, | |||
2268 | return -EIO; | 2268 | return -EIO; |
2269 | } | 2269 | } |
2270 | bio->bi_end_io = failed_bio->bi_end_io; | 2270 | bio->bi_end_io = failed_bio->bi_end_io; |
2271 | bio->bi_sector = failrec->logical >> 9; | 2271 | bio->bi_iter.bi_sector = failrec->logical >> 9; |
2272 | bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 2272 | bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
2273 | bio->bi_size = 0; | 2273 | bio->bi_iter.bi_size = 0; |
2274 | 2274 | ||
2275 | btrfs_failed_bio = btrfs_io_bio(failed_bio); | 2275 | btrfs_failed_bio = btrfs_io_bio(failed_bio); |
2276 | if (btrfs_failed_bio->csum) { | 2276 | if (btrfs_failed_bio->csum) { |
@@ -2412,7 +2412,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2412 | struct inode *inode = page->mapping->host; | 2412 | struct inode *inode = page->mapping->host; |
2413 | 2413 | ||
2414 | pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " | 2414 | pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " |
2415 | "mirror=%lu\n", (u64)bio->bi_sector, err, | 2415 | "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err, |
2416 | io_bio->mirror_num); | 2416 | io_bio->mirror_num); |
2417 | tree = &BTRFS_I(inode)->io_tree; | 2417 | tree = &BTRFS_I(inode)->io_tree; |
2418 | 2418 | ||
@@ -2543,7 +2543,7 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, | |||
2543 | 2543 | ||
2544 | if (bio) { | 2544 | if (bio) { |
2545 | bio->bi_bdev = bdev; | 2545 | bio->bi_bdev = bdev; |
2546 | bio->bi_sector = first_sector; | 2546 | bio->bi_iter.bi_sector = first_sector; |
2547 | btrfs_bio = btrfs_io_bio(bio); | 2547 | btrfs_bio = btrfs_io_bio(bio); |
2548 | btrfs_bio->csum = NULL; | 2548 | btrfs_bio->csum = NULL; |
2549 | btrfs_bio->csum_allocated = NULL; | 2549 | btrfs_bio->csum_allocated = NULL; |
@@ -2637,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2637 | if (bio_ret && *bio_ret) { | 2637 | if (bio_ret && *bio_ret) { |
2638 | bio = *bio_ret; | 2638 | bio = *bio_ret; |
2639 | if (old_compressed) | 2639 | if (old_compressed) |
2640 | contig = bio->bi_sector == sector; | 2640 | contig = bio->bi_iter.bi_sector == sector; |
2641 | else | 2641 | else |
2642 | contig = bio_end_sector(bio) == sector; | 2642 | contig = bio_end_sector(bio) == sector; |
2643 | 2643 | ||
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 6f3848860283..84a46a42d262 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
182 | if (!path) | 182 | if (!path) |
183 | return -ENOMEM; | 183 | return -ENOMEM; |
184 | 184 | ||
185 | nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits; | 185 | nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; |
186 | if (!dst) { | 186 | if (!dst) { |
187 | if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { | 187 | if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { |
188 | btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, | 188 | btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, |
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
201 | csum = (u8 *)dst; | 201 | csum = (u8 *)dst; |
202 | } | 202 | } |
203 | 203 | ||
204 | if (bio->bi_size > PAGE_CACHE_SIZE * 8) | 204 | if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) |
205 | path->reada = 2; | 205 | path->reada = 2; |
206 | 206 | ||
207 | WARN_ON(bio->bi_vcnt <= 0); | 207 | WARN_ON(bio->bi_vcnt <= 0); |
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
217 | path->skip_locking = 1; | 217 | path->skip_locking = 1; |
218 | } | 218 | } |
219 | 219 | ||
220 | disk_bytenr = (u64)bio->bi_sector << 9; | 220 | disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; |
221 | if (dio) | 221 | if (dio) |
222 | offset = logical_offset; | 222 | offset = logical_offset; |
223 | while (bio_index < bio->bi_vcnt) { | 223 | while (bio_index < bio->bi_vcnt) { |
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, | |||
302 | struct btrfs_dio_private *dip, struct bio *bio, | 302 | struct btrfs_dio_private *dip, struct bio *bio, |
303 | u64 offset) | 303 | u64 offset) |
304 | { | 304 | { |
305 | int len = (bio->bi_sector << 9) - dip->disk_bytenr; | 305 | int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr; |
306 | u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); | 306 | u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); |
307 | int ret; | 307 | int ret; |
308 | 308 | ||
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, | |||
447 | u64 offset; | 447 | u64 offset; |
448 | 448 | ||
449 | WARN_ON(bio->bi_vcnt <= 0); | 449 | WARN_ON(bio->bi_vcnt <= 0); |
450 | sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); | 450 | sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size), |
451 | GFP_NOFS); | ||
451 | if (!sums) | 452 | if (!sums) |
452 | return -ENOMEM; | 453 | return -ENOMEM; |
453 | 454 | ||
454 | sums->len = bio->bi_size; | 455 | sums->len = bio->bi_iter.bi_size; |
455 | INIT_LIST_HEAD(&sums->list); | 456 | INIT_LIST_HEAD(&sums->list); |
456 | 457 | ||
457 | if (contig) | 458 | if (contig) |
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, | |||
461 | 462 | ||
462 | ordered = btrfs_lookup_ordered_extent(inode, offset); | 463 | ordered = btrfs_lookup_ordered_extent(inode, offset); |
463 | BUG_ON(!ordered); /* Logic error */ | 464 | BUG_ON(!ordered); /* Logic error */ |
464 | sums->bytenr = (u64)bio->bi_sector << 9; | 465 | sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; |
465 | index = 0; | 466 | index = 0; |
466 | 467 | ||
467 | while (bio_index < bio->bi_vcnt) { | 468 | while (bio_index < bio->bi_vcnt) { |
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, | |||
476 | btrfs_add_ordered_sum(inode, ordered, sums); | 477 | btrfs_add_ordered_sum(inode, ordered, sums); |
477 | btrfs_put_ordered_extent(ordered); | 478 | btrfs_put_ordered_extent(ordered); |
478 | 479 | ||
479 | bytes_left = bio->bi_size - total_bytes; | 480 | bytes_left = bio->bi_iter.bi_size - total_bytes; |
480 | 481 | ||
481 | sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), | 482 | sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), |
482 | GFP_NOFS); | 483 | GFP_NOFS); |
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, | |||
484 | sums->len = bytes_left; | 485 | sums->len = bytes_left; |
485 | ordered = btrfs_lookup_ordered_extent(inode, offset); | 486 | ordered = btrfs_lookup_ordered_extent(inode, offset); |
486 | BUG_ON(!ordered); /* Logic error */ | 487 | BUG_ON(!ordered); /* Logic error */ |
487 | sums->bytenr = ((u64)bio->bi_sector << 9) + | 488 | sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + |
488 | total_bytes; | 489 | total_bytes; |
489 | index = 0; | 490 | index = 0; |
490 | } | 491 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d6630dc130ba..7ab0e94ad492 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | |||
1577 | unsigned long bio_flags) | 1577 | unsigned long bio_flags) |
1578 | { | 1578 | { |
1579 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | 1579 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; |
1580 | u64 logical = (u64)bio->bi_sector << 9; | 1580 | u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
1581 | u64 length = 0; | 1581 | u64 length = 0; |
1582 | u64 map_length; | 1582 | u64 map_length; |
1583 | int ret; | 1583 | int ret; |
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | |||
1585 | if (bio_flags & EXTENT_BIO_COMPRESSED) | 1585 | if (bio_flags & EXTENT_BIO_COMPRESSED) |
1586 | return 0; | 1586 | return 0; |
1587 | 1587 | ||
1588 | length = bio->bi_size; | 1588 | length = bio->bi_iter.bi_size; |
1589 | map_length = length; | 1589 | map_length = length; |
1590 | ret = btrfs_map_block(root->fs_info, rw, logical, | 1590 | ret = btrfs_map_block(root->fs_info, rw, logical, |
1591 | &map_length, NULL, 0); | 1591 | &map_length, NULL, 0); |
@@ -6894,7 +6894,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) | |||
6894 | printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " | 6894 | printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " |
6895 | "sector %#Lx len %u err no %d\n", | 6895 | "sector %#Lx len %u err no %d\n", |
6896 | btrfs_ino(dip->inode), bio->bi_rw, | 6896 | btrfs_ino(dip->inode), bio->bi_rw, |
6897 | (unsigned long long)bio->bi_sector, bio->bi_size, err); | 6897 | (unsigned long long)bio->bi_iter.bi_sector, |
6898 | bio->bi_iter.bi_size, err); | ||
6898 | dip->errors = 1; | 6899 | dip->errors = 1; |
6899 | 6900 | ||
6900 | /* | 6901 | /* |
@@ -6985,7 +6986,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
6985 | struct bio *bio; | 6986 | struct bio *bio; |
6986 | struct bio *orig_bio = dip->orig_bio; | 6987 | struct bio *orig_bio = dip->orig_bio; |
6987 | struct bio_vec *bvec = orig_bio->bi_io_vec; | 6988 | struct bio_vec *bvec = orig_bio->bi_io_vec; |
6988 | u64 start_sector = orig_bio->bi_sector; | 6989 | u64 start_sector = orig_bio->bi_iter.bi_sector; |
6989 | u64 file_offset = dip->logical_offset; | 6990 | u64 file_offset = dip->logical_offset; |
6990 | u64 submit_len = 0; | 6991 | u64 submit_len = 0; |
6991 | u64 map_length; | 6992 | u64 map_length; |
@@ -6993,7 +6994,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
6993 | int ret = 0; | 6994 | int ret = 0; |
6994 | int async_submit = 0; | 6995 | int async_submit = 0; |
6995 | 6996 | ||
6996 | map_length = orig_bio->bi_size; | 6997 | map_length = orig_bio->bi_iter.bi_size; |
6997 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, | 6998 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, |
6998 | &map_length, NULL, 0); | 6999 | &map_length, NULL, 0); |
6999 | if (ret) { | 7000 | if (ret) { |
@@ -7001,7 +7002,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
7001 | return -EIO; | 7002 | return -EIO; |
7002 | } | 7003 | } |
7003 | 7004 | ||
7004 | if (map_length >= orig_bio->bi_size) { | 7005 | if (map_length >= orig_bio->bi_iter.bi_size) { |
7005 | bio = orig_bio; | 7006 | bio = orig_bio; |
7006 | goto submit; | 7007 | goto submit; |
7007 | } | 7008 | } |
@@ -7053,7 +7054,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
7053 | bio->bi_private = dip; | 7054 | bio->bi_private = dip; |
7054 | bio->bi_end_io = btrfs_end_dio_bio; | 7055 | bio->bi_end_io = btrfs_end_dio_bio; |
7055 | 7056 | ||
7056 | map_length = orig_bio->bi_size; | 7057 | map_length = orig_bio->bi_iter.bi_size; |
7057 | ret = btrfs_map_block(root->fs_info, rw, | 7058 | ret = btrfs_map_block(root->fs_info, rw, |
7058 | start_sector << 9, | 7059 | start_sector << 9, |
7059 | &map_length, NULL, 0); | 7060 | &map_length, NULL, 0); |
@@ -7111,7 +7112,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, | |||
7111 | 7112 | ||
7112 | if (!skip_sum && !write) { | 7113 | if (!skip_sum && !write) { |
7113 | csum_size = btrfs_super_csum_size(root->fs_info->super_copy); | 7114 | csum_size = btrfs_super_csum_size(root->fs_info->super_copy); |
7114 | sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits; | 7115 | sum_len = dio_bio->bi_iter.bi_size >> |
7116 | inode->i_sb->s_blocksize_bits; | ||
7115 | sum_len *= csum_size; | 7117 | sum_len *= csum_size; |
7116 | } else { | 7118 | } else { |
7117 | sum_len = 0; | 7119 | sum_len = 0; |
@@ -7126,8 +7128,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, | |||
7126 | dip->private = dio_bio->bi_private; | 7128 | dip->private = dio_bio->bi_private; |
7127 | dip->inode = inode; | 7129 | dip->inode = inode; |
7128 | dip->logical_offset = file_offset; | 7130 | dip->logical_offset = file_offset; |
7129 | dip->bytes = dio_bio->bi_size; | 7131 | dip->bytes = dio_bio->bi_iter.bi_size; |
7130 | dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; | 7132 | dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; |
7131 | io_bio->bi_private = dip; | 7133 | io_bio->bi_private = dip; |
7132 | dip->errors = 0; | 7134 | dip->errors = 0; |
7133 | dip->orig_bio = io_bio; | 7135 | dip->orig_bio = io_bio; |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24ac21840a9a..9af0b25d991a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, | |||
1032 | 1032 | ||
1033 | /* see if we can add this page onto our existing bio */ | 1033 | /* see if we can add this page onto our existing bio */ |
1034 | if (last) { | 1034 | if (last) { |
1035 | last_end = (u64)last->bi_sector << 9; | 1035 | last_end = (u64)last->bi_iter.bi_sector << 9; |
1036 | last_end += last->bi_size; | 1036 | last_end += last->bi_iter.bi_size; |
1037 | 1037 | ||
1038 | /* | 1038 | /* |
1039 | * we can't merge these if they are from different | 1039 | * we can't merge these if they are from different |
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, | |||
1053 | if (!bio) | 1053 | if (!bio) |
1054 | return -ENOMEM; | 1054 | return -ENOMEM; |
1055 | 1055 | ||
1056 | bio->bi_size = 0; | 1056 | bio->bi_iter.bi_size = 0; |
1057 | bio->bi_bdev = stripe->dev->bdev; | 1057 | bio->bi_bdev = stripe->dev->bdev; |
1058 | bio->bi_sector = disk_start >> 9; | 1058 | bio->bi_iter.bi_sector = disk_start >> 9; |
1059 | set_bit(BIO_UPTODATE, &bio->bi_flags); | 1059 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
1060 | 1060 | ||
1061 | bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); | 1061 | bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); |
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) | |||
1111 | 1111 | ||
1112 | spin_lock_irq(&rbio->bio_list_lock); | 1112 | spin_lock_irq(&rbio->bio_list_lock); |
1113 | bio_list_for_each(bio, &rbio->bio_list) { | 1113 | bio_list_for_each(bio, &rbio->bio_list) { |
1114 | start = (u64)bio->bi_sector << 9; | 1114 | start = (u64)bio->bi_iter.bi_sector << 9; |
1115 | stripe_offset = start - rbio->raid_map[0]; | 1115 | stripe_offset = start - rbio->raid_map[0]; |
1116 | page_index = stripe_offset >> PAGE_CACHE_SHIFT; | 1116 | page_index = stripe_offset >> PAGE_CACHE_SHIFT; |
1117 | 1117 | ||
@@ -1272,7 +1272,7 @@ cleanup: | |||
1272 | static int find_bio_stripe(struct btrfs_raid_bio *rbio, | 1272 | static int find_bio_stripe(struct btrfs_raid_bio *rbio, |
1273 | struct bio *bio) | 1273 | struct bio *bio) |
1274 | { | 1274 | { |
1275 | u64 physical = bio->bi_sector; | 1275 | u64 physical = bio->bi_iter.bi_sector; |
1276 | u64 stripe_start; | 1276 | u64 stripe_start; |
1277 | int i; | 1277 | int i; |
1278 | struct btrfs_bio_stripe *stripe; | 1278 | struct btrfs_bio_stripe *stripe; |
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, | |||
1298 | static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, | 1298 | static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, |
1299 | struct bio *bio) | 1299 | struct bio *bio) |
1300 | { | 1300 | { |
1301 | u64 logical = bio->bi_sector; | 1301 | u64 logical = bio->bi_iter.bi_sector; |
1302 | u64 stripe_start; | 1302 | u64 stripe_start; |
1303 | int i; | 1303 | int i; |
1304 | 1304 | ||
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
1602 | plug_list); | 1602 | plug_list); |
1603 | struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, | 1603 | struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, |
1604 | plug_list); | 1604 | plug_list); |
1605 | u64 a_sector = ra->bio_list.head->bi_sector; | 1605 | u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; |
1606 | u64 b_sector = rb->bio_list.head->bi_sector; | 1606 | u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; |
1607 | 1607 | ||
1608 | if (a_sector < b_sector) | 1608 | if (a_sector < b_sector) |
1609 | return -1; | 1609 | return -1; |
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, | |||
1691 | if (IS_ERR(rbio)) | 1691 | if (IS_ERR(rbio)) |
1692 | return PTR_ERR(rbio); | 1692 | return PTR_ERR(rbio); |
1693 | bio_list_add(&rbio->bio_list, bio); | 1693 | bio_list_add(&rbio->bio_list, bio); |
1694 | rbio->bio_list_bytes = bio->bi_size; | 1694 | rbio->bio_list_bytes = bio->bi_iter.bi_size; |
1695 | 1695 | ||
1696 | /* | 1696 | /* |
1697 | * don't plug on full rbios, just get them out the door | 1697 | * don't plug on full rbios, just get them out the door |
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, | |||
2044 | 2044 | ||
2045 | rbio->read_rebuild = 1; | 2045 | rbio->read_rebuild = 1; |
2046 | bio_list_add(&rbio->bio_list, bio); | 2046 | bio_list_add(&rbio->bio_list, bio); |
2047 | rbio->bio_list_bytes = bio->bi_size; | 2047 | rbio->bio_list_bytes = bio->bi_iter.bi_size; |
2048 | 2048 | ||
2049 | rbio->faila = find_logical_bio_stripe(rbio, bio); | 2049 | rbio->faila = find_logical_bio_stripe(rbio, bio); |
2050 | if (rbio->faila == -1) { | 2050 | if (rbio->faila == -1) { |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1fd3f33c330a..bb9a928fa3a8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, | |||
1308 | continue; | 1308 | continue; |
1309 | } | 1309 | } |
1310 | bio->bi_bdev = page->dev->bdev; | 1310 | bio->bi_bdev = page->dev->bdev; |
1311 | bio->bi_sector = page->physical >> 9; | 1311 | bio->bi_iter.bi_sector = page->physical >> 9; |
1312 | 1312 | ||
1313 | bio_add_page(bio, page->page, PAGE_SIZE, 0); | 1313 | bio_add_page(bio, page->page, PAGE_SIZE, 0); |
1314 | if (btrfsic_submit_bio_wait(READ, bio)) | 1314 | if (btrfsic_submit_bio_wait(READ, bio)) |
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | |||
1427 | if (!bio) | 1427 | if (!bio) |
1428 | return -EIO; | 1428 | return -EIO; |
1429 | bio->bi_bdev = page_bad->dev->bdev; | 1429 | bio->bi_bdev = page_bad->dev->bdev; |
1430 | bio->bi_sector = page_bad->physical >> 9; | 1430 | bio->bi_iter.bi_sector = page_bad->physical >> 9; |
1431 | 1431 | ||
1432 | ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); | 1432 | ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); |
1433 | if (PAGE_SIZE != ret) { | 1433 | if (PAGE_SIZE != ret) { |
@@ -1520,7 +1520,7 @@ again: | |||
1520 | bio->bi_private = sbio; | 1520 | bio->bi_private = sbio; |
1521 | bio->bi_end_io = scrub_wr_bio_end_io; | 1521 | bio->bi_end_io = scrub_wr_bio_end_io; |
1522 | bio->bi_bdev = sbio->dev->bdev; | 1522 | bio->bi_bdev = sbio->dev->bdev; |
1523 | bio->bi_sector = sbio->physical >> 9; | 1523 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
1524 | sbio->err = 0; | 1524 | sbio->err = 0; |
1525 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 1525 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
1526 | spage->physical_for_dev_replace || | 1526 | spage->physical_for_dev_replace || |
@@ -1926,7 +1926,7 @@ again: | |||
1926 | bio->bi_private = sbio; | 1926 | bio->bi_private = sbio; |
1927 | bio->bi_end_io = scrub_bio_end_io; | 1927 | bio->bi_end_io = scrub_bio_end_io; |
1928 | bio->bi_bdev = sbio->dev->bdev; | 1928 | bio->bi_bdev = sbio->dev->bdev; |
1929 | bio->bi_sector = sbio->physical >> 9; | 1929 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
1930 | sbio->err = 0; | 1930 | sbio->err = 0; |
1931 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 1931 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
1932 | spage->physical || | 1932 | spage->physical || |
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx, | |||
3371 | spin_unlock(&sctx->stat_lock); | 3371 | spin_unlock(&sctx->stat_lock); |
3372 | return -ENOMEM; | 3372 | return -ENOMEM; |
3373 | } | 3373 | } |
3374 | bio->bi_size = 0; | 3374 | bio->bi_iter.bi_size = 0; |
3375 | bio->bi_sector = physical_for_dev_replace >> 9; | 3375 | bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; |
3376 | bio->bi_bdev = dev->bdev; | 3376 | bio->bi_bdev = dev->bdev; |
3377 | ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); | 3377 | ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); |
3378 | if (ret != PAGE_CACHE_SIZE) { | 3378 | if (ret != PAGE_CACHE_SIZE) { |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 92303f42baaa..f2130de0ddc2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5411,7 +5411,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio, | |||
5411 | if (!q->merge_bvec_fn) | 5411 | if (!q->merge_bvec_fn) |
5412 | return 1; | 5412 | return 1; |
5413 | 5413 | ||
5414 | bvm.bi_size = bio->bi_size - prev->bv_len; | 5414 | bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; |
5415 | if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) | 5415 | if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) |
5416 | return 0; | 5416 | return 0; |
5417 | return 1; | 5417 | return 1; |
@@ -5426,7 +5426,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | |||
5426 | bio->bi_private = bbio; | 5426 | bio->bi_private = bbio; |
5427 | btrfs_io_bio(bio)->stripe_index = dev_nr; | 5427 | btrfs_io_bio(bio)->stripe_index = dev_nr; |
5428 | bio->bi_end_io = btrfs_end_bio; | 5428 | bio->bi_end_io = btrfs_end_bio; |
5429 | bio->bi_sector = physical >> 9; | 5429 | bio->bi_iter.bi_sector = physical >> 9; |
5430 | #ifdef DEBUG | 5430 | #ifdef DEBUG |
5431 | { | 5431 | { |
5432 | struct rcu_string *name; | 5432 | struct rcu_string *name; |
@@ -5464,7 +5464,7 @@ again: | |||
5464 | while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) { | 5464 | while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) { |
5465 | if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, | 5465 | if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, |
5466 | bvec->bv_offset) < bvec->bv_len) { | 5466 | bvec->bv_offset) < bvec->bv_len) { |
5467 | u64 len = bio->bi_size; | 5467 | u64 len = bio->bi_iter.bi_size; |
5468 | 5468 | ||
5469 | atomic_inc(&bbio->stripes_pending); | 5469 | atomic_inc(&bbio->stripes_pending); |
5470 | submit_stripe_bio(root, bbio, bio, physical, dev_nr, | 5470 | submit_stripe_bio(root, bbio, bio, physical, dev_nr, |
@@ -5486,7 +5486,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | |||
5486 | bio->bi_private = bbio->private; | 5486 | bio->bi_private = bbio->private; |
5487 | bio->bi_end_io = bbio->end_io; | 5487 | bio->bi_end_io = bbio->end_io; |
5488 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; | 5488 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; |
5489 | bio->bi_sector = logical >> 9; | 5489 | bio->bi_iter.bi_sector = logical >> 9; |
5490 | kfree(bbio); | 5490 | kfree(bbio); |
5491 | bio_endio(bio, -EIO); | 5491 | bio_endio(bio, -EIO); |
5492 | } | 5492 | } |
@@ -5497,7 +5497,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
5497 | { | 5497 | { |
5498 | struct btrfs_device *dev; | 5498 | struct btrfs_device *dev; |
5499 | struct bio *first_bio = bio; | 5499 | struct bio *first_bio = bio; |
5500 | u64 logical = (u64)bio->bi_sector << 9; | 5500 | u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
5501 | u64 length = 0; | 5501 | u64 length = 0; |
5502 | u64 map_length; | 5502 | u64 map_length; |
5503 | u64 *raid_map = NULL; | 5503 | u64 *raid_map = NULL; |
@@ -5506,7 +5506,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
5506 | int total_devs = 1; | 5506 | int total_devs = 1; |
5507 | struct btrfs_bio *bbio = NULL; | 5507 | struct btrfs_bio *bbio = NULL; |
5508 | 5508 | ||
5509 | length = bio->bi_size; | 5509 | length = bio->bi_iter.bi_size; |
5510 | map_length = length; | 5510 | map_length = length; |
5511 | 5511 | ||
5512 | ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, | 5512 | ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, |
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877335ca..1c04ec66974e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) | |||
2982 | * let it through, and the IO layer will turn it into | 2982 | * let it through, and the IO layer will turn it into |
2983 | * an EIO. | 2983 | * an EIO. |
2984 | */ | 2984 | */ |
2985 | if (unlikely(bio->bi_sector >= maxsector)) | 2985 | if (unlikely(bio->bi_iter.bi_sector >= maxsector)) |
2986 | return; | 2986 | return; |
2987 | 2987 | ||
2988 | maxsector -= bio->bi_sector; | 2988 | maxsector -= bio->bi_iter.bi_sector; |
2989 | bytes = bio->bi_size; | 2989 | bytes = bio->bi_iter.bi_size; |
2990 | if (likely((bytes >> 9) <= maxsector)) | 2990 | if (likely((bytes >> 9) <= maxsector)) |
2991 | return; | 2991 | return; |
2992 | 2992 | ||
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) | |||
2994 | bytes = maxsector << 9; | 2994 | bytes = maxsector << 9; |
2995 | 2995 | ||
2996 | /* Truncate the bio.. */ | 2996 | /* Truncate the bio.. */ |
2997 | bio->bi_size = bytes; | 2997 | bio->bi_iter.bi_size = bytes; |
2998 | bio->bi_io_vec[0].bv_len = bytes; | 2998 | bio->bi_io_vec[0].bv_len = bytes; |
2999 | 2999 | ||
3000 | /* ..and clear the end of the buffer for reads */ | 3000 | /* ..and clear the end of the buffer for reads */ |
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) | |||
3029 | */ | 3029 | */ |
3030 | bio = bio_alloc(GFP_NOIO, 1); | 3030 | bio = bio_alloc(GFP_NOIO, 1); |
3031 | 3031 | ||
3032 | bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); | 3032 | bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); |
3033 | bio->bi_bdev = bh->b_bdev; | 3033 | bio->bi_bdev = bh->b_bdev; |
3034 | bio->bi_io_vec[0].bv_page = bh->b_page; | 3034 | bio->bi_io_vec[0].bv_page = bh->b_page; |
3035 | bio->bi_io_vec[0].bv_len = bh->b_size; | 3035 | bio->bi_io_vec[0].bv_len = bh->b_size; |
3036 | bio->bi_io_vec[0].bv_offset = bh_offset(bh); | 3036 | bio->bi_io_vec[0].bv_offset = bh_offset(bh); |
3037 | 3037 | ||
3038 | bio->bi_vcnt = 1; | 3038 | bio->bi_vcnt = 1; |
3039 | bio->bi_size = bh->b_size; | 3039 | bio->bi_iter.bi_size = bh->b_size; |
3040 | 3040 | ||
3041 | bio->bi_end_io = end_bio_bh_io_sync; | 3041 | bio->bi_end_io = end_bio_bh_io_sync; |
3042 | bio->bi_private = bh; | 3042 | bio->bi_private = bh; |
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e04142d5962..160a5489a939 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, | |||
375 | bio = bio_alloc(GFP_KERNEL, nr_vecs); | 375 | bio = bio_alloc(GFP_KERNEL, nr_vecs); |
376 | 376 | ||
377 | bio->bi_bdev = bdev; | 377 | bio->bi_bdev = bdev; |
378 | bio->bi_sector = first_sector; | 378 | bio->bi_iter.bi_sector = first_sector; |
379 | if (dio->is_async) | 379 | if (dio->is_async) |
380 | bio->bi_end_io = dio_bio_end_aio; | 380 | bio->bi_end_io = dio_bio_end_aio; |
381 | else | 381 | else |
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, | |||
719 | if (sdio->bio) { | 719 | if (sdio->bio) { |
720 | loff_t cur_offset = sdio->cur_page_fs_offset; | 720 | loff_t cur_offset = sdio->cur_page_fs_offset; |
721 | loff_t bio_next_offset = sdio->logical_offset_in_bio + | 721 | loff_t bio_next_offset = sdio->logical_offset_in_bio + |
722 | sdio->bio->bi_size; | 722 | sdio->bio->bi_iter.bi_size; |
723 | 723 | ||
724 | /* | 724 | /* |
725 | * See whether this new request is contiguous with the old. | 725 | * See whether this new request is contiguous with the old. |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index a31e4da14508..ab95508e3d40 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) | |||
298 | static void ext4_end_bio(struct bio *bio, int error) | 298 | static void ext4_end_bio(struct bio *bio, int error) |
299 | { | 299 | { |
300 | ext4_io_end_t *io_end = bio->bi_private; | 300 | ext4_io_end_t *io_end = bio->bi_private; |
301 | sector_t bi_sector = bio->bi_sector; | 301 | sector_t bi_sector = bio->bi_iter.bi_sector; |
302 | 302 | ||
303 | BUG_ON(!io_end); | 303 | BUG_ON(!io_end); |
304 | bio->bi_end_io = NULL; | 304 | bio->bi_end_io = NULL; |
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io, | |||
366 | bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); | 366 | bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); |
367 | if (!bio) | 367 | if (!bio) |
368 | return -ENOMEM; | 368 | return -ENOMEM; |
369 | bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); | 369 | bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); |
370 | bio->bi_bdev = bh->b_bdev; | 370 | bio->bi_bdev = bh->b_bdev; |
371 | bio->bi_end_io = ext4_end_bio; | 371 | bio->bi_end_io = ext4_end_bio; |
372 | bio->bi_private = ext4_get_io_end(io->io_end); | 372 | bio->bi_private = ext4_get_io_end(io->io_end); |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a4949096cf4c..a2c8de8ba6ce 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -386,7 +386,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, | |||
386 | bio = f2fs_bio_alloc(bdev, 1); | 386 | bio = f2fs_bio_alloc(bdev, 1); |
387 | 387 | ||
388 | /* Initialize the bio */ | 388 | /* Initialize the bio */ |
389 | bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | 389 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); |
390 | bio->bi_end_io = read_end_io; | 390 | bio->bi_end_io = read_end_io; |
391 | 391 | ||
392 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { | 392 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a90c6bc0d129..36e8afd8e1e4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -682,7 +682,7 @@ retry: | |||
682 | 682 | ||
683 | bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); | 683 | bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); |
684 | sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks); | 684 | sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks); |
685 | sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | 685 | sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); |
686 | sbi->bio[type]->bi_private = priv; | 686 | sbi->bio[type]->bi_private = priv; |
687 | /* | 687 | /* |
688 | * The end_io will be assigned at the sumbission phase. | 688 | * The end_io will be assigned at the sumbission phase. |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 010b9fb9fec6..985da945f0b5 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -272,7 +272,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) | |||
272 | nrvecs = max(nrvecs/2, 1U); | 272 | nrvecs = max(nrvecs/2, 1U); |
273 | } | 273 | } |
274 | 274 | ||
275 | bio->bi_sector = blkno * (sb->s_blocksize >> 9); | 275 | bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); |
276 | bio->bi_bdev = sb->s_bdev; | 276 | bio->bi_bdev = sb->s_bdev; |
277 | bio->bi_end_io = gfs2_end_log_write; | 277 | bio->bi_end_io = gfs2_end_log_write; |
278 | bio->bi_private = sdp; | 278 | bio->bi_private = sdp; |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b474958..16194da91652 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) | |||
224 | lock_page(page); | 224 | lock_page(page); |
225 | 225 | ||
226 | bio = bio_alloc(GFP_NOFS, 1); | 226 | bio = bio_alloc(GFP_NOFS, 1); |
227 | bio->bi_sector = sector * (sb->s_blocksize >> 9); | 227 | bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); |
228 | bio->bi_bdev = sb->s_bdev; | 228 | bio->bi_bdev = sb->s_bdev; |
229 | bio_add_page(bio, page, PAGE_SIZE, 0); | 229 | bio_add_page(bio, page, PAGE_SIZE, 0); |
230 | 230 | ||
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index e9a97a0d4314..3f999649587f 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
63 | sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); | 63 | sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); |
64 | 64 | ||
65 | bio = bio_alloc(GFP_NOIO, 1); | 65 | bio = bio_alloc(GFP_NOIO, 1); |
66 | bio->bi_sector = sector; | 66 | bio->bi_iter.bi_sector = sector; |
67 | bio->bi_bdev = sb->s_bdev; | 67 | bio->bi_bdev = sb->s_bdev; |
68 | 68 | ||
69 | if (!(rw & WRITE) && data) | 69 | if (!(rw & WRITE) && data) |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 360d27c48887..8d811e02b4b9 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) | |||
1998 | 1998 | ||
1999 | bio = bio_alloc(GFP_NOFS, 1); | 1999 | bio = bio_alloc(GFP_NOFS, 1); |
2000 | 2000 | ||
2001 | bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); | 2001 | bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); |
2002 | bio->bi_bdev = log->bdev; | 2002 | bio->bi_bdev = log->bdev; |
2003 | bio->bi_io_vec[0].bv_page = bp->l_page; | 2003 | bio->bi_io_vec[0].bv_page = bp->l_page; |
2004 | bio->bi_io_vec[0].bv_len = LOGPSIZE; | 2004 | bio->bi_io_vec[0].bv_len = LOGPSIZE; |
2005 | bio->bi_io_vec[0].bv_offset = bp->l_offset; | 2005 | bio->bi_io_vec[0].bv_offset = bp->l_offset; |
2006 | 2006 | ||
2007 | bio->bi_vcnt = 1; | 2007 | bio->bi_vcnt = 1; |
2008 | bio->bi_size = LOGPSIZE; | 2008 | bio->bi_iter.bi_size = LOGPSIZE; |
2009 | 2009 | ||
2010 | bio->bi_end_io = lbmIODone; | 2010 | bio->bi_end_io = lbmIODone; |
2011 | bio->bi_private = bp; | 2011 | bio->bi_private = bp; |
2012 | /*check if journaling to disk has been disabled*/ | 2012 | /*check if journaling to disk has been disabled*/ |
2013 | if (log->no_integrity) { | 2013 | if (log->no_integrity) { |
2014 | bio->bi_size = 0; | 2014 | bio->bi_iter.bi_size = 0; |
2015 | lbmIODone(bio, 0); | 2015 | lbmIODone(bio, 0); |
2016 | } else { | 2016 | } else { |
2017 | submit_bio(READ_SYNC, bio); | 2017 | submit_bio(READ_SYNC, bio); |
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp) | |||
2144 | jfs_info("lbmStartIO\n"); | 2144 | jfs_info("lbmStartIO\n"); |
2145 | 2145 | ||
2146 | bio = bio_alloc(GFP_NOFS, 1); | 2146 | bio = bio_alloc(GFP_NOFS, 1); |
2147 | bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); | 2147 | bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); |
2148 | bio->bi_bdev = log->bdev; | 2148 | bio->bi_bdev = log->bdev; |
2149 | bio->bi_io_vec[0].bv_page = bp->l_page; | 2149 | bio->bi_io_vec[0].bv_page = bp->l_page; |
2150 | bio->bi_io_vec[0].bv_len = LOGPSIZE; | 2150 | bio->bi_io_vec[0].bv_len = LOGPSIZE; |
2151 | bio->bi_io_vec[0].bv_offset = bp->l_offset; | 2151 | bio->bi_io_vec[0].bv_offset = bp->l_offset; |
2152 | 2152 | ||
2153 | bio->bi_vcnt = 1; | 2153 | bio->bi_vcnt = 1; |
2154 | bio->bi_size = LOGPSIZE; | 2154 | bio->bi_iter.bi_size = LOGPSIZE; |
2155 | 2155 | ||
2156 | bio->bi_end_io = lbmIODone; | 2156 | bio->bi_end_io = lbmIODone; |
2157 | bio->bi_private = bp; | 2157 | bio->bi_private = bp; |
2158 | 2158 | ||
2159 | /* check if journaling to disk has been disabled */ | 2159 | /* check if journaling to disk has been disabled */ |
2160 | if (log->no_integrity) { | 2160 | if (log->no_integrity) { |
2161 | bio->bi_size = 0; | 2161 | bio->bi_iter.bi_size = 0; |
2162 | lbmIODone(bio, 0); | 2162 | lbmIODone(bio, 0); |
2163 | } else { | 2163 | } else { |
2164 | submit_bio(WRITE_SYNC, bio); | 2164 | submit_bio(WRITE_SYNC, bio); |
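jfs builds its single-segment log bios by hand, filling bi_io_vec[0] and setting bi_vcnt and the byte count directly, so the rename touches bi_iter.bi_size in several places, including the bi_iter.bi_size = 0 short-circuit when journaling is disabled. For contrast, a sketch of the more common pattern, in which bio_add_page() maintains bi_vcnt and bi_iter.bi_size and only the sector is set explicitly; read_log_page() and its arguments are hypothetical, and the error handling is a stand-in:

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/errno.h>

static int read_log_page(struct block_device *bdev, struct page *page,
			 sector_t blkno, unsigned int blkbits,
			 unsigned int len, unsigned int offset,
			 bio_end_io_t *end_io, void *priv)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = blkno << (blkbits - 9);
	bio->bi_end_io = end_io;
	bio->bi_private = priv;

	/* bio_add_page() bumps bi_vcnt and bi_iter.bi_size for us. */
	if (bio_add_page(bio, page, len, offset) < len) {
		bio_put(bio);
		return -EIO;		/* hypothetical failure path */
	}

	submit_bio(READ_SYNC, bio);
	return 0;
}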
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index d165cde0c68d..49ba7ff1bbb9 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
416 | * count from hitting zero before we're through | 416 | * count from hitting zero before we're through |
417 | */ | 417 | */ |
418 | inc_io(page); | 418 | inc_io(page); |
419 | if (!bio->bi_size) | 419 | if (!bio->bi_iter.bi_size) |
420 | goto dump_bio; | 420 | goto dump_bio; |
421 | submit_bio(WRITE, bio); | 421 | submit_bio(WRITE, bio); |
422 | nr_underway++; | 422 | nr_underway++; |
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
438 | 438 | ||
439 | bio = bio_alloc(GFP_NOFS, 1); | 439 | bio = bio_alloc(GFP_NOFS, 1); |
440 | bio->bi_bdev = inode->i_sb->s_bdev; | 440 | bio->bi_bdev = inode->i_sb->s_bdev; |
441 | bio->bi_sector = pblock << (inode->i_blkbits - 9); | 441 | bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); |
442 | bio->bi_end_io = metapage_write_end_io; | 442 | bio->bi_end_io = metapage_write_end_io; |
443 | bio->bi_private = page; | 443 | bio->bi_private = page; |
444 | 444 | ||
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
452 | if (bio) { | 452 | if (bio) { |
453 | if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) | 453 | if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) |
454 | goto add_failed; | 454 | goto add_failed; |
455 | if (!bio->bi_size) | 455 | if (!bio->bi_iter.bi_size) |
456 | goto dump_bio; | 456 | goto dump_bio; |
457 | 457 | ||
458 | submit_bio(WRITE, bio); | 458 | submit_bio(WRITE, bio); |
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page) | |||
517 | 517 | ||
518 | bio = bio_alloc(GFP_NOFS, 1); | 518 | bio = bio_alloc(GFP_NOFS, 1); |
519 | bio->bi_bdev = inode->i_sb->s_bdev; | 519 | bio->bi_bdev = inode->i_sb->s_bdev; |
520 | bio->bi_sector = pblock << (inode->i_blkbits - 9); | 520 | bio->bi_iter.bi_sector = |
521 | pblock << (inode->i_blkbits - 9); | ||
521 | bio->bi_end_io = metapage_read_end_io; | 522 | bio->bi_end_io = metapage_read_end_io; |
522 | bio->bi_private = page; | 523 | bio->bi_private = page; |
523 | len = xlen << inode->i_blkbits; | 524 | len = xlen << inode->i_blkbits; |
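Both jfs write paths keep their existing "anything to submit?" guard, which now reads the iterator: a zero bi_iter.bi_size at that point means bio_add_page() never added a page, so the bio must not be sent. Sketched in isolation below; submit_or_drop() is an illustrative helper and bio_endio() stands in for jfs's dump_bio handling:

#include <linux/bio.h>
#include <linux/errno.h>

/* Illustrative only: submit a bio iff it actually carries data. */
static void submit_or_drop(struct bio *bio, int rw)
{
	if (bio->bi_iter.bi_size)
		submit_bio(rw, bio);
	else
		bio_endio(bio, -EIO);	/* stand-in for the dump_bio error path */
}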
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index e6df3be3b31b..76279e11982d 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) | |||
26 | bio_vec.bv_len = PAGE_SIZE; | 26 | bio_vec.bv_len = PAGE_SIZE; |
27 | bio_vec.bv_offset = 0; | 27 | bio_vec.bv_offset = 0; |
28 | bio.bi_vcnt = 1; | 28 | bio.bi_vcnt = 1; |
29 | bio.bi_size = PAGE_SIZE; | ||
30 | bio.bi_bdev = bdev; | 29 | bio.bi_bdev = bdev; |
31 | bio.bi_sector = page->index * (PAGE_SIZE >> 9); | 30 | bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); |
31 | bio.bi_iter.bi_size = PAGE_SIZE; | ||
32 | 32 | ||
33 | return submit_bio_wait(rw, &bio); | 33 | return submit_bio_wait(rw, &bio); |
34 | } | 34 | } |
@@ -92,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
92 | if (i >= max_pages) { | 92 | if (i >= max_pages) { |
93 | /* Block layer cannot split bios :( */ | 93 | /* Block layer cannot split bios :( */ |
94 | bio->bi_vcnt = i; | 94 | bio->bi_vcnt = i; |
95 | bio->bi_size = i * PAGE_SIZE; | 95 | bio->bi_iter.bi_size = i * PAGE_SIZE; |
96 | bio->bi_bdev = super->s_bdev; | 96 | bio->bi_bdev = super->s_bdev; |
97 | bio->bi_sector = ofs >> 9; | 97 | bio->bi_iter.bi_sector = ofs >> 9; |
98 | bio->bi_private = sb; | 98 | bio->bi_private = sb; |
99 | bio->bi_end_io = writeseg_end_io; | 99 | bio->bi_end_io = writeseg_end_io; |
100 | atomic_inc(&super->s_pending_writes); | 100 | atomic_inc(&super->s_pending_writes); |
@@ -119,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
119 | unlock_page(page); | 119 | unlock_page(page); |
120 | } | 120 | } |
121 | bio->bi_vcnt = nr_pages; | 121 | bio->bi_vcnt = nr_pages; |
122 | bio->bi_size = nr_pages * PAGE_SIZE; | 122 | bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; |
123 | bio->bi_bdev = super->s_bdev; | 123 | bio->bi_bdev = super->s_bdev; |
124 | bio->bi_sector = ofs >> 9; | 124 | bio->bi_iter.bi_sector = ofs >> 9; |
125 | bio->bi_private = sb; | 125 | bio->bi_private = sb; |
126 | bio->bi_end_io = writeseg_end_io; | 126 | bio->bi_end_io = writeseg_end_io; |
127 | atomic_inc(&super->s_pending_writes); | 127 | atomic_inc(&super->s_pending_writes); |
@@ -184,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
184 | if (i >= max_pages) { | 184 | if (i >= max_pages) { |
185 | /* Block layer cannot split bios :( */ | 185 | /* Block layer cannot split bios :( */ |
186 | bio->bi_vcnt = i; | 186 | bio->bi_vcnt = i; |
187 | bio->bi_size = i * PAGE_SIZE; | 187 | bio->bi_iter.bi_size = i * PAGE_SIZE; |
188 | bio->bi_bdev = super->s_bdev; | 188 | bio->bi_bdev = super->s_bdev; |
189 | bio->bi_sector = ofs >> 9; | 189 | bio->bi_iter.bi_sector = ofs >> 9; |
190 | bio->bi_private = sb; | 190 | bio->bi_private = sb; |
191 | bio->bi_end_io = erase_end_io; | 191 | bio->bi_end_io = erase_end_io; |
192 | atomic_inc(&super->s_pending_writes); | 192 | atomic_inc(&super->s_pending_writes); |
@@ -205,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
205 | bio->bi_io_vec[i].bv_offset = 0; | 205 | bio->bi_io_vec[i].bv_offset = 0; |
206 | } | 206 | } |
207 | bio->bi_vcnt = nr_pages; | 207 | bio->bi_vcnt = nr_pages; |
208 | bio->bi_size = nr_pages * PAGE_SIZE; | 208 | bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; |
209 | bio->bi_bdev = super->s_bdev; | 209 | bio->bi_bdev = super->s_bdev; |
210 | bio->bi_sector = ofs >> 9; | 210 | bio->bi_iter.bi_sector = ofs >> 9; |
211 | bio->bi_private = sb; | 211 | bio->bi_private = sb; |
212 | bio->bi_end_io = erase_end_io; | 212 | bio->bi_end_io = erase_end_io; |
213 | atomic_inc(&super->s_pending_writes); | 213 | atomic_inc(&super->s_pending_writes); |
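The logfs hunks cover both an on-stack synchronous bio and hand-built multi-page bios, with bi_iter.bi_sector and bi_iter.bi_size now set side by side instead of in separate top-level fields. A sketch of the synchronous single-page case, assuming the 3.14-era one-argument bio_init() and two-argument submit_bio_wait(); sync_one_page() is a made-up name:

#include <linux/bio.h>
#include <linux/mm.h>

static int sync_one_page(struct block_device *bdev, struct page *page, int rw)
{
	struct bio bio;
	struct bio_vec vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &vec;
	vec.bv_page = page;
	vec.bv_len = PAGE_SIZE;
	vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio.bi_iter.bi_size = PAGE_SIZE;

	return submit_bio_wait(rw, &bio);
}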
diff --git a/fs/mpage.c b/fs/mpage.c index dd6d5878f4d9..4979ffa60aaa 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -93,7 +93,7 @@ mpage_alloc(struct block_device *bdev, | |||
93 | 93 | ||
94 | if (bio) { | 94 | if (bio) { |
95 | bio->bi_bdev = bdev; | 95 | bio->bi_bdev = bdev; |
96 | bio->bi_sector = first_sector; | 96 | bio->bi_iter.bi_sector = first_sector; |
97 | } | 97 | } |
98 | return bio; | 98 | return bio; |
99 | } | 99 | } |
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index da768923bf7c..56ff823ca82e 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio) | |||
134 | if (bio) { | 134 | if (bio) { |
135 | get_parallel(bio->bi_private); | 135 | get_parallel(bio->bi_private); |
136 | dprintk("%s submitting %s bio %u@%llu\n", __func__, | 136 | dprintk("%s submitting %s bio %u@%llu\n", __func__, |
137 | rw == READ ? "read" : "write", | 137 | rw == READ ? "read" : "write", bio->bi_iter.bi_size, |
138 | bio->bi_size, (unsigned long long)bio->bi_sector); | 138 | (unsigned long long)bio->bi_iter.bi_sector); |
139 | submit_bio(rw, bio); | 139 | submit_bio(rw, bio); |
140 | } | 140 | } |
141 | return NULL; | 141 | return NULL; |
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect, | |||
156 | } | 156 | } |
157 | 157 | ||
158 | if (bio) { | 158 | if (bio) { |
159 | bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; | 159 | bio->bi_iter.bi_sector = isect - be->be_f_offset + |
160 | be->be_v_offset; | ||
160 | bio->bi_bdev = be->be_mdev; | 161 | bio->bi_bdev = be->be_mdev; |
161 | bio->bi_end_io = end_io; | 162 | bio->bi_end_io = end_io; |
162 | bio->bi_private = par; | 163 | bio->bi_private = par; |
@@ -511,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be, | |||
511 | isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + | 512 | isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + |
512 | (offset / SECTOR_SIZE); | 513 | (offset / SECTOR_SIZE); |
513 | 514 | ||
514 | bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; | 515 | bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset; |
515 | bio->bi_bdev = be->be_mdev; | 516 | bio->bi_bdev = be->be_mdev; |
516 | bio->bi_end_io = bl_read_single_end_io; | 517 | bio->bi_end_io = bl_read_single_end_io; |
517 | 518 | ||
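On the read side the block layout client mostly just reports the bio's state, and the size and sector it logs are now reached through the iterator. A minimal illustration of reading those fields; log_bio() is an illustrative helper and pr_debug() stands in for the driver's dprintk wrapper:

#include <linux/bio.h>
#include <linux/printk.h>

static void log_bio(struct bio *bio)
{
	pr_debug("%u bytes at sector %llu\n",
		 bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector);
}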
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 2d8be51f90dc..dc3a9efdaab8 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, | |||
416 | } | 416 | } |
417 | if (likely(bio)) { | 417 | if (likely(bio)) { |
418 | bio->bi_bdev = nilfs->ns_bdev; | 418 | bio->bi_bdev = nilfs->ns_bdev; |
419 | bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9); | 419 | bio->bi_iter.bi_sector = |
420 | start << (nilfs->ns_blocksize_bits - 9); | ||
420 | } | 421 | } |
421 | return bio; | 422 | return bio; |
422 | } | 423 | } |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 73920ffda05b..bf482dfed14f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, | |||
413 | } | 413 | } |
414 | 414 | ||
415 | /* Must put everything in 512 byte sectors for the bio... */ | 415 | /* Must put everything in 512 byte sectors for the bio... */ |
416 | bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9); | 416 | bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); |
417 | bio->bi_bdev = reg->hr_bdev; | 417 | bio->bi_bdev = reg->hr_bdev; |
418 | bio->bi_private = wc; | 418 | bio->bi_private = wc; |
419 | bio->bi_end_io = o2hb_bio_end_io; | 419 | bio->bi_end_io = o2hb_bio_end_io; |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 71c8c9d2b882..1b19b9cd692a 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio( | |||
407 | struct bio *bio = bio_alloc(GFP_NOIO, nvecs); | 407 | struct bio *bio = bio_alloc(GFP_NOIO, nvecs); |
408 | 408 | ||
409 | ASSERT(bio->bi_private == NULL); | 409 | ASSERT(bio->bi_private == NULL); |
410 | bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); | 410 | bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); |
411 | bio->bi_bdev = bh->b_bdev; | 411 | bio->bi_bdev = bh->b_bdev; |
412 | return bio; | 412 | return bio; |
413 | } | 413 | } |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77dcb00..5f3ea443ebbe 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1255,7 +1255,7 @@ next_chunk: | |||
1255 | 1255 | ||
1256 | bio = bio_alloc(GFP_NOIO, nr_pages); | 1256 | bio = bio_alloc(GFP_NOIO, nr_pages); |
1257 | bio->bi_bdev = bp->b_target->bt_bdev; | 1257 | bio->bi_bdev = bp->b_target->bt_bdev; |
1258 | bio->bi_sector = sector; | 1258 | bio->bi_iter.bi_sector = sector; |
1259 | bio->bi_end_io = xfs_buf_bio_end_io; | 1259 | bio->bi_end_io = xfs_buf_bio_end_io; |
1260 | bio->bi_private = bp; | 1260 | bio->bi_private = bp; |
1261 | 1261 | ||
@@ -1277,7 +1277,7 @@ next_chunk: | |||
1277 | total_nr_pages--; | 1277 | total_nr_pages--; |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | if (likely(bio->bi_size)) { | 1280 | if (likely(bio->bi_iter.bi_size)) { |
1281 | if (xfs_buf_is_vmapped(bp)) { | 1281 | if (xfs_buf_is_vmapped(bp)) { |
1282 | flush_kernel_vmap_range(bp->b_addr, | 1282 | flush_kernel_vmap_range(bp->b_addr, |
1283 | xfs_buf_vmap_len(bp)); | 1283 | xfs_buf_vmap_len(bp)); |