author		Kent Overstreet <kmo@daterainc.com>	2013-11-23 20:19:00 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:49 -0500
commit		7988613b0e5b2638caf6cd493cc78e9595eba19c (patch)
tree		cc9fc4e235278035313ee32940740f61269f8fb3
parent		a4ad39b1d10584dfcfcfb0d510faab2c7f034399 (diff)
block: Convert bio_for_each_segment() to bvec_iter
More prep work for immutable biovecs - with immutable bvecs drivers
won't be able to use the biovec directly, they'll need to use helpers
that take into account bio->bi_iter.bi_bvec_done.
This updates callers for the new usage without changing the
implementation yet.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
39 files changed, 401 insertions, 397 deletions
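
Every driver conversion below follows the same shape: the loop variable changes from a struct bio_vec pointer plus an integer index into a struct bio_vec copied by value and driven by a struct bvec_iter. A minimal sketch of the before/after pattern (process_segment() is a hypothetical stand-in for whatever a driver does per segment, not a function from this patch):

/* Before: bio_for_each_segment() hands out pointers into bio->bi_io_vec,
 * indexed by an int. */
static void make_request_old(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment(bvec, bio, i)
		process_segment(page_address(bvec->bv_page) + bvec->bv_offset,
				bvec->bv_len);
}

/* After: the bvec is a by-value copy and the iterator owns the position,
 * so helpers can account for bio->bi_iter.bi_bvec_done (a partially
 * completed bvec) without drivers touching the biovec array. */
static void make_request_new(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter)
		process_segment(page_address(bvec.bv_page) + bvec.bv_offset,
				bvec.bv_len);
}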
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0a9d0b3c794b..2d75ae246167 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -62,17 +62,18 @@ struct nfhd_device {
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
-	struct bio_vec *bvec;
-	int i, dir, len, shift;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	int dir, len, shift;
 	sector_t sec = bio->bi_iter.bi_sector;
 
 	dir = bio_data_dir(bio);
 	shift = dev->bshift;
-	bio_for_each_segment(bvec, bio, i) {
-		len = bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		len = bvec.bv_len;
 		len >>= 9;
 		nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-				bvec_to_phys(bvec));
+				bvec_to_phys(&bvec));
 		sec += len;
 	}
 	bio_endio(bio, 0);
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index f33bcbaa6a07..47b6b9f81d43 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -109,28 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
 	unsigned long phys_mem, phys_end;
 	void *user_mem;
-	struct bio_vec *vec;
+	struct bio_vec vec;
 	unsigned int transfered;
-	unsigned short idx;
+	struct bvec_iter iter;
 
 	phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
 				    AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
 	transfered = 0;
-	bio_for_each_segment(vec, bio, idx) {
-		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+	bio_for_each_segment(vec, bio, iter) {
+		if (unlikely(phys_mem + vec.bv_len > phys_end)) {
 			bio_io_error(bio);
 			return;
 		}
 
-		user_mem = page_address(vec->bv_page) + vec->bv_offset;
+		user_mem = page_address(vec.bv_page) + vec.bv_offset;
 		if (bio_data_dir(bio) == READ)
-			memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+			memcpy(user_mem, (void *) phys_mem, vec.bv_len);
 		else
-			memcpy((void *) phys_mem, user_mem, vec->bv_len);
+			memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-		phys_mem += vec->bv_len;
-		transfered += vec->bv_len;
+		phys_mem += vec.bv_len;
+		transfered += vec.bv_len;
 	}
 	bio_endio(bio, 0);
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 5c2ab2c74066..5da8e900d3b1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2746,10 +2746,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 void rq_flush_dcache_pages(struct request *rq)
 {
 	struct req_iterator iter;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 
 	rq_for_each_segment(bvec, rq, iter)
-		flush_dcache_page(bvec->bv_page);
+		flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 03bc083c28cf..a1ead9049ed6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,10 +12,11 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	struct bio_vec *bv, *bvprv = NULL;
-	int cluster, i, high, highprv = 1;
+	struct bio_vec bv, bvprv = { NULL };
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
+	struct bvec_iter iter;
 
 	if (!bio)
 		return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_segment(bv, bio, i) {
+		bio_for_each_segment(bv, bio, iter) {
 			/*
 			 * the trick here is making sure that a high page is
 			 * never considered part of another segment, since that
 			 * might change with the bounce page.
 			 */
-			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-			if (high || highprv)
-				goto new_segment;
-			if (cluster) {
-				if (seg_size + bv->bv_len
+			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+			if (!high && !highprv && cluster) {
+				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
-				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
 					goto new_segment;
-				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
 					goto new_segment;
 
-				seg_size += bv->bv_len;
+				seg_size += bv.bv_len;
 				bvprv = bv;
 				continue;
 			}
@@ -54,7 +53,7 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
-			seg_size = bv->bv_len;
+			seg_size = bv.bv_len;
 			highprv = high;
 		}
 		bbio = bio;
@@ -110,21 +109,21 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-		     struct scatterlist *sglist, struct bio_vec **bvprv,
+		     struct scatterlist *sglist, struct bio_vec *bvprv,
 		     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
 	int nbytes = bvec->bv_len;
 
-	if (*bvprv && *cluster) {
+	if (*sg && *cluster) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 
-		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
-		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
 		(*sg)->length += nbytes;
@@ -150,7 +149,7 @@ new_segment:
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
 	}
-	*bvprv = bvec;
+	*bvprv = *bvec;
 }
 
 /*
@@ -160,7 +159,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
+	struct bio_vec bvec, bvprv;
 	struct req_iterator iter;
 	struct scatterlist *sg;
 	int nsegs, cluster;
@@ -171,10 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	/*
 	 * for each bio in rq
 	 */
-	bvprv = NULL;
 	sg = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
 				     &nsegs, &cluster);
 	} /* segments in rq */
 
@@ -223,18 +221,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
 		   struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
+	struct bio_vec bvec, bvprv;
 	struct scatterlist *sg;
 	int nsegs, cluster;
-	unsigned long i;
+	struct bvec_iter iter;
 
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	bvprv = NULL;
 	sg = NULL;
-	bio_for_each_segment(bvec, bio, i) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+	bio_for_each_segment(bvec, bio, iter) {
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
 				     &nsegs, &cluster);
 	} /* segments in bio */
 
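Passing bvecs by value has one subtle consequence, visible in the merge code above: a struct bio_vec copy has no natural "no previous segment yet" state, so the old bvprv == NULL test needs a substitute. __blk_recalc_rq_segments() zero-initializes bvprv and keys off highprv, __blk_segment_map_sg() reuses *sg (no scatterlist entry emitted yet), and the nvme conversion further down adds an explicit first flag. A generic sketch of the idiom, with mergeable(), start_segment() and extend_segment() as hypothetical stand-ins:

	struct bio_vec bv, bvprv;
	struct bvec_iter iter;
	bool have_prv = false;	/* replaces the old bvprv == NULL test */

	bio_for_each_segment(bv, bio, iter) {
		if (have_prv && mergeable(&bvprv, &bv))
			extend_segment(&bv);
		else
			start_segment(&bv);
		bvprv = bv;
		have_prv = true;
	}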
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 77c24ab1898a..7a06aec1dedc 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -897,15 +897,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct page *page;
-	int i;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
+	bio_for_each_segment(bv, bio, iter) {
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv->bv_page);
+		page = compound_trans_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -913,12 +913,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-	struct bio_vec *bv;
 	struct page *page;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
-		page = compound_trans_head(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		page = compound_trans_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 66f5aaae15a2..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,9 +328,9 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	int rw;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	sector_t sector;
-	int i;
+	struct bvec_iter iter;
 	int err = -EIO;
 
 	sector = bio->bi_iter.bi_sector;
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	if (rw == READA)
 		rw = READ;
 
-	bio_for_each_segment(bvec, bio, i) {
-		unsigned int len = bvec->bv_len;
-		err = brd_do_bvec(brd, bvec->bv_page, len,
-				  bvec->bv_offset, rw, sector);
+	bio_for_each_segment(bvec, bio, iter) {
+		unsigned int len = bvec.bv_len;
+		err = brd_do_bvec(brd, bvec.bv_page, len,
+				  bvec.bv_offset, rw, sector);
 		if (err)
 			break;
 		sector += len >> SECTOR_SHIFT;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..f4e5440aba05 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_no_send_page(mdev, bvec->bv_page,
-					 bvec->bv_offset, bvec->bv_len,
-					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_no_send_page(mdev, bvec.bv_page,
+					 bvec.bv_offset, bvec.bv_len,
+					 bio_iter_last(bio, iter)
+					 ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_send_page(mdev, bvec->bv_page,
-				      bvec->bv_offset, bvec->bv_len,
-				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_send_page(mdev, bvec.bv_page,
+				      bvec.bv_offset, bvec.bv_len,
+				      bio_iter_last(bio, iter) ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 5326c22cdb9d..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 			   sector_t sector, int data_size)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *bio;
-	int dgs, err, i, expect;
+	int dgs, err, expect;
 	void *dig_in = mdev->tconn->int_dig_in;
 	void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1617,11 +1618,11 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 	bio = req->master_bio;
 	D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-	bio_for_each_segment(bvec, bio, i) {
-		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-		expect = min_t(int, data_size, bvec->bv_len);
+	bio_for_each_segment(bvec, bio, iter) {
+		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+		expect = min_t(int, data_size, bvec.bv_len);
 		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-		kunmap(bvec->bv_page);
+		kunmap(bvec.bv_page);
 		if (err)
 			return err;
 		data_size -= expect;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	desc.tfm = tfm;
 	desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 	sg_init_table(&sg, 1);
 	crypto_hash_init(&desc);
 
-	bio_for_each_segment(bvec, bio, i) {
-		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+	bio_for_each_segment(bvec, bio, iter) {
+		sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		crypto_hash_update(&desc, &sg, sg.length);
 	}
 	crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6a86fe7b730f..6b29c4422828 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	int size;
 	struct req_iterator iter;
 	char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
 	size = 0;
 
 	rq_for_each_segment(bv, current_req, iter) {
-		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+		if (page_address(bv.bv_page) + bv.bv_offset != base + size)
 			break;
 
-		size += bv->bv_len;
+		size += bv.bv_len;
 	}
 
 	return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
 	int remaining;		/* number of transferred 512-byte sectors */
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *buffer;
 	char *dma_buffer;
 	int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 		if (!remaining)
 			break;
 
-		size = bv->bv_len;
+		size = bv.bv_len;
 		SUPBOUND(size, remaining);
 
-		buffer = page_address(bv->bv_page) + bv->bv_offset;
+		buffer = page_address(bv.bv_page) + bv.bv_offset;
 		if (dma_buffer + size >
 		    floppy_track_buffer + (max_buffer_sectors << 10) ||
 		    dma_buffer < floppy_track_buffer) {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f5e39989adde..33fde3a39759 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
 	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 			struct page *page);
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct page *page = NULL;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (lo->transfer != transfer_none) {
 		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 		do_lo_send = do_lo_send_direct_write;
 	}
 
-	bio_for_each_segment(bvec, bio, i) {
-		ret = do_lo_send(lo, bvec, pos, page);
+	bio_for_each_segment(bvec, bio, iter) {
+		ret = do_lo_send(lo, &bvec, pos, page);
 		if (ret < 0)
 			break;
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	if (page) {
 		kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	ssize_t s;
-	int i;
 
-	bio_for_each_segment(bvec, bio, i) {
-		s = do_lo_receive(lo, bvec, bsize, pos);
+	bio_for_each_segment(bvec, bio, iter) {
+		s = do_lo_receive(lo, &bvec, bsize, pos);
 		if (s < 0)
 			return s;
 
-		if (s != bvec->bv_len) {
+		if (s != bvec.bv_len) {
 			zero_fill_bio(bio);
 			break;
 		}
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	return 0;
 }
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 69e9eb5a6b34..52b2f2a71470 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct driver_data *dd = queue->queuedata;
 	struct scatterlist *sg;
-	struct bio_vec *bvec;
-	int i, nents = 0;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	int nents = 0;
 	int tag = 0, unaligned = 0;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -4026,11 +4027,11 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 		}
 
 		/* Create the scatter list for this bio. */
-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment(bvec, bio, iter) {
 			sg_set_page(&sg[nents],
-					bvec->bv_page,
-					bvec->bv_len,
-					bvec->bv_offset);
+					bvec.bv_page,
+					bvec.bv_len,
+					bvec.bv_offset);
 			nents++;
 		}
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..aa362f493216 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 		/*
 		 * we are really probing at internals to determine
 		 * whether to set MSG_MORE or not...
@@ -281,8 +281,8 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 			if (!rq_iter_last(req, iter))
 				flags = MSG_MORE;
 			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					nbd->disk->disk_name, req, bvec->bv_len);
-			result = sock_send_bvec(nbd, bvec, flags);
+					nbd->disk->disk_name, req, bvec.bv_len);
+			result = sock_send_bvec(nbd, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 			nbd->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, bvec);
+			result = sock_recv_bvec(nbd, &bvec);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 				return req;
 			}
 			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				nbd->disk->disk_name, req, bvec->bv_len);
+				nbd->disk->disk_name, req, bvec.bv_len);
 		}
 	}
 	return req;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 53d217381873..5539d2920872 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -550,9 +550,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-	struct bio_vec *bvec, *bvprv = NULL;
+	struct bio_vec bvec, bvprv;
+	struct bvec_iter iter;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int first = 1;
 
 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
@@ -560,25 +562,28 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 			(nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	bio_for_each_segment(bvec, bio, i) {
-		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-			sg->length += bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+			sg->length += bvec.bv_len;
 		} else {
-			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				return nvme_split_and_submit(bio, nvmeq, i,
-							     length, 0);
+			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+				return nvme_split_and_submit(bio, nvmeq,
+							     iter.bi_idx,
+							     length, 0);
 
 			sg = sg ? sg + 1 : iod->sg;
-			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-				    bvec->bv_offset);
+			sg_set_page(sg, bvec.bv_page,
+				    bvec.bv_len, bvec.bv_offset);
 			nsegs++;
 		}
 
-		if (split_len - length < bvec->bv_len)
-			return nvme_split_and_submit(bio, nvmeq, i, split_len,
-						     split_len - length);
-		length += bvec->bv_len;
+		if (split_len - length < bvec.bv_len)
+			return nvme_split_and_submit(bio, nvmeq, iter.bi_idx,
+						     split_len,
+						     split_len - length);
+		length += bvec.bv_len;
 		bvprv = bvec;
+		first = 0;
 	}
 	iod->nents = nsegs;
 	sg_mark_end(sg);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 464be78a0836..1c6edb9a9960 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,7 +94,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
 	unsigned int offset = 0;
 	struct req_iterator iter;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	unsigned int i = 0;
 	size_t size;
 	void *buf;
@@ -106,14 +106,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 			__func__, __LINE__, i, bio_segments(iter.bio),
 			bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
 
-		size = bvec->bv_len;
-		buf = bvec_kmap_irq(bvec, &flags);
+		size = bvec.bv_len;
+		buf = bvec_kmap_irq(&bvec, &flags);
 		if (gather)
 			memcpy(dev->bounce_buf+offset, buf, size);
 		else
 			memcpy(buf, dev->bounce_buf+offset, size);
 		offset += size;
-		flush_kernel_dcache_page(bvec->bv_page);
+		flush_kernel_dcache_page(bvec.bv_page);
 		bvec_kunmap_irq(buf, &flags);
 		i++;
 	}
@@ -130,7 +130,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
 	unsigned int n = 0;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct req_iterator iter;
 
 	rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 320bbfc9b902..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -555,14 +555,14 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	const char *op = write ? "write" : "read";
 	loff_t offset = bio->bi_iter.bi_sector << 9;
 	int error = 0;
-	struct bio_vec *bvec;
-	unsigned int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *next;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		/* PS3 is ppc64, so we don't handle highmem */
-		char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-		size_t len = bvec->bv_len, retlen;
+		char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+		size_t len = bvec.bv_len, retlen;
 
 		dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
 			len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a8f4fe2d4d1b..20e8ab35736b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1109,23 +1109,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	unsigned long flags;
 	void *buf;
-	int i;
 	int pos = 0;
 
 	while (chain) {
-		bio_for_each_segment(bv, chain, i) {
-			if (pos + bv->bv_len > start_ofs) {
+		bio_for_each_segment(bv, chain, iter) {
+			if (pos + bv.bv_len > start_ofs) {
 				int remainder = max(start_ofs - pos, 0);
-				buf = bvec_kmap_irq(bv, &flags);
+				buf = bvec_kmap_irq(&bv, &flags);
 				memset(buf + remainder, 0,
-				       bv->bv_len - remainder);
-				flush_dcache_page(bv->bv_page);
+				       bv.bv_len - remainder);
+				flush_dcache_page(bv.bv_page);
 				bvec_kunmap_irq(buf, &flags);
 			}
-			pos += bv->bv_len;
+			pos += bv.bv_len;
 		}
 
 		chain = chain->bi_next;
@@ -1173,11 +1173,11 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 					unsigned int len,
 					gfp_t gfpmask)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	struct bvec_iter end_iter;
 	unsigned int resid;
-	unsigned short idx;
 	unsigned int voff;
-	unsigned short end_idx;
 	unsigned short vcnt;
 	struct bio *bio;
 
@@ -1196,22 +1196,22 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	/* Find first affected segment... */
 
 	resid = offset;
-	bio_for_each_segment(bv, bio_src, idx) {
-		if (resid < bv->bv_len)
+	bio_for_each_segment(bv, bio_src, iter) {
+		if (resid < bv.bv_len)
 			break;
-		resid -= bv->bv_len;
+		resid -= bv.bv_len;
 	}
 	voff = resid;
 
 	/* ...and the last affected segment */
 
 	resid += len;
-	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
-		if (resid <= bv->bv_len)
+	__bio_for_each_segment(bv, bio_src, end_iter, iter) {
+		if (resid <= bv.bv_len)
 			break;
-		resid -= bv->bv_len;
+		resid -= bv.bv_len;
 	}
-	vcnt = end_idx - idx + 1;
+	vcnt = end_iter.bi_idx - iter.bi_idx + 1;
 
 	/* Build the clone */
 
@@ -1229,7 +1229,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	 * Copy over our part of the bio_vec, then update the first
 	 * and last (or only) entries.
 	 */
-	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
+	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[iter.bi_idx],
 	       vcnt * sizeof (struct bio_vec));
 	bio->bi_io_vec[0].bv_offset += voff;
 	if (vcnt > 1) {
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 3716633be3c2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   void *cb_data)
 {
 	struct list_head dma_list[RSXX_MAX_TARGETS];
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	unsigned long long addr8;
 	unsigned int laddr;
 	unsigned int bv_len;
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			bv_len -= RSXX_HW_BLK_SIZE;
 		}
 	} else {
-		bio_for_each_segment(bvec, bio, i) {
-			bv_len = bvec->bv_len;
-			bv_off = bvec->bv_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			bv_len = bvec.bv_len;
+			bv_off = bvec.bv_offset;
 
 			while (bv_len > 0) {
 				tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 				st = rsxx_queue_dma(card, &dma_list[tgt],
 							bio_data_dir(bio),
 							dma_off, dma_len,
-							laddr, bvec->bv_page,
+							laddr, bvec.bv_page,
 							bv_off, cb, cb_data);
 				if (st)
 					goto bvec_err;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 038a6d2aced3..b62f37925374 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
 	struct bio_vec *bv;
 	int n;
 
-	__bio_for_each_segment(bv, b->bio, n, 0)
+	bio_for_each_segment_all(bv, b->bio, n)
 		__free_page(bv->bv_page);
 
 	__btree_node_write_done(cl);
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
 	struct bio_vec *bv;
 	void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-	bio_for_each_segment(bv, b->bio, j)
+	bio_for_each_segment_all(bv, b->bio, j)
 		memcpy(page_address(bv->bv_page),
 		       base + j * PAGE_SIZE, PAGE_SIZE);
 
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 92b3fd468a03..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec *bv;
+	struct bio_vec bv, *bv2;
+	struct bvec_iter iter;
 	int i;
 
 	check = bio_clone(bio, GFP_NOIO);
@@ -185,13 +186,13 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
 	submit_bio_wait(READ_SYNC, check);
 
-	bio_for_each_segment(bv, bio, i) {
-		void *p1 = kmap_atomic(bv->bv_page);
-		void *p2 = page_address(check->bi_io_vec[i].bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *p1 = kmap_atomic(bv.bv_page);
+		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-		cache_set_err_on(memcmp(p1 + bv->bv_offset,
-					p2 + bv->bv_offset,
-					bv->bv_len),
+		cache_set_err_on(memcmp(p1 + bv.bv_offset,
+					p2 + bv.bv_offset,
+					bv.bv_len),
 				 dc->disk.c,
 				 "verify failed at dev %s sector %llu",
 				 bdevname(dc->bdev, name),
@@ -200,8 +201,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 		kunmap_atomic(p1);
 	}
 
-	bio_for_each_segment_all(bv, check, i)
-		__free_page(bv->bv_page);
+	bio_for_each_segment_all(bv2, check, i)
+		__free_page(bv2->bv_page);
 out_put:
 	bio_put(check);
 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index dc44f0689eb7..9b5b6a41a9b6 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -22,12 +22,12 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 static void bch_generic_make_request_hack(struct bio *bio)
 {
 	if (bio->bi_iter.bi_idx) {
-		int i;
-		struct bio_vec *bv;
+		struct bio_vec bv;
+		struct bvec_iter iter;
 		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
 
-		bio_for_each_segment(bv, bio, i)
-			clone->bi_io_vec[clone->bi_vcnt++] = *bv;
+		bio_for_each_segment(bv, bio, iter)
+			clone->bi_io_vec[clone->bi_vcnt++] = bv;
 
 		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
@@ -73,8 +73,9 @@ static void bch_generic_make_request_hack(struct bio *bio)
 struct bio *bch_bio_split(struct bio *bio, int sectors,
 			  gfp_t gfp, struct bio_set *bs)
 {
-	unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
-	struct bio_vec *bv;
+	unsigned vcnt = 0, nbytes = sectors << 9;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	struct bio *ret = NULL;
 
 	BUG_ON(sectors <= 0);
@@ -86,49 +87,35 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 		ret = bio_alloc_bioset(gfp, 1, bs);
 		if (!ret)
 			return NULL;
-		idx = 0;
 		goto out;
 	}
 
-	bio_for_each_segment(bv, bio, idx) {
-		vcnt = idx - bio->bi_iter.bi_idx;
+	bio_for_each_segment(bv, bio, iter) {
+		vcnt++;
 
-		if (!nbytes) {
-			ret = bio_alloc_bioset(gfp, vcnt, bs);
-			if (!ret)
-				return NULL;
+		if (nbytes <= bv.bv_len)
+			break;
 
-			memcpy(ret->bi_io_vec, __bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
+		nbytes -= bv.bv_len;
+	}
 
-			break;
-		} else if (nbytes < bv->bv_len) {
-			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-			if (!ret)
-				return NULL;
+	ret = bio_alloc_bioset(gfp, vcnt, bs);
+	if (!ret)
+		return NULL;
 
-			memcpy(ret->bi_io_vec, __bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
+	bio_for_each_segment(bv, bio, iter) {
+		ret->bi_io_vec[ret->bi_vcnt++] = bv;
 
-			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-			bv->bv_offset += nbytes;
-			bv->bv_len -= nbytes;
+		if (ret->bi_vcnt == vcnt)
 			break;
-		}
-
-		nbytes -= bv->bv_len;
 	}
+
+	ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
 out:
 	ret->bi_bdev = bio->bi_bdev;
 	ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 	ret->bi_iter.bi_size = sectors << 9;
 	ret->bi_rw = bio->bi_rw;
-	ret->bi_vcnt = vcnt;
-	ret->bi_max_vecs = vcnt;
-
-	bio->bi_iter.bi_sector += sectors;
-	bio->bi_iter.bi_size -= sectors << 9;
-	bio->bi_iter.bi_idx = idx;
 
 	if (bio_integrity(bio)) {
 		if (bio_integrity_clone(ret, bio, gfp)) {
@@ -137,9 +124,10 @@ out:
 		}
 
 		bio_integrity_trim(ret, 0, bio_sectors(ret));
-		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
 	}
 
+	bio_advance(bio, ret->bi_iter.bi_size);
+
 	return ret;
 }
 
@@ -155,12 +143,13 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 
 	if (bio_segments(bio) > max_segments ||
 	    q->merge_bvec_fn) {
-		struct bio_vec *bv;
-		int i, seg = 0;
+		struct bio_vec bv;
+		struct bvec_iter iter;
+		unsigned seg = 0;
 
 		ret = 0;
 
-		bio_for_each_segment(bv, bio, i) {
+		bio_for_each_segment(bv, bio, iter) {
 			struct bvec_merge_data bvm = {
 				.bi_bdev	= bio->bi_bdev,
 				.bi_sector	= bio->bi_iter.bi_sector,
@@ -172,11 +161,11 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 				break;
 
 			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
+			    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
 				break;
 
 			seg++;
-			ret += bv->bv_len >> 9;
+			ret += bv.bv_len >> 9;
 		}
 	}
 
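The bch_bio_split() rewrite above shows the payoff most clearly: instead of hand-editing bi_sector, bi_size and bi_idx on the parent bio, the split path now trims it with a single bio_advance() call. Roughly what advancing the iterator does, as a simplified sketch (the real helper lives in the block core and also advances any integrity payload):

	static void bvec_iter_advance_sketch(struct bio *bio, unsigned nbytes)
	{
		struct bvec_iter *it = &bio->bi_iter;

		it->bi_sector += nbytes >> 9;
		it->bi_size -= nbytes;

		while (nbytes) {
			unsigned seg = min(nbytes,
					   bio->bi_io_vec[it->bi_idx].bv_len -
					   it->bi_bvec_done);

			nbytes -= seg;
			it->bi_bvec_done += seg;

			/* a fully consumed bvec moves bi_idx forward */
			if (it->bi_bvec_done == bio->bi_io_vec[it->bi_idx].bv_len) {
				it->bi_bvec_done = 0;
				it->bi_idx++;
			}
		}
	}
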
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 47a9bbc75124..4c0a422fd49f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -198,14 +198,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	uint64_t csum = 0;
-	int i;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = bch_crc64_update(csum, d, bv->bv_len);
-		kunmap(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		csum = bch_crc64_update(csum, d, bv.bv_len);
+		kunmap(bv.bv_page);
 	}
 
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -1182,17 +1182,17 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
-	struct bio_vec *bv;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
 	/* Zero fill bio */
 
-	bio_for_each_segment(bv, bio, i) {
-		unsigned j = min(bv->bv_len >> 9, sectors);
+	bio_for_each_segment(bv, bio, iter) {
+		unsigned j = min(bv.bv_len >> 9, sectors);
 
-		void *p = kmap(bv->bv_page);
-		memset(p + bv->bv_offset, 0, j << 9);
-		kunmap(bv->bv_page);
+		void *p = kmap(bv.bv_page);
+		memset(p + bv.bv_offset, 0, j << 9);
+		kunmap(bv.bv_page);
 
 		sectors -= j;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a5d9c0ee4d60..bef353c51c04 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -937,9 +937,9 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
 	sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-	struct bio_vec *bvl;
+	struct bio_vec bvl;
+	struct bvec_iter iter;
 	struct page *bio_page;
-	int i;
 	int page_offset;
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
@@ -953,8 +953,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 		flags |= ASYNC_TX_FENCE;
 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-	bio_for_each_segment(bvl, bio, i) {
-		int len = bvl->bv_len;
+	bio_for_each_segment(bvl, bio, iter) {
+		int len = bvl.bv_len;
 		int clen;
 		int b_offset = 0;
 
@@ -970,8 +970,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			clen = len;
 
 		if (clen > 0) {
-			b_offset += bvl->bv_offset;
-			bio_page = bvl->bv_page;
+			b_offset += bvl.bv_offset;
+			bio_page = bvl.bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
 						  b_offset, clen, &submit);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 92bd22ce6760..9cbc567698ce 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
504 | struct dasd_diag_req *dreq; | 504 | struct dasd_diag_req *dreq; |
505 | struct dasd_diag_bio *dbio; | 505 | struct dasd_diag_bio *dbio; |
506 | struct req_iterator iter; | 506 | struct req_iterator iter; |
507 | struct bio_vec *bv; | 507 | struct bio_vec bv; |
508 | char *dst; | 508 | char *dst; |
509 | unsigned int count, datasize; | 509 | unsigned int count, datasize; |
510 | sector_t recid, first_rec, last_rec; | 510 | sector_t recid, first_rec, last_rec; |
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
525 | /* Check struct bio and count the number of blocks for the request. */ | 525 | /* Check struct bio and count the number of blocks for the request. */ |
526 | count = 0; | 526 | count = 0; |
527 | rq_for_each_segment(bv, req, iter) { | 527 | rq_for_each_segment(bv, req, iter) { |
528 | if (bv->bv_len & (blksize - 1)) | 528 | if (bv.bv_len & (blksize - 1)) |
529 | /* Fba can only do full blocks. */ | 529 | /* Fba can only do full blocks. */ |
530 | return ERR_PTR(-EINVAL); | 530 | return ERR_PTR(-EINVAL); |
531 | count += bv->bv_len >> (block->s2b_shift + 9); | 531 | count += bv.bv_len >> (block->s2b_shift + 9); |
532 | } | 532 | } |
533 | /* Paranoia. */ | 533 | /* Paranoia. */ |
534 | if (count != last_rec - first_rec + 1) | 534 | if (count != last_rec - first_rec + 1) |
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
545 | dbio = dreq->bio; | 545 | dbio = dreq->bio; |
546 | recid = first_rec; | 546 | recid = first_rec; |
547 | rq_for_each_segment(bv, req, iter) { | 547 | rq_for_each_segment(bv, req, iter) { |
548 | dst = page_address(bv->bv_page) + bv->bv_offset; | 548 | dst = page_address(bv.bv_page) + bv.bv_offset; |
549 | for (off = 0; off < bv->bv_len; off += blksize) { | 549 | for (off = 0; off < bv.bv_len; off += blksize) { |
550 | memset(dbio, 0, sizeof (struct dasd_diag_bio)); | 550 | memset(dbio, 0, sizeof (struct dasd_diag_bio)); |
551 | dbio->type = rw_cmd; | 551 | dbio->type = rw_cmd; |
552 | dbio->block_number = recid + 1; | 552 | dbio->block_number = recid + 1; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index cee7e2708a1f..70d177017329 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
2551 | struct dasd_ccw_req *cqr; | 2551 | struct dasd_ccw_req *cqr; |
2552 | struct ccw1 *ccw; | 2552 | struct ccw1 *ccw; |
2553 | struct req_iterator iter; | 2553 | struct req_iterator iter; |
2554 | struct bio_vec *bv; | 2554 | struct bio_vec bv; |
2555 | char *dst; | 2555 | char *dst; |
2556 | unsigned int off; | 2556 | unsigned int off; |
2557 | int count, cidaw, cplength, datasize; | 2557 | int count, cidaw, cplength, datasize; |
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
2573 | count = 0; | 2573 | count = 0; |
2574 | cidaw = 0; | 2574 | cidaw = 0; |
2575 | rq_for_each_segment(bv, req, iter) { | 2575 | rq_for_each_segment(bv, req, iter) { |
2576 | if (bv->bv_len & (blksize - 1)) | 2576 | if (bv.bv_len & (blksize - 1)) |
2577 | /* Eckd can only do full blocks. */ | 2577 | /* Eckd can only do full blocks. */ |
2578 | return ERR_PTR(-EINVAL); | 2578 | return ERR_PTR(-EINVAL); |
2579 | count += bv->bv_len >> (block->s2b_shift + 9); | 2579 | count += bv.bv_len >> (block->s2b_shift + 9); |
2580 | #if defined(CONFIG_64BIT) | 2580 | #if defined(CONFIG_64BIT) |
2581 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) | 2581 | if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) |
2582 | cidaw += bv->bv_len >> (block->s2b_shift + 9); | 2582 | cidaw += bv.bv_len >> (block->s2b_shift + 9); |
2583 | #endif | 2583 | #endif |
2584 | } | 2584 | } |
2585 | /* Paranoia. */ | 2585 | /* Paranoia. */ |
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
2650 | last_rec - recid + 1, cmd, basedev, blksize); | 2650 | last_rec - recid + 1, cmd, basedev, blksize); |
2651 | } | 2651 | } |
2652 | rq_for_each_segment(bv, req, iter) { | 2652 | rq_for_each_segment(bv, req, iter) { |
2653 | dst = page_address(bv->bv_page) + bv->bv_offset; | 2653 | dst = page_address(bv.bv_page) + bv.bv_offset; |
2654 | if (dasd_page_cache) { | 2654 | if (dasd_page_cache) { |
2655 | char *copy = kmem_cache_alloc(dasd_page_cache, | 2655 | char *copy = kmem_cache_alloc(dasd_page_cache, |
2656 | GFP_DMA | __GFP_NOWARN); | 2656 | GFP_DMA | __GFP_NOWARN); |
2657 | if (copy && rq_data_dir(req) == WRITE) | 2657 | if (copy && rq_data_dir(req) == WRITE) |
2658 | memcpy(copy + bv->bv_offset, dst, bv->bv_len); | 2658 | memcpy(copy + bv.bv_offset, dst, bv.bv_len); |
2659 | if (copy) | 2659 | if (copy) |
2660 | dst = copy + bv->bv_offset; | 2660 | dst = copy + bv.bv_offset; |
2661 | } | 2661 | } |
2662 | for (off = 0; off < bv->bv_len; off += blksize) { | 2662 | for (off = 0; off < bv.bv_len; off += blksize) { |
2663 | sector_t trkid = recid; | 2663 | sector_t trkid = recid; |
2664 | unsigned int recoffs = sector_div(trkid, blk_per_trk); | 2664 | unsigned int recoffs = sector_div(trkid, blk_per_trk); |
2665 | rcmd = cmd; | 2665 | rcmd = cmd; |
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
2735 | struct dasd_ccw_req *cqr; | 2735 | struct dasd_ccw_req *cqr; |
2736 | struct ccw1 *ccw; | 2736 | struct ccw1 *ccw; |
2737 | struct req_iterator iter; | 2737 | struct req_iterator iter; |
2738 | struct bio_vec *bv; | 2738 | struct bio_vec bv; |
2739 | char *dst, *idaw_dst; | 2739 | char *dst, *idaw_dst; |
2740 | unsigned int cidaw, cplength, datasize; | 2740 | unsigned int cidaw, cplength, datasize; |
2741 | unsigned int tlf; | 2741 | unsigned int tlf; |
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
2813 | idaw_dst = NULL; | 2813 | idaw_dst = NULL; |
2814 | idaw_len = 0; | 2814 | idaw_len = 0; |
2815 | rq_for_each_segment(bv, req, iter) { | 2815 | rq_for_each_segment(bv, req, iter) { |
2816 | dst = page_address(bv->bv_page) + bv->bv_offset; | 2816 | dst = page_address(bv.bv_page) + bv.bv_offset; |
2817 | seg_len = bv->bv_len; | 2817 | seg_len = bv.bv_len; |
2818 | while (seg_len) { | 2818 | while (seg_len) { |
2819 | if (new_track) { | 2819 | if (new_track) { |
2820 | trkid = recid; | 2820 | trkid = recid; |
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
3039 | { | 3039 | { |
3040 | struct dasd_ccw_req *cqr; | 3040 | struct dasd_ccw_req *cqr; |
3041 | struct req_iterator iter; | 3041 | struct req_iterator iter; |
3042 | struct bio_vec *bv; | 3042 | struct bio_vec bv; |
3043 | char *dst; | 3043 | char *dst; |
3044 | unsigned int trkcount, ctidaw; | 3044 | unsigned int trkcount, ctidaw; |
3045 | unsigned char cmd; | 3045 | unsigned char cmd; |
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
3125 | new_track = 1; | 3125 | new_track = 1; |
3126 | recid = first_rec; | 3126 | recid = first_rec; |
3127 | rq_for_each_segment(bv, req, iter) { | 3127 | rq_for_each_segment(bv, req, iter) { |
3128 | dst = page_address(bv->bv_page) + bv->bv_offset; | 3128 | dst = page_address(bv.bv_page) + bv.bv_offset; |
3129 | seg_len = bv->bv_len; | 3129 | seg_len = bv.bv_len; |
3130 | while (seg_len) { | 3130 | while (seg_len) { |
3131 | if (new_track) { | 3131 | if (new_track) { |
3132 | trkid = recid; | 3132 | trkid = recid; |
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
3158 | } | 3158 | } |
3159 | } else { | 3159 | } else { |
3160 | rq_for_each_segment(bv, req, iter) { | 3160 | rq_for_each_segment(bv, req, iter) { |
3161 | dst = page_address(bv->bv_page) + bv->bv_offset; | 3161 | dst = page_address(bv.bv_page) + bv.bv_offset; |
3162 | last_tidaw = itcw_add_tidaw(itcw, 0x00, | 3162 | last_tidaw = itcw_add_tidaw(itcw, 0x00, |
3163 | dst, bv->bv_len); | 3163 | dst, bv.bv_len); |
3164 | if (IS_ERR(last_tidaw)) { | 3164 | if (IS_ERR(last_tidaw)) { |
3165 | ret = -EINVAL; | 3165 | ret = -EINVAL; |
3166 | goto out_error; | 3166 | goto out_error; |
@@ -3276,7 +3276,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, | |||
3276 | struct dasd_ccw_req *cqr; | 3276 | struct dasd_ccw_req *cqr; |
3277 | struct ccw1 *ccw; | 3277 | struct ccw1 *ccw; |
3278 | struct req_iterator iter; | 3278 | struct req_iterator iter; |
3279 | struct bio_vec *bv; | 3279 | struct bio_vec bv; |
3280 | char *dst; | 3280 | char *dst; |
3281 | unsigned char cmd; | 3281 | unsigned char cmd; |
3282 | unsigned int trkcount; | 3282 | unsigned int trkcount; |
@@ -3376,8 +3376,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, | |||
3376 | idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); | 3376 | idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); |
3377 | } | 3377 | } |
3378 | rq_for_each_segment(bv, req, iter) { | 3378 | rq_for_each_segment(bv, req, iter) { |
3379 | dst = page_address(bv->bv_page) + bv->bv_offset; | 3379 | dst = page_address(bv.bv_page) + bv.bv_offset; |
3380 | seg_len = bv->bv_len; | 3380 | seg_len = bv.bv_len; |
3381 | if (cmd == DASD_ECKD_CCW_READ_TRACK) | 3381 | if (cmd == DASD_ECKD_CCW_READ_TRACK) |
3382 | memset(dst, 0, seg_len); | 3382 | memset(dst, 0, seg_len); |
3383 | if (!len_to_track_end) { | 3383 | if (!len_to_track_end) { |
@@ -3422,7 +3422,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
3422 | struct dasd_eckd_private *private; | 3422 | struct dasd_eckd_private *private; |
3423 | struct ccw1 *ccw; | 3423 | struct ccw1 *ccw; |
3424 | struct req_iterator iter; | 3424 | struct req_iterator iter; |
3425 | struct bio_vec *bv; | 3425 | struct bio_vec bv; |
3426 | char *dst, *cda; | 3426 | char *dst, *cda; |
3427 | unsigned int blksize, blk_per_trk, off; | 3427 | unsigned int blksize, blk_per_trk, off; |
3428 | sector_t recid; | 3428 | sector_t recid; |
@@ -3440,8 +3440,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
3440 | if (private->uses_cdl == 0 || recid > 2*blk_per_trk) | 3440 | if (private->uses_cdl == 0 || recid > 2*blk_per_trk) |
3441 | ccw++; | 3441 | ccw++; |
3442 | rq_for_each_segment(bv, req, iter) { | 3442 | rq_for_each_segment(bv, req, iter) { |
3443 | dst = page_address(bv->bv_page) + bv->bv_offset; | 3443 | dst = page_address(bv.bv_page) + bv.bv_offset; |
3444 | for (off = 0; off < bv->bv_len; off += blksize) { | 3444 | for (off = 0; off < bv.bv_len; off += blksize) { |
3445 | /* Skip locate record. */ | 3445 | /* Skip locate record. */ |
3446 | if (private->uses_cdl && recid <= 2*blk_per_trk) | 3446 | if (private->uses_cdl && recid <= 2*blk_per_trk) |
3447 | ccw++; | 3447 | ccw++; |
@@ -3452,7 +3452,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
3452 | cda = (char *)((addr_t) ccw->cda); | 3452 | cda = (char *)((addr_t) ccw->cda); |
3453 | if (dst != cda) { | 3453 | if (dst != cda) { |
3454 | if (rq_data_dir(req) == READ) | 3454 | if (rq_data_dir(req) == READ) |
3455 | memcpy(dst, cda, bv->bv_len); | 3455 | memcpy(dst, cda, bv.bv_len); |
3456 | kmem_cache_free(dasd_page_cache, | 3456 | kmem_cache_free(dasd_page_cache, |
3457 | (void *)((addr_t)cda & PAGE_MASK)); | 3457 | (void *)((addr_t)cda & PAGE_MASK)); |
3458 | } | 3458 | } |
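The dasd block-counting loops above all follow one shape; a condensed sketch, assuming blksize and s2b_shift come from the device geometry as they do in the driver:

	static int count_request_blocks(struct request *req, unsigned int blksize,
					unsigned int s2b_shift)
	{
		struct req_iterator iter;
		struct bio_vec bv;
		int count = 0;

		rq_for_each_segment(bv, req, iter) {
			/* The hardware can only transfer whole blocks. */
			if (bv.bv_len & (blksize - 1))
				return -EINVAL;
			count += bv.bv_len >> (s2b_shift + 9);
		}

		return count;
	}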
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 9cbc8c32ba59..2c8e68bf9a1c 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
260 | struct dasd_ccw_req *cqr; | 260 | struct dasd_ccw_req *cqr; |
261 | struct ccw1 *ccw; | 261 | struct ccw1 *ccw; |
262 | struct req_iterator iter; | 262 | struct req_iterator iter; |
263 | struct bio_vec *bv; | 263 | struct bio_vec bv; |
264 | char *dst; | 264 | char *dst; |
265 | int count, cidaw, cplength, datasize; | 265 | int count, cidaw, cplength, datasize; |
266 | sector_t recid, first_rec, last_rec; | 266 | sector_t recid, first_rec, last_rec; |
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
283 | count = 0; | 283 | count = 0; |
284 | cidaw = 0; | 284 | cidaw = 0; |
285 | rq_for_each_segment(bv, req, iter) { | 285 | rq_for_each_segment(bv, req, iter) { |
286 | if (bv->bv_len & (blksize - 1)) | 286 | if (bv.bv_len & (blksize - 1)) |
287 | /* Fba can only do full blocks. */ | 287 | /* Fba can only do full blocks. */ |
288 | return ERR_PTR(-EINVAL); | 288 | return ERR_PTR(-EINVAL); |
289 | count += bv->bv_len >> (block->s2b_shift + 9); | 289 | count += bv.bv_len >> (block->s2b_shift + 9); |
290 | #if defined(CONFIG_64BIT) | 290 | #if defined(CONFIG_64BIT) |
291 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) | 291 | if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) |
292 | cidaw += bv->bv_len / blksize; | 292 | cidaw += bv.bv_len / blksize; |
293 | #endif | 293 | #endif |
294 | } | 294 | } |
295 | /* Paranoia. */ | 295 | /* Paranoia. */ |
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
326 | } | 326 | } |
327 | recid = first_rec; | 327 | recid = first_rec; |
328 | rq_for_each_segment(bv, req, iter) { | 328 | rq_for_each_segment(bv, req, iter) { |
329 | dst = page_address(bv->bv_page) + bv->bv_offset; | 329 | dst = page_address(bv.bv_page) + bv.bv_offset; |
330 | if (dasd_page_cache) { | 330 | if (dasd_page_cache) { |
331 | char *copy = kmem_cache_alloc(dasd_page_cache, | 331 | char *copy = kmem_cache_alloc(dasd_page_cache, |
332 | GFP_DMA | __GFP_NOWARN); | 332 | GFP_DMA | __GFP_NOWARN); |
333 | if (copy && rq_data_dir(req) == WRITE) | 333 | if (copy && rq_data_dir(req) == WRITE) |
334 | memcpy(copy + bv->bv_offset, dst, bv->bv_len); | 334 | memcpy(copy + bv.bv_offset, dst, bv.bv_len); |
335 | if (copy) | 335 | if (copy) |
336 | dst = copy + bv->bv_offset; | 336 | dst = copy + bv.bv_offset; |
337 | } | 337 | } |
338 | for (off = 0; off < bv->bv_len; off += blksize) { | 338 | for (off = 0; off < bv.bv_len; off += blksize) { |
339 | /* Locate record for stupid devices. */ | 339 | /* Locate record for stupid devices. */ |
340 | if (private->rdc_data.mode.bits.data_chain == 0) { | 340 | if (private->rdc_data.mode.bits.data_chain == 0) { |
341 | ccw[-1].flags |= CCW_FLAG_CC; | 341 | ccw[-1].flags |= CCW_FLAG_CC; |
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
384 | struct dasd_fba_private *private; | 384 | struct dasd_fba_private *private; |
385 | struct ccw1 *ccw; | 385 | struct ccw1 *ccw; |
386 | struct req_iterator iter; | 386 | struct req_iterator iter; |
387 | struct bio_vec *bv; | 387 | struct bio_vec bv; |
388 | char *dst, *cda; | 388 | char *dst, *cda; |
389 | unsigned int blksize, off; | 389 | unsigned int blksize, off; |
390 | int status; | 390 | int status; |
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
399 | if (private->rdc_data.mode.bits.data_chain != 0) | 399 | if (private->rdc_data.mode.bits.data_chain != 0) |
400 | ccw++; | 400 | ccw++; |
401 | rq_for_each_segment(bv, req, iter) { | 401 | rq_for_each_segment(bv, req, iter) { |
402 | dst = page_address(bv->bv_page) + bv->bv_offset; | 402 | dst = page_address(bv.bv_page) + bv.bv_offset; |
403 | for (off = 0; off < bv->bv_len; off += blksize) { | 403 | for (off = 0; off < bv.bv_len; off += blksize) { |
404 | /* Skip locate record. */ | 404 | /* Skip locate record. */ |
405 | if (private->rdc_data.mode.bits.data_chain == 0) | 405 | if (private->rdc_data.mode.bits.data_chain == 0) |
406 | ccw++; | 406 | ccw++; |
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
411 | cda = (char *)((addr_t) ccw->cda); | 411 | cda = (char *)((addr_t) ccw->cda); |
412 | if (dst != cda) { | 412 | if (dst != cda) { |
413 | if (rq_data_dir(req) == READ) | 413 | if (rq_data_dir(req) == READ) |
414 | memcpy(dst, cda, bv->bv_len); | 414 | memcpy(dst, cda, bv.bv_len); |
415 | kmem_cache_free(dasd_page_cache, | 415 | kmem_cache_free(dasd_page_cache, |
416 | (void *)((addr_t)cda & PAGE_MASK)); | 416 | (void *)((addr_t)cda & PAGE_MASK)); |
417 | } | 417 | } |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 16814a8457f8..ebf41e228e55 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -808,12 +808,12 @@ static void | |||
808 | dcssblk_make_request(struct request_queue *q, struct bio *bio) | 808 | dcssblk_make_request(struct request_queue *q, struct bio *bio) |
809 | { | 809 | { |
810 | struct dcssblk_dev_info *dev_info; | 810 | struct dcssblk_dev_info *dev_info; |
811 | struct bio_vec *bvec; | 811 | struct bio_vec bvec; |
812 | struct bvec_iter iter; | ||
812 | unsigned long index; | 813 | unsigned long index; |
813 | unsigned long page_addr; | 814 | unsigned long page_addr; |
814 | unsigned long source_addr; | 815 | unsigned long source_addr; |
815 | unsigned long bytes_done; | 816 | unsigned long bytes_done; |
816 | int i; | ||
817 | 817 | ||
818 | bytes_done = 0; | 818 | bytes_done = 0; |
819 | dev_info = bio->bi_bdev->bd_disk->private_data; | 819 | dev_info = bio->bi_bdev->bd_disk->private_data; |
@@ -844,21 +844,21 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
844 | } | 844 | } |
845 | 845 | ||
846 | index = (bio->bi_iter.bi_sector >> 3); | 846 | index = (bio->bi_iter.bi_sector >> 3); |
847 | bio_for_each_segment(bvec, bio, i) { | 847 | bio_for_each_segment(bvec, bio, iter) { |
848 | page_addr = (unsigned long) | 848 | page_addr = (unsigned long) |
849 | page_address(bvec->bv_page) + bvec->bv_offset; | 849 | page_address(bvec.bv_page) + bvec.bv_offset; |
850 | source_addr = dev_info->start + (index<<12) + bytes_done; | 850 | source_addr = dev_info->start + (index<<12) + bytes_done; |
851 | if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) | 851 | if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) |
852 | // More paranoia. | 852 | // More paranoia. |
853 | goto fail; | 853 | goto fail; |
854 | if (bio_data_dir(bio) == READ) { | 854 | if (bio_data_dir(bio) == READ) { |
855 | memcpy((void*)page_addr, (void*)source_addr, | 855 | memcpy((void*)page_addr, (void*)source_addr, |
856 | bvec->bv_len); | 856 | bvec.bv_len); |
857 | } else { | 857 | } else { |
858 | memcpy((void*)source_addr, (void*)page_addr, | 858 | memcpy((void*)source_addr, (void*)page_addr, |
859 | bvec->bv_len); | 859 | bvec.bv_len); |
860 | } | 860 | } |
861 | bytes_done += bvec->bv_len; | 861 | bytes_done += bvec.bv_len; |
862 | } | 862 | } |
863 | bio_endio(bio, 0); | 863 | bio_endio(bio, 0); |
864 | return; | 864 | return; |
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index d0ab5019d885..76bed1743db1 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq) | |||
130 | struct aidaw *aidaw = scmrq->aidaw; | 130 | struct aidaw *aidaw = scmrq->aidaw; |
131 | struct msb *msb = &scmrq->aob->msb[0]; | 131 | struct msb *msb = &scmrq->aob->msb[0]; |
132 | struct req_iterator iter; | 132 | struct req_iterator iter; |
133 | struct bio_vec *bv; | 133 | struct bio_vec bv; |
134 | 134 | ||
135 | msb->bs = MSB_BS_4K; | 135 | msb->bs = MSB_BS_4K; |
136 | scmrq->aob->request.msb_count = 1; | 136 | scmrq->aob->request.msb_count = 1; |
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq) | |||
142 | msb->data_addr = (u64) aidaw; | 142 | msb->data_addr = (u64) aidaw; |
143 | 143 | ||
144 | rq_for_each_segment(bv, scmrq->request, iter) { | 144 | rq_for_each_segment(bv, scmrq->request, iter) { |
145 | WARN_ON(bv->bv_offset); | 145 | WARN_ON(bv.bv_offset); |
146 | msb->blk_count += bv->bv_len >> 12; | 146 | msb->blk_count += bv.bv_len >> 12; |
147 | aidaw->data_addr = (u64) page_address(bv->bv_page); | 147 | aidaw->data_addr = (u64) page_address(bv.bv_page); |
148 | aidaw++; | 148 | aidaw++; |
149 | } | 149 | } |
150 | } | 150 | } |
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 27f930cd657f..9aae909d47a5 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) | |||
122 | struct aidaw *aidaw = scmrq->aidaw; | 122 | struct aidaw *aidaw = scmrq->aidaw; |
123 | struct msb *msb = &scmrq->aob->msb[0]; | 123 | struct msb *msb = &scmrq->aob->msb[0]; |
124 | struct req_iterator iter; | 124 | struct req_iterator iter; |
125 | struct bio_vec *bv; | 125 | struct bio_vec bv; |
126 | int i = 0; | 126 | int i = 0; |
127 | u64 addr; | 127 | u64 addr; |
128 | 128 | ||
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) | |||
163 | i++; | 163 | i++; |
164 | } | 164 | } |
165 | rq_for_each_segment(bv, req, iter) { | 165 | rq_for_each_segment(bv, req, iter) { |
166 | aidaw->data_addr = (u64) page_address(bv->bv_page); | 166 | aidaw->data_addr = (u64) page_address(bv.bv_page); |
167 | aidaw++; | 167 | aidaw++; |
168 | i++; | 168 | i++; |
169 | } | 169 | } |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index dd4e73fdb323..3e530f9da8c4 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -184,11 +184,11 @@ static unsigned long xpram_highest_page_index(void) | |||
184 | static void xpram_make_request(struct request_queue *q, struct bio *bio) | 184 | static void xpram_make_request(struct request_queue *q, struct bio *bio) |
185 | { | 185 | { |
186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; | 186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; |
187 | struct bio_vec *bvec; | 187 | struct bio_vec bvec; |
188 | struct bvec_iter iter; | ||
188 | unsigned int index; | 189 | unsigned int index; |
189 | unsigned long page_addr; | 190 | unsigned long page_addr; |
190 | unsigned long bytes; | 191 | unsigned long bytes; |
191 | int i; | ||
192 | 192 | ||
193 | if ((bio->bi_iter.bi_sector & 7) != 0 || | 193 | if ((bio->bi_iter.bi_sector & 7) != 0 || |
194 | (bio->bi_iter.bi_size & 4095) != 0) | 194 | (bio->bi_iter.bi_size & 4095) != 0) |
@@ -200,10 +200,10 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio) | |||
200 | if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) | 200 | if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) |
201 | goto fail; | 201 | goto fail; |
202 | index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; | 202 | index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; |
203 | bio_for_each_segment(bvec, bio, i) { | 203 | bio_for_each_segment(bvec, bio, iter) { |
204 | page_addr = (unsigned long) | 204 | page_addr = (unsigned long) |
205 | kmap(bvec->bv_page) + bvec->bv_offset; | 205 | kmap(bvec.bv_page) + bvec.bv_offset; |
206 | bytes = bvec->bv_len; | 206 | bytes = bvec.bv_len; |
207 | if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) | 207 | if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) |
208 | /* More paranoia. */ | 208 | /* More paranoia. */ |
209 | goto fail; | 209 | goto fail; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 9d26637308be..7143e86af326 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1901 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | 1901 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); |
1902 | Mpi2SmpPassthroughRequest_t *mpi_request; | 1902 | Mpi2SmpPassthroughRequest_t *mpi_request; |
1903 | Mpi2SmpPassthroughReply_t *mpi_reply; | 1903 | Mpi2SmpPassthroughReply_t *mpi_reply; |
1904 | int rc, i; | 1904 | int rc; |
1905 | u16 smid; | 1905 | u16 smid; |
1906 | u32 ioc_state; | 1906 | u32 ioc_state; |
1907 | unsigned long timeleft; | 1907 | unsigned long timeleft; |
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1916 | void *pci_addr_out = NULL; | 1916 | void *pci_addr_out = NULL; |
1917 | u16 wait_state_count; | 1917 | u16 wait_state_count; |
1918 | struct request *rsp = req->next_rq; | 1918 | struct request *rsp = req->next_rq; |
1919 | struct bio_vec *bvec = NULL; | 1919 | struct bio_vec bvec; |
1920 | struct bvec_iter iter; | ||
1920 | 1921 | ||
1921 | if (!rsp) { | 1922 | if (!rsp) { |
1922 | printk(MPT2SAS_ERR_FMT "%s: the smp response space is " | 1923 | printk(MPT2SAS_ERR_FMT "%s: the smp response space is " |
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1955 | goto out; | 1956 | goto out; |
1956 | } | 1957 | } |
1957 | 1958 | ||
1958 | bio_for_each_segment(bvec, req->bio, i) { | 1959 | bio_for_each_segment(bvec, req->bio, iter) { |
1959 | memcpy(pci_addr_out + offset, | 1960 | memcpy(pci_addr_out + offset, |
1960 | page_address(bvec->bv_page) + bvec->bv_offset, | 1961 | page_address(bvec.bv_page) + bvec.bv_offset, |
1961 | bvec->bv_len); | 1962 | bvec.bv_len); |
1962 | offset += bvec->bv_len; | 1963 | offset += bvec.bv_len; |
1963 | } | 1964 | } |
1964 | } else { | 1965 | } else { |
1965 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), | 1966 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), |
@@ -2106,19 +2107,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2106 | u32 offset = 0; | 2107 | u32 offset = 0; |
2107 | u32 bytes_to_copy = | 2108 | u32 bytes_to_copy = |
2108 | le16_to_cpu(mpi_reply->ResponseDataLength); | 2109 | le16_to_cpu(mpi_reply->ResponseDataLength); |
2109 | bio_for_each_segment(bvec, rsp->bio, i) { | 2110 | bio_for_each_segment(bvec, rsp->bio, iter) { |
2110 | if (bytes_to_copy <= bvec->bv_len) { | 2111 | if (bytes_to_copy <= bvec.bv_len) { |
2111 | memcpy(page_address(bvec->bv_page) + | 2112 | memcpy(page_address(bvec.bv_page) + |
2112 | bvec->bv_offset, pci_addr_in + | 2113 | bvec.bv_offset, pci_addr_in + |
2113 | offset, bytes_to_copy); | 2114 | offset, bytes_to_copy); |
2114 | break; | 2115 | break; |
2115 | } else { | 2116 | } else { |
2116 | memcpy(page_address(bvec->bv_page) + | 2117 | memcpy(page_address(bvec.bv_page) + |
2117 | bvec->bv_offset, pci_addr_in + | 2118 | bvec.bv_offset, pci_addr_in + |
2118 | offset, bvec->bv_len); | 2119 | offset, bvec.bv_len); |
2119 | bytes_to_copy -= bvec->bv_len; | 2120 | bytes_to_copy -= bvec.bv_len; |
2120 | } | 2121 | } |
2121 | offset += bvec->bv_len; | 2122 | offset += bvec.bv_len; |
2122 | } | 2123 | } |
2123 | } | 2124 | } |
2124 | } else { | 2125 | } else { |
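The SMP reply path above drains a flat PCI buffer into the response bio segment by segment; a condensed sketch of that copy loop, assuming the bio's pages sit in the kernel direct map (page_address), as the driver's do:

	static void copy_buf_to_bio(struct bio *bio, const void *buf, u32 len)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;
		u32 off = 0;

		bio_for_each_segment(bvec, bio, iter) {
			u32 n = min_t(u32, len - off, bvec.bv_len);

			memcpy(page_address(bvec.bv_page) + bvec.bv_offset,
			       buf + off, n);
			off += n;
			if (off == len)
				break;
		}
	}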
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e771a88c6a74..196a67f2e95f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c | |||
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1884 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); | 1884 | struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); |
1885 | Mpi2SmpPassthroughRequest_t *mpi_request; | 1885 | Mpi2SmpPassthroughRequest_t *mpi_request; |
1886 | Mpi2SmpPassthroughReply_t *mpi_reply; | 1886 | Mpi2SmpPassthroughReply_t *mpi_reply; |
1887 | int rc, i; | 1887 | int rc; |
1888 | u16 smid; | 1888 | u16 smid; |
1889 | u32 ioc_state; | 1889 | u32 ioc_state; |
1890 | unsigned long timeleft; | 1890 | unsigned long timeleft; |
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1898 | void *pci_addr_out = NULL; | 1898 | void *pci_addr_out = NULL; |
1899 | u16 wait_state_count; | 1899 | u16 wait_state_count; |
1900 | struct request *rsp = req->next_rq; | 1900 | struct request *rsp = req->next_rq; |
1901 | struct bio_vec *bvec = NULL; | 1901 | struct bio_vec bvec; |
1902 | struct bvec_iter iter; | ||
1902 | 1903 | ||
1903 | if (!rsp) { | 1904 | if (!rsp) { |
1904 | pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", | 1905 | pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", |
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
1938 | goto out; | 1939 | goto out; |
1939 | } | 1940 | } |
1940 | 1941 | ||
1941 | bio_for_each_segment(bvec, req->bio, i) { | 1942 | bio_for_each_segment(bvec, req->bio, iter) { |
1942 | memcpy(pci_addr_out + offset, | 1943 | memcpy(pci_addr_out + offset, |
1943 | page_address(bvec->bv_page) + bvec->bv_offset, | 1944 | page_address(bvec.bv_page) + bvec.bv_offset, |
1944 | bvec->bv_len); | 1945 | bvec.bv_len); |
1945 | offset += bvec->bv_len; | 1946 | offset += bvec.bv_len; |
1946 | } | 1947 | } |
1947 | } else { | 1948 | } else { |
1948 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), | 1949 | dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), |
@@ -2067,19 +2068,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2067 | u32 offset = 0; | 2068 | u32 offset = 0; |
2068 | u32 bytes_to_copy = | 2069 | u32 bytes_to_copy = |
2069 | le16_to_cpu(mpi_reply->ResponseDataLength); | 2070 | le16_to_cpu(mpi_reply->ResponseDataLength); |
2070 | bio_for_each_segment(bvec, rsp->bio, i) { | 2071 | bio_for_each_segment(bvec, rsp->bio, iter) { |
2071 | if (bytes_to_copy <= bvec->bv_len) { | 2072 | if (bytes_to_copy <= bvec.bv_len) { |
2072 | memcpy(page_address(bvec->bv_page) + | 2073 | memcpy(page_address(bvec.bv_page) + |
2073 | bvec->bv_offset, pci_addr_in + | 2074 | bvec.bv_offset, pci_addr_in + |
2074 | offset, bytes_to_copy); | 2075 | offset, bytes_to_copy); |
2075 | break; | 2076 | break; |
2076 | } else { | 2077 | } else { |
2077 | memcpy(page_address(bvec->bv_page) + | 2078 | memcpy(page_address(bvec.bv_page) + |
2078 | bvec->bv_offset, pci_addr_in + | 2079 | bvec.bv_offset, pci_addr_in + |
2079 | offset, bvec->bv_len); | 2080 | offset, bvec.bv_len); |
2080 | bytes_to_copy -= bvec->bv_len; | 2081 | bytes_to_copy -= bvec.bv_len; |
2081 | } | 2082 | } |
2082 | offset += bvec->bv_len; | 2083 | offset += bvec.bv_len; |
2083 | } | 2084 | } |
2084 | } | 2085 | } |
2085 | } else { | 2086 | } else { |
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 53741be754b4..581ff78be1a2 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c | |||
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) | |||
194 | struct cl_object *obj = ll_i2info(inode)->lli_clob; | 194 | struct cl_object *obj = ll_i2info(inode)->lli_clob; |
195 | pgoff_t offset; | 195 | pgoff_t offset; |
196 | int ret; | 196 | int ret; |
197 | int i; | ||
198 | int rw; | 197 | int rw; |
199 | obd_count page_count = 0; | 198 | obd_count page_count = 0; |
200 | struct bio_vec *bvec; | 199 | struct bio_vec bvec; |
200 | struct bvec_iter iter; | ||
201 | struct bio *bio; | 201 | struct bio *bio; |
202 | ssize_t bytes; | 202 | ssize_t bytes; |
203 | 203 | ||
@@ -221,14 +221,14 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) | |||
221 | LASSERT(rw == bio->bi_rw); | 221 | LASSERT(rw == bio->bi_rw); |
222 | 222 | ||
223 | offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; | 223 | offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; |
224 | bio_for_each_segment(bvec, bio, i) { | 224 | bio_for_each_segment(bvec, bio, iter) { |
225 | BUG_ON(bvec->bv_offset != 0); | 225 | BUG_ON(bvec.bv_offset != 0); |
226 | BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); | 226 | BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); |
227 | 227 | ||
228 | pages[page_count] = bvec->bv_page; | 228 | pages[page_count] = bvec.bv_page; |
229 | offsets[page_count] = offset; | 229 | offsets[page_count] = offset; |
230 | page_count++; | 230 | page_count++; |
231 | offset += bvec->bv_len; | 231 | offset += bvec.bv_len; |
232 | } | 232 | } |
233 | LASSERT(page_count <= LLOOP_MAX_SEGMENTS); | 233 | LASSERT(page_count <= LLOOP_MAX_SEGMENTS); |
234 | } | 234 | } |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index e9e6f984092b..6f988382b174 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
@@ -672,9 +672,10 @@ static ssize_t reset_store(struct device *dev, | |||
672 | 672 | ||
673 | static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) | 673 | static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) |
674 | { | 674 | { |
675 | int i, offset; | 675 | int offset; |
676 | u32 index; | 676 | u32 index; |
677 | struct bio_vec *bvec; | 677 | struct bio_vec bvec; |
678 | struct bvec_iter iter; | ||
678 | 679 | ||
679 | switch (rw) { | 680 | switch (rw) { |
680 | case READ: | 681 | case READ: |
@@ -689,33 +690,33 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) | |||
689 | offset = (bio->bi_iter.bi_sector & | 690 | offset = (bio->bi_iter.bi_sector & |
690 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; | 691 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
691 | 692 | ||
692 | bio_for_each_segment(bvec, bio, i) { | 693 | bio_for_each_segment(bvec, bio, iter) { |
693 | int max_transfer_size = PAGE_SIZE - offset; | 694 | int max_transfer_size = PAGE_SIZE - offset; |
694 | 695 | ||
695 | if (bvec->bv_len > max_transfer_size) { | 696 | if (bvec.bv_len > max_transfer_size) { |
696 | /* | 697 | /* |
697 | * zram_bvec_rw() can only make operation on a single | 698 | * zram_bvec_rw() can only make operation on a single |
698 | * zram page. Split the bio vector. | 699 | * zram page. Split the bio vector. |
699 | */ | 700 | */ |
700 | struct bio_vec bv; | 701 | struct bio_vec bv; |
701 | 702 | ||
702 | bv.bv_page = bvec->bv_page; | 703 | bv.bv_page = bvec.bv_page; |
703 | bv.bv_len = max_transfer_size; | 704 | bv.bv_len = max_transfer_size; |
704 | bv.bv_offset = bvec->bv_offset; | 705 | bv.bv_offset = bvec.bv_offset; |
705 | 706 | ||
706 | if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) | 707 | if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) |
707 | goto out; | 708 | goto out; |
708 | 709 | ||
709 | bv.bv_len = bvec->bv_len - max_transfer_size; | 710 | bv.bv_len = bvec.bv_len - max_transfer_size; |
710 | bv.bv_offset += max_transfer_size; | 711 | bv.bv_offset += max_transfer_size; |
711 | if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) | 712 | if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) |
712 | goto out; | 713 | goto out; |
713 | } else | 714 | } else |
714 | if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) | 715 | if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) |
715 | < 0) | 716 | < 0) |
716 | goto out; | 717 | goto out; |
717 | 718 | ||
718 | update_position(&index, &offset, bvec); | 719 | update_position(&index, &offset, &bvec); |
719 | } | 720 | } |
720 | 721 | ||
721 | set_bit(BIO_UPTODATE, &bio->bi_flags); | 722 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
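Because the loop variable is now a struct bio_vec rather than a pointer, helpers that keep the old pointer-taking signature (zram_bvec_rw() above, bvec_kmap_irq() below) simply receive the address of the on-stack copy. A sketch, with a hypothetical rw_one() standing in for such a helper:

	static int rw_each_segment(struct bio *bio, int rw)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;

		bio_for_each_segment(bvec, bio, iter)
			if (rw_one(&bvec, rw) < 0)	/* note &bvec, not bvec */
				return -EIO;

		return 0;
	}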
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 08e3d1388c65..9127db86f315 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -299,25 +299,26 @@ static void bio_integrity_generate(struct bio *bio) | |||
299 | { | 299 | { |
300 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 300 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
301 | struct blk_integrity_exchg bix; | 301 | struct blk_integrity_exchg bix; |
302 | struct bio_vec *bv; | 302 | struct bio_vec bv; |
303 | struct bvec_iter iter; | ||
303 | sector_t sector = bio->bi_iter.bi_sector; | 304 | sector_t sector = bio->bi_iter.bi_sector; |
304 | unsigned int i, sectors, total; | 305 | unsigned int sectors, total; |
305 | void *prot_buf = bio->bi_integrity->bip_buf; | 306 | void *prot_buf = bio->bi_integrity->bip_buf; |
306 | 307 | ||
307 | total = 0; | 308 | total = 0; |
308 | bix.disk_name = bio->bi_bdev->bd_disk->disk_name; | 309 | bix.disk_name = bio->bi_bdev->bd_disk->disk_name; |
309 | bix.sector_size = bi->sector_size; | 310 | bix.sector_size = bi->sector_size; |
310 | 311 | ||
311 | bio_for_each_segment(bv, bio, i) { | 312 | bio_for_each_segment(bv, bio, iter) { |
312 | void *kaddr = kmap_atomic(bv->bv_page); | 313 | void *kaddr = kmap_atomic(bv.bv_page); |
313 | bix.data_buf = kaddr + bv->bv_offset; | 314 | bix.data_buf = kaddr + bv.bv_offset; |
314 | bix.data_size = bv->bv_len; | 315 | bix.data_size = bv.bv_len; |
315 | bix.prot_buf = prot_buf; | 316 | bix.prot_buf = prot_buf; |
316 | bix.sector = sector; | 317 | bix.sector = sector; |
317 | 318 | ||
318 | bi->generate_fn(&bix); | 319 | bi->generate_fn(&bix); |
319 | 320 | ||
320 | sectors = bv->bv_len / bi->sector_size; | 321 | sectors = bv.bv_len / bi->sector_size; |
321 | sector += sectors; | 322 | sector += sectors; |
322 | prot_buf += sectors * bi->tuple_size; | 323 | prot_buf += sectors * bi->tuple_size; |
323 | total += sectors * bi->tuple_size; | 324 | total += sectors * bi->tuple_size; |
@@ -441,19 +442,20 @@ static int bio_integrity_verify(struct bio *bio) | |||
441 | { | 442 | { |
442 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 443 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
443 | struct blk_integrity_exchg bix; | 444 | struct blk_integrity_exchg bix; |
444 | struct bio_vec *bv; | 445 | struct bio_vec bv; |
446 | struct bvec_iter iter; | ||
445 | sector_t sector = bio->bi_integrity->bip_sector; | 447 | sector_t sector = bio->bi_integrity->bip_sector; |
446 | unsigned int i, sectors, total, ret; | 448 | unsigned int sectors, total, ret; |
447 | void *prot_buf = bio->bi_integrity->bip_buf; | 449 | void *prot_buf = bio->bi_integrity->bip_buf; |
448 | 450 | ||
449 | ret = total = 0; | 451 | ret = total = 0; |
450 | bix.disk_name = bio->bi_bdev->bd_disk->disk_name; | 452 | bix.disk_name = bio->bi_bdev->bd_disk->disk_name; |
451 | bix.sector_size = bi->sector_size; | 453 | bix.sector_size = bi->sector_size; |
452 | 454 | ||
453 | bio_for_each_segment(bv, bio, i) { | 455 | bio_for_each_segment(bv, bio, iter) { |
454 | void *kaddr = kmap_atomic(bv->bv_page); | 456 | void *kaddr = kmap_atomic(bv.bv_page); |
455 | bix.data_buf = kaddr + bv->bv_offset; | 457 | bix.data_buf = kaddr + bv.bv_offset; |
456 | bix.data_size = bv->bv_len; | 458 | bix.data_size = bv.bv_len; |
457 | bix.prot_buf = prot_buf; | 459 | bix.prot_buf = prot_buf; |
458 | bix.sector = sector; | 460 | bix.sector = sector; |
459 | 461 | ||
@@ -464,7 +466,7 @@ static int bio_integrity_verify(struct bio *bio) | |||
464 | return ret; | 466 | return ret; |
465 | } | 467 | } |
466 | 468 | ||
467 | sectors = bv->bv_len / bi->sector_size; | 469 | sectors = bv.bv_len / bi->sector_size; |
468 | sector += sectors; | 470 | sector += sectors; |
469 | prot_buf += sectors * bi->tuple_size; | 471 | prot_buf += sectors * bi->tuple_size; |
470 | total += sectors * bi->tuple_size; | 472 | total += sectors * bi->tuple_size; |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -473,13 +473,13 @@ EXPORT_SYMBOL(bio_alloc_bioset); | |||
473 | void zero_fill_bio(struct bio *bio) | 473 | void zero_fill_bio(struct bio *bio) |
474 | { | 474 | { |
475 | unsigned long flags; | 475 | unsigned long flags; |
476 | struct bio_vec *bv; | 476 | struct bio_vec bv; |
477 | int i; | 477 | struct bvec_iter iter; |
478 | 478 | ||
479 | bio_for_each_segment(bv, bio, i) { | 479 | bio_for_each_segment(bv, bio, iter) { |
480 | char *data = bvec_kmap_irq(bv, &flags); | 480 | char *data = bvec_kmap_irq(&bv, &flags); |
481 | memset(data, 0, bv->bv_len); | 481 | memset(data, 0, bv.bv_len); |
482 | flush_dcache_page(bv->bv_page); | 482 | flush_dcache_page(bv.bv_page); |
483 | bvec_kunmap_irq(data, &flags); | 483 | bvec_kunmap_irq(data, &flags); |
484 | } | 484 | } |
485 | } | 485 | } |
@@ -1687,11 +1687,11 @@ void bio_check_pages_dirty(struct bio *bio) | |||
1687 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 1687 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
1688 | void bio_flush_dcache_pages(struct bio *bi) | 1688 | void bio_flush_dcache_pages(struct bio *bi) |
1689 | { | 1689 | { |
1690 | int i; | 1690 | struct bio_vec bvec; |
1691 | struct bio_vec *bvec; | 1691 | struct bvec_iter iter; |
1692 | 1692 | ||
1693 | bio_for_each_segment(bvec, bi, i) | 1693 | bio_for_each_segment(bvec, bi, iter) |
1694 | flush_dcache_page(bvec->bv_page); | 1694 | flush_dcache_page(bvec.bv_page); |
1695 | } | 1695 | } |
1696 | EXPORT_SYMBOL(bio_flush_dcache_pages); | 1696 | EXPORT_SYMBOL(bio_flush_dcache_pages); |
1697 | #endif | 1697 | #endif |
@@ -1840,7 +1840,7 @@ void bio_trim(struct bio *bio, int offset, int size) | |||
1840 | bio->bi_iter.bi_idx = 0; | 1840 | bio->bi_iter.bi_idx = 0; |
1841 | } | 1841 | } |
1842 | /* Make sure vcnt and last bv are not too big */ | 1842 | /* Make sure vcnt and last bv are not too big */ |
1843 | bio_for_each_segment(bvec, bio, i) { | 1843 | bio_for_each_segment_all(bvec, bio, i) { |
1844 | if (sofar + bvec->bv_len > size) | 1844 | if (sofar + bvec->bv_len > size) |
1845 | bvec->bv_len = size - sofar; | 1845 | bvec->bv_len = size - sofar; |
1846 | if (bvec->bv_len == 0) { | 1846 | if (bvec->bv_len == 0) { |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 9f182fcbe714..c16adb5f69f8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -63,10 +63,13 @@ | |||
63 | */ | 63 | */ |
64 | #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) | 64 | #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) |
65 | #define __bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) | 65 | #define __bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) |
66 | #define bio_iovec(bio) (*__bio_iovec(bio)) | 66 | |
67 | #define bio_iter_iovec(bio, iter) ((bio)->bi_io_vec[(iter).bi_idx]) | ||
67 | 68 | ||
68 | #define bio_page(bio) (bio_iovec((bio)).bv_page) | 69 | #define bio_page(bio) (bio_iovec((bio)).bv_page) |
69 | #define bio_offset(bio) (bio_iovec((bio)).bv_offset) | 70 | #define bio_offset(bio) (bio_iovec((bio)).bv_offset) |
71 | #define bio_iovec(bio) (*__bio_iovec(bio)) | ||
72 | |||
70 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) | 73 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) |
71 | #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) | 74 | #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) |
72 | #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) | 75 | #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) |
@@ -134,15 +137,6 @@ static inline void *bio_data(struct bio *bio) | |||
134 | #define bio_io_error(bio) bio_endio((bio), -EIO) | 137 | #define bio_io_error(bio) bio_endio((bio), -EIO) |
135 | 138 | ||
136 | /* | 139 | /* |
137 | * drivers should not use the __ version unless they _really_ know what | ||
138 | * they're doing | ||
139 | */ | ||
140 | #define __bio_for_each_segment(bvl, bio, i, start_idx) \ | ||
141 | for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \ | ||
142 | i < (bio)->bi_vcnt; \ | ||
143 | bvl++, i++) | ||
144 | |||
145 | /* | ||
146 | * drivers should _never_ use the all version - the bio may have been split | 140 | * drivers should _never_ use the all version - the bio may have been split |
147 | * before it got to the driver and the driver won't own all of it | 141 | * before it got to the driver and the driver won't own all of it |
148 | */ | 142 | */ |
@@ -151,10 +145,16 @@ static inline void *bio_data(struct bio *bio) | |||
151 | bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ | 145 | bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ |
152 | i++) | 146 | i++) |
153 | 147 | ||
154 | #define bio_for_each_segment(bvl, bio, i) \ | 148 | #define __bio_for_each_segment(bvl, bio, iter, start) \ |
155 | for (i = (bio)->bi_iter.bi_idx; \ | 149 | for (iter = (start); \ |
156 | bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ | 150 | bvl = bio_iter_iovec((bio), (iter)), \ |
157 | i++) | 151 | (iter).bi_idx < (bio)->bi_vcnt; \ |
152 | (iter).bi_idx++) | ||
153 | |||
154 | #define bio_for_each_segment(bvl, bio, iter) \ | ||
155 | __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) | ||
156 | |||
157 | #define bio_iter_last(bio, iter) ((iter).bi_idx == (bio)->bi_vcnt - 1) | ||
158 | 158 | ||
159 | /* | 159 | /* |
160 | * get a reference to a bio, so it won't disappear. the intended use is | 160 | * get a reference to a bio, so it won't disappear. the intended use is |
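A usage sketch for the reworked macros: bio_for_each_segment() starts from the bio's own bi_iter, __bio_for_each_segment() from any saved iterator, and bio_iter_last() tests for the final segment; handle_segment() is hypothetical:

	struct bio_vec bv;
	struct bvec_iter iter;
	struct bvec_iter start = bio->bi_iter;	/* snapshot the current position */

	/* Resume iteration from the snapshot rather than from bio->bi_iter. */
	__bio_for_each_segment(bv, bio, iter, start) {
		handle_segment(bv.bv_page, bv.bv_offset, bv.bv_len);
		if (bio_iter_last(bio, iter))
			pr_debug("last segment of bio\n");
	}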
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1b135d49b279..337b92a54658 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -735,7 +735,7 @@ struct rq_map_data { | |||
735 | }; | 735 | }; |
736 | 736 | ||
737 | struct req_iterator { | 737 | struct req_iterator { |
738 | int i; | 738 | struct bvec_iter iter; |
739 | struct bio *bio; | 739 | struct bio *bio; |
740 | }; | 740 | }; |
741 | 741 | ||
@@ -748,10 +748,11 @@ struct req_iterator { | |||
748 | 748 | ||
749 | #define rq_for_each_segment(bvl, _rq, _iter) \ | 749 | #define rq_for_each_segment(bvl, _rq, _iter) \ |
750 | __rq_for_each_bio(_iter.bio, _rq) \ | 750 | __rq_for_each_bio(_iter.bio, _rq) \ |
751 | bio_for_each_segment(bvl, _iter.bio, _iter.i) | 751 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) |
752 | 752 | ||
753 | #define rq_iter_last(rq, _iter) \ | 753 | #define rq_iter_last(rq, _iter) \ |
754 | (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) | 754 | (_iter.bio->bi_next == NULL && \ |
755 | bio_iter_last(_iter.bio, _iter.iter)) | ||
755 | 756 | ||
756 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 757 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
757 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" | 758 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" |
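With struct req_iterator now embedding a bvec_iter, a request walk looks like this; setup_segment() and finish_chain() are hypothetical driver hooks, and rq_iter_last() fires on the final segment of the request's last bio:

	struct req_iterator iter;
	struct bio_vec bv;

	rq_for_each_segment(bv, rq, iter) {
		setup_segment(bv.bv_page, bv.bv_offset, bv.bv_len);
		if (rq_iter_last(rq, iter))
			finish_chain();
	}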
diff --git a/mm/bounce.c b/mm/bounce.c index 5a7d58fb883b..523918b8c6dc 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void) | |||
98 | static void copy_to_high_bio_irq(struct bio *to, struct bio *from) | 98 | static void copy_to_high_bio_irq(struct bio *to, struct bio *from) |
99 | { | 99 | { |
100 | unsigned char *vfrom; | 100 | unsigned char *vfrom; |
101 | struct bio_vec *tovec, *fromvec; | 101 | struct bio_vec tovec, *fromvec = from->bi_io_vec; |
102 | int i; | 102 | struct bvec_iter iter; |
103 | 103 | ||
104 | bio_for_each_segment(tovec, to, i) { | 104 | bio_for_each_segment(tovec, to, iter) { |
105 | fromvec = from->bi_io_vec + i; | 105 | if (tovec.bv_page != fromvec->bv_page) { |
106 | 106 | /* | |
107 | /* | 107 | * fromvec->bv_offset and fromvec->bv_len might have |
108 | * not bounced | 108 | * been modified by the block layer, so use the original |
109 | */ | 109 | * copy, bounce_copy_vec already uses tovec->bv_len |
110 | if (tovec->bv_page == fromvec->bv_page) | 110 | */ |
111 | continue; | 111 | vfrom = page_address(fromvec->bv_page) + |
112 | 112 | tovec.bv_offset; | |
113 | /* | 113 | |
114 | * fromvec->bv_offset and fromvec->bv_len might have been | 114 | bounce_copy_vec(&tovec, vfrom); |
115 | * modified by the block layer, so use the original copy, | 115 | flush_dcache_page(tovec.bv_page); |
116 | * bounce_copy_vec already uses tovec->bv_len | 116 | } |
117 | */ | ||
118 | vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; | ||
119 | 117 | ||
120 | bounce_copy_vec(tovec, vfrom); | 118 | fromvec++; |
121 | flush_dcache_page(tovec->bv_page); | ||
122 | } | 119 | } |
123 | } | 120 | } |
124 | 121 | ||
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
201 | { | 198 | { |
202 | struct bio *bio; | 199 | struct bio *bio; |
203 | int rw = bio_data_dir(*bio_orig); | 200 | int rw = bio_data_dir(*bio_orig); |
204 | struct bio_vec *to, *from; | 201 | struct bio_vec *to, from; |
202 | struct bvec_iter iter; | ||
205 | unsigned i; | 203 | unsigned i; |
206 | 204 | ||
207 | if (force) | 205 | if (force) |
208 | goto bounce; | 206 | goto bounce; |
209 | bio_for_each_segment(from, *bio_orig, i) | 207 | bio_for_each_segment(from, *bio_orig, iter) |
210 | if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) | 208 | if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) |
211 | goto bounce; | 209 | goto bounce; |
212 | 210 | ||
213 | return; | 211 | return; |