author    Kent Overstreet <kmo@daterainc.com>    2013-11-23 20:19:00 -0500
committer Kent Overstreet <kmo@daterainc.com>    2013-11-24 01:33:49 -0500
commit    7988613b0e5b2638caf6cd493cc78e9595eba19c (patch)
tree      cc9fc4e235278035313ee32940740f61269f8fb3 /mm
parent    a4ad39b1d10584dfcfcfb0d510faab2c7f034399 (diff)
block: Convert bio_for_each_segment() to bvec_iter
More prep work for immutable biovecs - with immutable bvecs drivers won't be
able to use the biovec directly; they'll need to use helpers that take into
account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the implementation yet.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
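For callers, the conversion is mechanical: the loop variable becomes a struct bio_vec held by value and the integer index becomes a struct bvec_iter. A minimal sketch of a hypothetical caller follows (not part of this patch; the function and variable names are illustrative):

#include <linux/bio.h>

/* Hypothetical helper, for illustration only. */
static unsigned int sum_segment_lengths(struct bio *bio)
{
        struct bio_vec bvec;            /* now held by value, not a pointer */
        struct bvec_iter iter;          /* replaces the old integer index */
        unsigned int bytes = 0;

        /*
         * Old usage:   struct bio_vec *bvec; int i;
         *              bio_for_each_segment(bvec, bio, i)
         *                      bytes += bvec->bv_len;
         *
         * New usage: the macro hands back a bio_vec that already accounts
         * for bio->bi_iter.bi_bvec_done, so callers never index
         * bio->bi_io_vec themselves.
         */
        bio_for_each_segment(bvec, bio, iter)
                bytes += bvec.bv_len;

        return bytes;
}

Code that previously did arithmetic on bi_io_vec + i, as copy_to_high_bio_irq() does below, instead keeps its own pointer and advances it by hand (fromvec++), since the bio being iterated is not the one whose vector it reads.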
Diffstat (limited to 'mm')
-rw-r--r--  mm/bounce.c  44
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index 5a7d58fb883b..523918b8c6dc 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
 	unsigned char *vfrom;
-	struct bio_vec *tovec, *fromvec;
-	int i;
-
-	bio_for_each_segment(tovec, to, i) {
-		fromvec = from->bi_io_vec + i;
-
-		/*
-		 * not bounced
-		 */
-		if (tovec->bv_page == fromvec->bv_page)
-			continue;
-
-		/*
-		 * fromvec->bv_offset and fromvec->bv_len might have been
-		 * modified by the block layer, so use the original copy,
-		 * bounce_copy_vec already uses tovec->bv_len
-		 */
-		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+	struct bio_vec tovec, *fromvec = from->bi_io_vec;
+	struct bvec_iter iter;
+
+	bio_for_each_segment(tovec, to, iter) {
+		if (tovec.bv_page != fromvec->bv_page) {
+			/*
+			 * fromvec->bv_offset and fromvec->bv_len might have
+			 * been modified by the block layer, so use the original
+			 * copy, bounce_copy_vec already uses tovec->bv_len
+			 */
+			vfrom = page_address(fromvec->bv_page) +
+				tovec.bv_offset;
+
+			bounce_copy_vec(&tovec, vfrom);
+			flush_dcache_page(tovec.bv_page);
+		}
 
-		bounce_copy_vec(tovec, vfrom);
-		flush_dcache_page(tovec->bv_page);
+		fromvec++;
 	}
 }
 
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
-	struct bio_vec *to, *from;
+	struct bio_vec *to, from;
+	struct bvec_iter iter;
 	unsigned i;
 
 	if (force)
 		goto bounce;
-	bio_for_each_segment(from, *bio_orig, i)
-		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+	bio_for_each_segment(from, *bio_orig, iter)
+		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
 
 	return;