author    Kent Overstreet <kmo@daterainc.com>    2013-11-23 20:19:00 -0500
committer Kent Overstreet <kmo@daterainc.com>    2013-11-24 01:33:49 -0500
commit    7988613b0e5b2638caf6cd493cc78e9595eba19c (patch)
tree      cc9fc4e235278035313ee32940740f61269f8fb3  /drivers/block/nvme-core.c
parent    a4ad39b1d10584dfcfcfb0d510faab2c7f034399 (diff)
block: Convert bio_for_each_segment() to bvec_iter
More prep work for immutable biovecs - with immutable bvecs drivers won't be
able to use the biovec directly, they'll need to use helpers that take into
account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the implementation yet.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
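A side note, not part of the commit: a minimal sketch of the calling convention
this series moves drivers to. bio_for_each_segment() now yields a struct bio_vec
by value, driven by a struct bvec_iter, instead of handing out a pointer into the
bio's biovec array indexed by an integer, so segment fields are accessed with '.'
rather than '->'. The helper name example_count_bytes() is hypothetical.

/* Illustrative only: walk a bio's segments with the bvec_iter interface. */
#include <linux/bio.h>

static unsigned int example_count_bytes(struct bio *bio)
{
	struct bio_vec bvec;	/* a copy of each segment, not a pointer */
	struct bvec_iter iter;	/* replaces the old integer index */
	unsigned int bytes = 0;

	bio_for_each_segment(bvec, bio, iter)
		bytes += bvec.bv_len;	/* note '.' access on the copy */

	return bytes;
}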
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--  drivers/block/nvme-core.c | 33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 53d217381873..5539d2920872 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -550,9 +550,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-	struct bio_vec *bvec, *bvprv = NULL;
+	struct bio_vec bvec, bvprv;
+	struct bvec_iter iter;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int first = 1;
 
 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
@@ -560,25 +562,28 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 			(nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	bio_for_each_segment(bvec, bio, i) {
-		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-			sg->length += bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+			sg->length += bvec.bv_len;
 		} else {
-			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				return nvme_split_and_submit(bio, nvmeq, i,
-							     length, 0);
+			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+				return nvme_split_and_submit(bio, nvmeq,
+							     iter.bi_idx,
+							     length, 0);
 
 			sg = sg ? sg + 1 : iod->sg;
-			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-							bvec->bv_offset);
+			sg_set_page(sg, bvec.bv_page,
+				    bvec.bv_len, bvec.bv_offset);
 			nsegs++;
 		}
 
-		if (split_len - length < bvec->bv_len)
-			return nvme_split_and_submit(bio, nvmeq, i, split_len,
-						     split_len - length);
-		length += bvec->bv_len;
+		if (split_len - length < bvec.bv_len)
+			return nvme_split_and_submit(bio, nvmeq, iter.bi_idx,
+						     split_len,
+						     split_len - length);
+		length += bvec.bv_len;
 		bvprv = bvec;
+		first = 0;
 	}
 	iod->nents = nsegs;
 	sg_mark_end(sg);
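One consequence visible in the hunk above, worth spelling out: because the
iterated bio_vec is now a stack copy, the "previous segment" can no longer be
tracked as a pointer that starts out NULL, so the driver keeps a copied bvprv
plus a first flag. A hedged sketch of that bookkeeping pattern, using a
hypothetical example_count_runs() helper that merely counts physically
contiguous runs of segments (not from the patch):

/* Illustrative only: track the previous segment as a copy plus a flag. */
#include <linux/bio.h>

static int example_count_runs(struct bio *bio)
{
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	int first = 1, runs = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (first || !BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec))
			runs++;		/* this segment starts a new run */
		bvprv = bvec;		/* struct copy; safe to reuse next pass */
		first = 0;
	}
	return runs;
}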