diff options
author | Kent Overstreet <kmo@daterainc.com> | 2013-10-11 18:44:27 -0400 |
---|---|---|
committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-24 01:33:47 -0500 |
commit | 4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch) | |
tree | 3aedcab02d2ad723a189d01934d1e94fec7a54e1 /fs/bio.c | |
parent | ed9c47bebeeea4a468b07cfd745c690190f8014c (diff) |
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To
implement immutable bvecs, a later patch is going to add a bi_bvec_done
member to this struct; for now, this patch effectively just renames
things.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'fs/bio.c')
-rw-r--r-- | fs/bio.c | 56 |
1 file changed, 29 insertions, 27 deletions
@@ -532,13 +532,13 @@ void __bio_clone(struct bio *bio, struct bio *bio_src) | |||
532 | * most users will be overriding ->bi_bdev with a new target, | 532 | * most users will be overriding ->bi_bdev with a new target, |
533 | * so we don't set nor calculate new physical/hw segment counts here | 533 | * so we don't set nor calculate new physical/hw segment counts here |
534 | */ | 534 | */ |
535 | bio->bi_sector = bio_src->bi_sector; | 535 | bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; |
536 | bio->bi_bdev = bio_src->bi_bdev; | 536 | bio->bi_bdev = bio_src->bi_bdev; |
537 | bio->bi_flags |= 1 << BIO_CLONED; | 537 | bio->bi_flags |= 1 << BIO_CLONED; |
538 | bio->bi_rw = bio_src->bi_rw; | 538 | bio->bi_rw = bio_src->bi_rw; |
539 | bio->bi_vcnt = bio_src->bi_vcnt; | 539 | bio->bi_vcnt = bio_src->bi_vcnt; |
540 | bio->bi_size = bio_src->bi_size; | 540 | bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; |
541 | bio->bi_idx = bio_src->bi_idx; | 541 | bio->bi_iter.bi_idx = bio_src->bi_iter.bi_idx; |
542 | } | 542 | } |
543 | EXPORT_SYMBOL(__bio_clone); | 543 | EXPORT_SYMBOL(__bio_clone); |
544 | 544 | ||
@@ -612,7 +612,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
612 | if (unlikely(bio_flagged(bio, BIO_CLONED))) | 612 | if (unlikely(bio_flagged(bio, BIO_CLONED))) |
613 | return 0; | 613 | return 0; |
614 | 614 | ||
615 | if (((bio->bi_size + len) >> 9) > max_sectors) | 615 | if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) |
616 | return 0; | 616 | return 0; |
617 | 617 | ||
618 | /* | 618 | /* |
@@ -635,8 +635,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
635 | simulate merging updated prev_bvec | 635 | simulate merging updated prev_bvec |
636 | as new bvec. */ | 636 | as new bvec. */ |
637 | .bi_bdev = bio->bi_bdev, | 637 | .bi_bdev = bio->bi_bdev, |
638 | .bi_sector = bio->bi_sector, | 638 | .bi_sector = bio->bi_iter.bi_sector, |
639 | .bi_size = bio->bi_size - prev_bv_len, | 639 | .bi_size = bio->bi_iter.bi_size - |
640 | prev_bv_len, | ||
640 | .bi_rw = bio->bi_rw, | 641 | .bi_rw = bio->bi_rw, |
641 | }; | 642 | }; |
642 | 643 | ||
@@ -684,8 +685,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
684 | if (q->merge_bvec_fn) { | 685 | if (q->merge_bvec_fn) { |
685 | struct bvec_merge_data bvm = { | 686 | struct bvec_merge_data bvm = { |
686 | .bi_bdev = bio->bi_bdev, | 687 | .bi_bdev = bio->bi_bdev, |
687 | .bi_sector = bio->bi_sector, | 688 | .bi_sector = bio->bi_iter.bi_sector, |
688 | .bi_size = bio->bi_size, | 689 | .bi_size = bio->bi_iter.bi_size, |
689 | .bi_rw = bio->bi_rw, | 690 | .bi_rw = bio->bi_rw, |
690 | }; | 691 | }; |
691 | 692 | ||
@@ -708,7 +709,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
708 | bio->bi_vcnt++; | 709 | bio->bi_vcnt++; |
709 | bio->bi_phys_segments++; | 710 | bio->bi_phys_segments++; |
710 | done: | 711 | done: |
711 | bio->bi_size += len; | 712 | bio->bi_iter.bi_size += len; |
712 | return len; | 713 | return len; |
713 | } | 714 | } |
714 | 715 | ||
@@ -807,22 +808,22 @@ void bio_advance(struct bio *bio, unsigned bytes) | |||
807 | if (bio_integrity(bio)) | 808 | if (bio_integrity(bio)) |
808 | bio_integrity_advance(bio, bytes); | 809 | bio_integrity_advance(bio, bytes); |
809 | 810 | ||
810 | bio->bi_sector += bytes >> 9; | 811 | bio->bi_iter.bi_sector += bytes >> 9; |
811 | bio->bi_size -= bytes; | 812 | bio->bi_iter.bi_size -= bytes; |
812 | 813 | ||
813 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) | 814 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) |
814 | return; | 815 | return; |
815 | 816 | ||
816 | while (bytes) { | 817 | while (bytes) { |
817 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | 818 | if (unlikely(bio->bi_iter.bi_idx >= bio->bi_vcnt)) { |
818 | WARN_ONCE(1, "bio idx %d >= vcnt %d\n", | 819 | WARN_ONCE(1, "bio idx %d >= vcnt %d\n", |
819 | bio->bi_idx, bio->bi_vcnt); | 820 | bio->bi_iter.bi_idx, bio->bi_vcnt); |
820 | break; | 821 | break; |
821 | } | 822 | } |
822 | 823 | ||
823 | if (bytes >= bio_iovec(bio)->bv_len) { | 824 | if (bytes >= bio_iovec(bio)->bv_len) { |
824 | bytes -= bio_iovec(bio)->bv_len; | 825 | bytes -= bio_iovec(bio)->bv_len; |
825 | bio->bi_idx++; | 826 | bio->bi_iter.bi_idx++; |
826 | } else { | 827 | } else { |
827 | bio_iovec(bio)->bv_len -= bytes; | 828 | bio_iovec(bio)->bv_len -= bytes; |
828 | bio_iovec(bio)->bv_offset += bytes; | 829 | bio_iovec(bio)->bv_offset += bytes; |
@@ -1485,7 +1486,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, | |||
1485 | if (IS_ERR(bio)) | 1486 | if (IS_ERR(bio)) |
1486 | return bio; | 1487 | return bio; |
1487 | 1488 | ||
1488 | if (bio->bi_size == len) | 1489 | if (bio->bi_iter.bi_size == len) |
1489 | return bio; | 1490 | return bio; |
1490 | 1491 | ||
1491 | /* | 1492 | /* |
@@ -1763,16 +1764,16 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
1763 | return bp; | 1764 | return bp; |
1764 | 1765 | ||
1765 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, | 1766 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, |
1766 | bi->bi_sector + first_sectors); | 1767 | bi->bi_iter.bi_sector + first_sectors); |
1767 | 1768 | ||
1768 | BUG_ON(bio_segments(bi) > 1); | 1769 | BUG_ON(bio_segments(bi) > 1); |
1769 | atomic_set(&bp->cnt, 3); | 1770 | atomic_set(&bp->cnt, 3); |
1770 | bp->error = 0; | 1771 | bp->error = 0; |
1771 | bp->bio1 = *bi; | 1772 | bp->bio1 = *bi; |
1772 | bp->bio2 = *bi; | 1773 | bp->bio2 = *bi; |
1773 | bp->bio2.bi_sector += first_sectors; | 1774 | bp->bio2.bi_iter.bi_sector += first_sectors; |
1774 | bp->bio2.bi_size -= first_sectors << 9; | 1775 | bp->bio2.bi_iter.bi_size -= first_sectors << 9; |
1775 | bp->bio1.bi_size = first_sectors << 9; | 1776 | bp->bio1.bi_iter.bi_size = first_sectors << 9; |
1776 | 1777 | ||
1777 | if (bi->bi_vcnt != 0) { | 1778 | if (bi->bi_vcnt != 0) { |
1778 | bp->bv1 = *bio_iovec(bi); | 1779 | bp->bv1 = *bio_iovec(bi); |
@@ -1821,21 +1822,22 @@ void bio_trim(struct bio *bio, int offset, int size) | |||
1821 | int sofar = 0; | 1822 | int sofar = 0; |
1822 | 1823 | ||
1823 | size <<= 9; | 1824 | size <<= 9; |
1824 | if (offset == 0 && size == bio->bi_size) | 1825 | if (offset == 0 && size == bio->bi_iter.bi_size) |
1825 | return; | 1826 | return; |
1826 | 1827 | ||
1827 | clear_bit(BIO_SEG_VALID, &bio->bi_flags); | 1828 | clear_bit(BIO_SEG_VALID, &bio->bi_flags); |
1828 | 1829 | ||
1829 | bio_advance(bio, offset << 9); | 1830 | bio_advance(bio, offset << 9); |
1830 | 1831 | ||
1831 | bio->bi_size = size; | 1832 | bio->bi_iter.bi_size = size; |
1832 | 1833 | ||
1833 | /* avoid any complications with bi_idx being non-zero*/ | 1834 | /* avoid any complications with bi_idx being non-zero*/ |
1834 | if (bio->bi_idx) { | 1835 | if (bio->bi_iter.bi_idx) { |
1835 | memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, | 1836 | memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_iter.bi_idx, |
1836 | (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); | 1837 | (bio->bi_vcnt - bio->bi_iter.bi_idx) * |
1837 | bio->bi_vcnt -= bio->bi_idx; | 1838 | sizeof(struct bio_vec)); |
1838 | bio->bi_idx = 0; | 1839 | bio->bi_vcnt -= bio->bi_iter.bi_idx; |
1840 | bio->bi_iter.bi_idx = 0; | ||
1839 | } | 1841 | } |
1840 | /* Make sure vcnt and last bv are not too big */ | 1842 | /* Make sure vcnt and last bv are not too big */ |
1841 | bio_for_each_segment(bvec, bio, i) { | 1843 | bio_for_each_segment(bvec, bio, i) { |
@@ -1871,7 +1873,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index, | |||
1871 | sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); | 1873 | sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); |
1872 | sectors = 0; | 1874 | sectors = 0; |
1873 | 1875 | ||
1874 | if (index >= bio->bi_idx) | 1876 | if (index >= bio->bi_iter.bi_idx) |
1875 | index = bio->bi_vcnt - 1; | 1877 | index = bio->bi_vcnt - 1; |
1876 | 1878 | ||
1877 | bio_for_each_segment_all(bv, bio, i) { | 1879 | bio_for_each_segment_all(bv, bio, i) { |