path: root/block
author	Kent Overstreet <kmo@daterainc.com>	2013-10-11 18:44:27 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:47 -0500
commit	4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch)
tree	3aedcab02d2ad723a189d01934d1e94fec7a54e1 /block
parent	ed9c47bebeeea4a468b07cfd745c690190f8014c (diff)
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To implement immutable bvecs, a later patch is going to add a bi_bvec_done member to the new bvec iterator struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	36
-rw-r--r--	block/blk-flush.c	2
-rw-r--r--	block/blk-lib.c	12
-rw-r--r--	block/blk-map.c	6
-rw-r--r--	block/blk-merge.c	4
-rw-r--r--	block/blk-mq.c	2
-rw-r--r--	block/blk-throttle.c	14
-rw-r--r--	block/elevator.c	2
8 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd0121212a..5c2ab2c74066 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
 		bio_endio(bio, error);
 }
 
@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
 	bio->bi_io_vec->bv_offset = 0;
 	bio->bi_io_vec->bv_len = len;
 
-	bio->bi_size = len;
+	bio->bi_iter.bi_size = len;
 	bio->bi_vcnt = 1;
 	bio->bi_phys_segments = 1;
 
@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 
 	req->biotail->bi_next = bio;
 	req->biotail = bio;
-	req->__data_len += bio->bi_size;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 	 * not touch req->buffer either...
 	 */
 	req->buffer = bio_data(bio);
-	req->__sector = bio->bi_sector;
-	req->__data_len += bio->bi_size;
+	req->__sector = bio->bi_iter.bi_sector;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
-	req->__sector = bio->bi_sector;
+	req->__sector = bio->bi_iter.bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
 
-		bio->bi_sector += p->start_sect;
+		bio->bi_iter.bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
 				      bdev->bd_dev,
-				      bio->bi_sector - p->start_sect);
+				      bio->bi_iter.bi_sector - p->start_sect);
 	}
 }
 
@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 	/* Test device or partition size, when known. */
 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 
 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
 			/*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
1690 "generic_make_request: Trying to access " 1690 "generic_make_request: Trying to access "
1691 "nonexistent block-device %s (%Lu)\n", 1691 "nonexistent block-device %s (%Lu)\n",
1692 bdevname(bio->bi_bdev, b), 1692 bdevname(bio->bi_bdev, b),
1693 (long long) bio->bi_sector); 1693 (long long) bio->bi_iter.bi_sector);
1694 goto end_io; 1694 goto end_io;
1695 } 1695 }
1696 1696
@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
 	}
 
 	part = bio->bi_bdev->bd_part;
-	if (should_fail_request(part, bio->bi_size) ||
+	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
-				bio->bi_size))
+				bio->bi_iter.bi_size))
 		goto end_io;
 
 	/*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
 	if (rw & WRITE) {
 		count_vm_events(PGPGOUT, count);
 	} else {
-		task_io_account_read(bio->bi_size);
+		task_io_account_read(bio->bi_iter.bi_size);
 		count_vm_events(PGPGIN, count);
 	}
 
@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
 		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
 			current->comm, task_pid_nr(current),
 			(rw & WRITE) ? "WRITE" : "READ",
-			(unsigned long long)bio->bi_sector,
+			(unsigned long long)bio->bi_iter.bi_sector,
 			bdevname(bio->bi_bdev, b),
 			count);
 	}
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
 		if ((bio->bi_rw & ff) != ff)
 			break;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 	}
 
 	/* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	total_bytes = 0;
 	while (req->bio) {
 		struct bio *bio = req->bio;
-		unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-		if (bio_bytes == bio->bi_size)
+		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;
 
 		req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->__data_len = bio->bi_size;
+	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0ffa49..9288aaf35c21 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	 * copied from blk_rq_pos(rq).
 	 */
 	if (error_sector)
-		*error_sector = bio->bi_sector;
+		*error_sector = bio->bi_iter.bi_sector;
 
 	bio_put(bio);
 	return ret;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561cb928..2da76c999ef3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		bio->bi_size = req_sects << 9;
+		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
 		if (nr_sects > max_write_same_sectors) {
-			bio->bi_size = max_write_same_sectors << 9;
+			bio->bi_iter.bi_size = max_write_same_sectors << 9;
 			nr_sects -= max_write_same_sectors;
 			sector += max_write_same_sectors;
 		} else {
-			bio->bi_size = nr_sects << 9;
+			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
 
194 194
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cffe..ae4ae1047fd9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->__data_len += bio->bi_size;
+		rq->__data_len += bio->bi_iter.bi_size;
 	}
 	return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	ret = blk_rq_append_bio(q, rq, bio);
 	if (!ret)
-		return bio->bi_size;
+		return bio->bi_iter.bi_size;
 
 	/* if it was boucned we must call the end io function */
 	bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (bio->bi_size != len) {
+	if (bio->bi_iter.bi_size != len) {
 		/*
 		 * Grab an extra reference to this bio, as bio_unmap_user()
 		 * expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835..03bc083c28cf 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -543,9 +543,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
 	return ELEVATOR_NO_MERGE;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cdc629cf075b..e4fbcc3fd2db 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -301,7 +301,7 @@ void blk_mq_complete_request(struct request *rq, int error)
 		struct bio *next = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 		blk_mq_bio_endio(rq, bio, error);
 		bio = next;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 06534049afba..20f820037775 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return 1;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
 	if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	bool rw = bio_data_dir(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_size;
+	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 	tg->io_disp[rw]++;
 
 	/*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	 */
 	if (!(bio->bi_rw & REQ_THROTTLED)) {
 		bio->bi_rw |= REQ_THROTTLED;
-		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-					     bio->bi_rw);
+		throtl_update_dispatch_stats(tg_to_blkg(tg),
+					     bio->bi_iter.bi_size, bio->bi_rw);
 	}
 }
 
@@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	if (tg) {
 		if (!tg->has_rules[rw]) {
 			throtl_update_dispatch_stats(tg_to_blkg(tg),
-					bio->bi_size, bio->bi_rw);
+					bio->bi_iter.bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
@@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	/* out-of-limit, queue to @tg */
 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
 		   rw == READ ? 'R' : 'W',
-		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
 		   tg->io_disp[rw], tg->iops[rw],
 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
diff --git a/block/elevator.c b/block/elevator.c
index b7ff2861b6bd..42c45a7d6714 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, bio->bi_sector);
+	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		*req = __rq;
 		return ELEVATOR_BACK_MERGE;