author	Kent Overstreet <koverstreet@google.com>	2013-02-05 18:19:29 -0500
committer	Kent Overstreet <koverstreet@google.com>	2013-03-23 17:15:30 -0400
commit	aa8b57aa3d1c06ca53312294ee6dfc767ee3ddb3 (patch)
tree	a5921d44756d32e0555a8ae284db3c16a8386860
parent	f73a1c7d117d07a96d89475066188a2b79e53c48 (diff)
block: Use bio_sectors() more consistently
Bunch of places in the code weren't using it where they could be - this'll
reduce the size of the patch that puts bi_sector/bi_size/bi_idx into a
struct bvec_iter.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: "Ed L. Cashin" <ecashin@coraid.com>
CC: Nick Piggin <npiggin@kernel.dk>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Jim Paris <jim@jtan.com>
CC: Geoff Levand <geoff@infradead.org>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ed Cashin <ecashin@coraid.com>
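For reference, bio_sectors() wraps exactly the open-coded shift this patch
removes: bi_size counts bytes, and a sector is 512 bytes, so bi_size >> 9 is
the bio's length in sectors. A minimal sketch of the helper as it stood in
include/linux/bio.h around this series (quoted from memory, so treat the
exact spelling as an assumption rather than the verbatim header):

	/* include/linux/bio.h (pre-bvec_iter era; exact form assumed) */
	/* number of 512-byte sectors covered by a bio: bytes / 512 */
	#define bio_sectors(bio)	((bio)->bi_size >> 9)

This also explains the two printk conversions below: the old code printed
bi_size >> 10 (KiB), and since a sector is half a KiB, bio_sectors(bio) / 2
prints the same value.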
-rw-r--r--	drivers/block/pktcdvd.c	2
-rw-r--r--	drivers/md/dm-raid1.c	2
-rw-r--r--	drivers/md/raid0.c	6
-rw-r--r--	drivers/md/raid1.c	17
-rw-r--r--	drivers/md/raid10.c	24
-rw-r--r--	drivers/md/raid5.c	8
-rw-r--r--	fs/btrfs/volumes.c	2
-rw-r--r--	include/trace/events/block.h	12
8 files changed, 35 insertions(+), 38 deletions(-)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 26938e8e2fc3..2c27744b9ca6 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2433,7 +2433,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		cloned_bio->bi_bdev = pd->bdev;
 		cloned_bio->bi_private = psd;
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-		pd->stats.secs_r += bio->bi_size >> 9;
+		pd->stats.secs_r += bio_sectors(bio);
 		pkt_queue_bio(pd, cloned_bio);
 		return;
 	}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d053098c6a91..699b5be68d31 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
 {
 	io->bdev = m->dev->bdev;
 	io->sector = map_sector(m, bio);
-	io->count = bio->bi_size >> 9;
+	io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0505452de8d6..23a38afec351 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 {
 	if (likely(is_power_of_2(chunk_sects))) {
 		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-					+ (bio->bi_size >> 9));
+					+ bio_sectors(bio));
 	} else{
 		sector_t sector = bio->bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
-				+ (bio->bi_size >> 9));
+				+ bio_sectors(bio));
 	}
 }
 
@@ -567,7 +567,7 @@ bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n",
 	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4d8c2e0a6bad..f741c9fe25c8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -267,7 +267,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 			 (unsigned long long) bio->bi_sector,
 			 (unsigned long long) bio->bi_sector +
-			 (bio->bi_size >> 9) - 1);
+			 bio_sectors(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -458,7 +458,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
458 " %llu-%llu\n", 458 " %llu-%llu\n",
459 (unsigned long long) mbio->bi_sector, 459 (unsigned long long) mbio->bi_sector,
460 (unsigned long long) mbio->bi_sector + 460 (unsigned long long) mbio->bi_sector +
461 (mbio->bi_size >> 9) - 1); 461 bio_sectors(mbio) - 1);
462 call_bio_endio(r1_bio); 462 call_bio_endio(r1_bio);
463 } 463 }
464 } 464 }
@@ -1049,7 +1049,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 	r1_bio->master_bio = bio;
-	r1_bio->sectors = bio->bi_size >> 9;
+	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
@@ -1127,7 +1127,7 @@ read_again:
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 			r1_bio->master_bio = bio;
-			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
 			r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1329,14 +1329,14 @@ read_again:
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
 	 */
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		r1_bio_write_done(r1_bio);
 		/* We need another r1_bio.  It has already been counted
 		 * in bio->bi_phys_segments
 		 */
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 		r1_bio->master_bio = bio;
-		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
 		r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1947,7 +1947,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		wbio->bi_rw = WRITE;
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
-		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
 		generic_make_request(wbio);
 	}
@@ -2284,8 +2284,7 @@ read_more:
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 		r1_bio->master_bio = mbio;
-		r1_bio->sectors = (mbio->bi_size >> 9)
-			- sectors_handled;
+		r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
 		r1_bio->state = 0;
 		set_bit(R1BIO_ReadError, &r1_bio->state);
 		r1_bio->mddev = mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 77b562d18a90..5ee14ab16a05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1169,7 +1169,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it.  This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1209,7 +1209,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1224,7 +1224,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	       bio->bi_sector < conf->reshape_progress &&
 	       bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1326,8 +1326,7 @@ read_again:
 			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 			r10_bio->master_bio = bio;
-			r10_bio->sectors = ((bio->bi_size >> 9)
-					    - sectors_handled);
+			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r10_bio->state = 0;
 			r10_bio->mddev = mddev;
 			r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1569,7 +1568,7 @@ retry_write:
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio.  It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1577,7 +1576,7 @@ retry_write:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2103,7 +2102,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2128,7 +2127,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
 
@@ -2254,13 +2253,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2690,8 +2689,7 @@ read_more:
 			r10_bio = mempool_alloc(conf->r10bio_pool,
 						GFP_NOIO);
 			r10_bio->master_bio = mbio;
-			r10_bio->sectors = (mbio->bi_size >> 9)
-				- sectors_handled;
+			r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 			r10_bio->state = 0;
 			set_bit(R10BIO_ReadError,
 				&r10_bio->state);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 68706970d217..4e0f87e462ce 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-	int sectors = bio->bi_size >> 9;
+	int sectors = bio_sectors(bio);
 	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
@@ -3804,7 +3804,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bio_sectors(bio);
 
 	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 		chunk_sectors = mddev->new_chunk_sectors;
@@ -3894,7 +3894,7 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > queue_max_sectors(q))
+	if (bio_sectors(bi) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
 	if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3964,7 +3964,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (!bio_fits_rdev(align_bi) ||
-	    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+	    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
 			&first_bad, &bad_sectors)) {
 		/* too big in some way, or has a known bad block */
 		bio_put(align_bi);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5989a92236f7..d90e0485e01b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5166,7 +5166,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
 	}
 
 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-	if ((bio->bi_size >> 9) > max_sectors)
+	if (bio_sectors(bio) > max_sectors)
 		return 0;
 
 	if (!q->merge_bvec_fn)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9961726523d0..5a28843725df 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -244,7 +244,7 @@ TRACE_EVENT(block_bio_bounce,
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -281,7 +281,7 @@ TRACE_EVENT(block_bio_complete,
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 	),
@@ -309,7 +309,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -376,7 +376,7 @@ TRACE_EVENT(block_bio_queue,
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -404,7 +404,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 	TP_fast_assign(
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio ? bio->bi_sector : 0;
-		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0;
+		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
@@ -580,7 +580,7 @@ TRACE_EVENT(block_bio_remap,
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);