diff options
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 68706970d217..4e0f87e462ce 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-	int sectors = bio->bi_size >> 9;
+	int sectors = bio_sectors(bio);
 	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
@@ -3804,7 +3804,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bio_sectors(bio);
 
 	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 		chunk_sectors = mddev->new_chunk_sectors;
@@ -3894,7 +3894,7 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > queue_max_sectors(q))
+	if (bio_sectors(bi) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
 	if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3964,7 +3964,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (!bio_fits_rdev(align_bi) ||
-	    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+	    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
 			&first_bad, &bad_sectors)) {
 		/* too big in some way, or has a known bad block */
 		bio_put(align_bi);