Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 26ee39936a28..05e4a105b9c7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-	int sectors = bio->bi_size >> 9;
+	int sectors = bio_sectors(bio);
 	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
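
(Note: bio_sectors() and bio_end_sector() are the generic helpers this
patch substitutes for the open-coded ">> 9" arithmetic. Around this
kernel they were defined in include/linux/bio.h roughly as follows;
treat this as a sketch, not verbatim:

	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))

so every conversion below is mechanically equivalent; the gain is that
callers stop hard-coding the 512-byte sector shift.)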
@@ -569,14 +569,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		bi = &sh->dev[i].req;
 		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 
-		bi->bi_rw = rw;
-		rbi->bi_rw = rw;
-		if (rw & WRITE) {
-			bi->bi_end_io = raid5_end_write_request;
-			rbi->bi_end_io = raid5_end_write_request;
-		} else
-			bi->bi_end_io = raid5_end_read_request;
-
 		rcu_read_lock();
 		rrdev = rcu_dereference(conf->disks[i].replacement);
 		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
@@ -651,7 +643,14 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(bi);
 			bi->bi_bdev = rdev->bdev;
+			bi->bi_rw = rw;
+			bi->bi_end_io = (rw & WRITE)
+				? raid5_end_write_request
+				: raid5_end_read_request;
+			bi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 				__func__, (unsigned long long)sh->sector,
 				bi->bi_rw, i);
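
(Note: the initialisation deleted from the top of ops_run_io() has to
be redone here because bio_reset() wipes it. As a sketch of fs/bio.c
around this kernel, not verbatim:

	void bio_reset(struct bio *bio)
	{
		unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

		/* Zeroes the front of the bio: bi_next, bi_idx, bi_rw,
		 * bi_vcnt, bi_size, bi_end_io, bi_private, ... while
		 * preserving bi_max_vecs, bi_io_vec and the pool. */
		memset(bio, 0, BIO_RESET_BYTES);
		bio->bi_flags = flags | (1 << BIO_UPTODATE);
	}

It re-sets BIO_UPTODATE itself, which is why the explicit
"bi->bi_flags = 1 << BIO_UPTODATE", "bi->bi_idx = 0" and
"bi->bi_next = NULL" assignments disappear in the next hunk.)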
@@ -665,12 +664,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_FLUSH;
 
-			bi->bi_flags = 1 << BIO_UPTODATE;
-			bi->bi_idx = 0;
+			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
-			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
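(Note: bio_reset() also zeroes bi_vcnt, so the single-page biovec is
now rebuilt in full, hence the new "bi->bi_vcnt = 1". For scale, the
stripe geometry in drivers/md/raid5.h of this era, paraphrased:

	#define STRIPE_SIZE	PAGE_SIZE
	#define STRIPE_SECTORS	(STRIPE_SIZE >> 9)

i.e. each stripe-cache bio carries exactly one page of one member disk.)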
@@ -687,7 +684,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(rbi);
 			rbi->bi_bdev = rrdev->bdev;
+			rbi->bi_rw = rw;
+			BUG_ON(!(rw & WRITE));
+			rbi->bi_end_io = raid5_end_write_request;
+			rbi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on "
 				"replacement disc %d\n",
 				__func__, (unsigned long long)sh->sector,
@@ -699,12 +702,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			else
 				rbi->bi_sector = (sh->sector
 						  + rrdev->data_offset);
-			rbi->bi_flags = 1 << BIO_UPTODATE;
-			rbi->bi_idx = 0;
+			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
-			rbi->bi_next = NULL;
 			if (conf->mddev->gendisk)
 				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
 					      rbi, disk_devt(conf->mddev->gendisk),
@@ -2402,11 +2403,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	} else
 		bip = &sh->dev[dd_idx].toread;
 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
+		if (bio_end_sector(*bip) > bi->bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
+	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
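
(Note: the two checks above are the standard half-open interval test,
now spelled with bio_end_sector(). A hypothetical standalone form of
the predicate, for illustration only — bios_overlap() is not a real
kernel helper:

	static inline int bios_overlap(struct bio *a, struct bio *b)
	{
		/* Conflict iff each bio starts before the other ends. */
		return a->bi_sector < bio_end_sector(b) &&
		       b->bi_sector < bio_end_sector(a);
	}

Only neighbours need testing because the *bip list is kept sorted by
bi_sector.)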
@@ -2422,8 +2423,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
-			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
-				sector = bi->bi_sector + (bi->bi_size>>9);
+			if (bio_end_sector(bi) >= sector)
+				sector = bio_end_sector(bi);
 		}
 	if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
 		set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
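
(Note: this loop decides R5_OVERWRITE, i.e. whether the queued writes
fully cover the stripe page. "sector" is a coverage frontier that each
bio starting at or below it can push forward to bio_end_sector(bi).
For example, with dev->sector = 0 and STRIPE_SECTORS = 8, writes over
[0,4) and [4,8) advance the frontier to 8 and the flag is set; writes
over [0,4) and [5,8) leave it stalled at 4, so the flag stays clear.)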
@@ -3849,7 +3850,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bio_sectors(bio);
 
 	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 		chunk_sectors = mddev->new_chunk_sectors;
@@ -3941,7 +3942,7 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > queue_max_sectors(q))
+	if (bio_sectors(bi) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
 	if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3988,7 +3989,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 						     0,
 						     &dd_idx, NULL);
 
-	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
+	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
@@ -4011,7 +4012,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4273,7 +4274,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	}
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
@@ -4739,7 +4740,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
-	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
+	last_sector = bio_end_sector(raid_bio);
 
 	for (; logical_sector < last_sector;
 	     logical_sector += STRIPE_SECTORS,
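
(Note: after this change make_request() and retry_aligned_read() share
the same walk pattern. In hypothetical standalone form, with
handle_stripe_at() made up purely for illustration:

	/* Round the start down to a stripe boundary, then visit each
	 * STRIPE_SECTORS-sized stripe the bio touches. */
	sector_t logical = bi->bi_sector & ~((sector_t)STRIPE_SECTORS - 1);
	sector_t last = bio_end_sector(bi);

	for (; logical < last; logical += STRIPE_SECTORS)
		handle_stripe_at(logical);	/* one stripe per step */
)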