Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	60
1 file changed, 18 insertions(+), 42 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 40b35be34f8d..d34e238afa54 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		if (test_bit(WriteMostly, &rdev->flags)) {
 			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
-			if (best_disk < 0) {
+			if (best_dist_disk < 0) {
 				if (is_badblock(rdev, this_sector, sectors,
 						&first_bad, &bad_sectors)) {
 					if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 					best_good_sectors = first_bad - this_sector;
 				} else
 					best_good_sectors = sectors;
-				best_disk = disk;
+				best_dist_disk = disk;
+				best_pending_disk = disk;
 			}
 			continue;
 		}
@@ -701,11 +702,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	return best_disk;
 }
 
-static int raid1_mergeable_bvec(struct request_queue *q,
+static int raid1_mergeable_bvec(struct mddev *mddev,
 				struct bvec_merge_data *bvm,
 				struct bio_vec *biovec)
 {
-	struct mddev *mddev = q->queuedata;
 	struct r1conf *conf = mddev->private;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max = biovec->bv_len;
@@ -734,7 +734,7 @@ static int raid1_mergeable_bvec(struct request_queue *q,
 
 }
 
-int md_raid1_congested(struct mddev *mddev, int bits)
+static int raid1_congested(struct mddev *mddev, int bits)
 {
 	struct r1conf *conf = mddev->private;
 	int i, ret = 0;
@@ -763,15 +763,6 @@ int md_raid1_congested(struct mddev *mddev, int bits)
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(md_raid1_congested);
-
-static int raid1_congested(void *data, int bits)
-{
-	struct mddev *mddev = data;
-
-	return mddev_congested(mddev, bits) ||
-		md_raid1_congested(mddev, bits);
-}
 
 static void flush_pending_writes(struct r1conf *conf)
 {
@@ -2206,7 +2197,8 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 	if (rdev->badblocks.shift < 0)
 		return 0;
 
-	block_sectors = 1 << rdev->badblocks.shift;
+	block_sectors = roundup(1 << rdev->badblocks.shift,
+				bdev_logical_block_size(rdev->bdev) >> 9);
 	sector = r1_bio->sector;
 	sectors = ((sector + block_sectors)
 		   & ~(sector_t)(block_sectors - 1))
@@ -2882,7 +2874,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
-static int stop(struct mddev *mddev);
+static void raid1_free(struct mddev *mddev, void *priv);
 static int run(struct mddev *mddev)
 {
 	struct r1conf *conf;
@@ -2904,7 +2896,7 @@ static int run(struct mddev *mddev)
 	/*
 	 * copy the already verified devices into our private RAID1
 	 * bookkeeping area. [whatever we allocate in run(),
-	 * should be freed in stop()]
+	 * should be freed in raid1_free()]
 	 */
 	if (mddev->private == NULL)
 		conf = setup_conf(mddev);
@@ -2955,10 +2947,6 @@ static int run(struct mddev *mddev)
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
 	if (mddev->queue) {
-		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
-
 		if (discard_supported)
 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
 						mddev->queue);
@@ -2968,37 +2956,23 @@ static int run(struct mddev *mddev)
 	}
 
 	ret = md_integrity_register(mddev);
-	if (ret)
-		stop(mddev);
+	if (ret) {
+		md_unregister_thread(&mddev->thread);
+		raid1_free(mddev, conf);
+	}
 	return ret;
 }
 
-static int stop(struct mddev *mddev)
+static void raid1_free(struct mddev *mddev, void *priv)
 {
-	struct r1conf *conf = mddev->private;
-	struct bitmap *bitmap = mddev->bitmap;
+	struct r1conf *conf = priv;
 
-	/* wait for behind writes to complete */
-	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
-		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
-		       mdname(mddev));
-		/* need to kick something here to make sure I/O goes? */
-		wait_event(bitmap->behind_wait,
-			   atomic_read(&bitmap->behind_writes) == 0);
-	}
-
-	freeze_array(conf, 0);
-	unfreeze_array(conf);
-
-	md_unregister_thread(&mddev->thread);
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
 	kfree(conf);
-	mddev->private = NULL;
-	return 0;
 }
 
 static int raid1_resize(struct mddev *mddev, sector_t sectors)
@@ -3181,7 +3155,7 @@ static struct md_personality raid1_personality =
 	.owner		= THIS_MODULE,
 	.make_request	= make_request,
 	.run		= run,
-	.stop		= stop,
+	.free		= raid1_free,
 	.status		= status,
 	.error_handler	= error,
 	.hot_add_disk	= raid1_add_disk,
@@ -3193,6 +3167,8 @@ static struct md_personality raid1_personality =
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
+	.congested	= raid1_congested,
+	.mergeable_bvec	= raid1_mergeable_bvec,
 };
 
 static int __init raid_init(void)
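
Note on the narrow_write_error() hunk above: block_sectors, the granularity at which failed writes are retried and bad blocks recorded, is now rounded up from 1 << rdev->badblocks.shift to the device's logical block size in sectors, so each retried chunk stays aligned to (and never smaller than) a logical block on, for example, a 4K-sector drive. Below is a minimal stand-alone sketch of that arithmetic; it is ordinary userspace C, not kernel code, and the helper name and the sample shift, block-size, and sector values are hypothetical. It assumes both inputs are powers of two, as they are in the kernel, which is what keeps the mask trick valid.

/*
 * Stand-alone sketch (not kernel code) of the alignment arithmetic in
 * narrow_write_error() after this change.  Both inputs to the rounding
 * helper are powers of two (badblocks granularity and logical block
 * size in sectors), so the result is too, and the power-of-two mask
 * below remains a valid way to find the next chunk boundary.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* mirrors the kernel's roundup() macro for these integer inputs */
static sector_t roundup_sectors(sector_t x, sector_t multiple)
{
	return ((x + multiple - 1) / multiple) * multiple;
}

int main(void)
{
	int badblocks_shift = 0;              /* hypothetical: 1-sector (512 B) granularity */
	sector_t logical_block_bytes = 4096;  /* hypothetical 4K-logical-block drive */
	sector_t sector = 1000003;            /* hypothetical start of the failed write */

	/* was: block_sectors = 1 << shift; now rounded up to the logical block */
	sector_t block_sectors = roundup_sectors((sector_t)1 << badblocks_shift,
						 logical_block_bytes >> 9);

	/* sectors from 'sector' up to the next block_sectors-aligned boundary */
	sector_t sectors = ((sector + block_sectors)
			    & ~(sector_t)(block_sectors - 1))
			   - sector;

	printf("block_sectors=%llu, first retried chunk=%llu sectors\n",
	       (unsigned long long)block_sectors,
	       (unsigned long long)sectors);
	return 0;
}

With these sample values the first retried chunk is 5 sectors and ends exactly on a 4 KiB boundary, whereas the pre-patch code (block_sectors = 1 for shift 0) could issue retries smaller than a logical block on such a drive.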