Diffstat (limited to 'drivers/md')

 drivers/md/bitmap.c | 22 +++++++++++++++++++++-
 drivers/md/raid5.c  | 42 +++++++++++++++++++++++++++++++++++++++---
 2 files changed, 60 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 11108165e264..059704fbb753 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1160,6 +1160,22 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			return 0;
 		}
 
+		if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+			DEFINE_WAIT(__wait);
+			/* note that it is safe to do the prepare_to_wait
+			 * after the test as long as we do it before dropping
+			 * the spinlock.
+			 */
+			prepare_to_wait(&bitmap->overflow_wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock_irq(&bitmap->lock);
+			bitmap->mddev->queue
+				->unplug_fn(bitmap->mddev->queue);
+			schedule();
+			finish_wait(&bitmap->overflow_wait, &__wait);
+			continue;
+		}
+
 		switch(*bmc) {
 		case 0:
 			bitmap_file_set_bit(bitmap, offset);
@@ -1169,7 +1185,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		case 1:
 			*bmc = 2;
 		}
-		BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
+
 		(*bmc)++;
 
 		spin_unlock_irq(&bitmap->lock);
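
The two bitmap_startwrite() hunks above turn a fatal condition into flow control: a writer that finds the per-chunk write counter saturated at COUNTER_MAX now sleeps on the new overflow_wait queue instead of tripping a BUG_ON, after kicking ->unplug_fn so already-queued writes can complete and release counts. Below is a minimal compilable sketch of the sleep side against the 2.6-era wait API; the lock, counter and COUNTER_MAX value are illustrative stand-ins for the bitmap's own fields:

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static DECLARE_WAIT_QUEUE_HEAD(overflow_wait);
	static DEFINE_SPINLOCK(lock);
	static unsigned int counter;	/* protected by lock */
	#define COUNTER_MAX 0x3fff	/* illustrative width */

	static void start_write(void)
	{
		spin_lock_irq(&lock);
		while ((counter & COUNTER_MAX) == COUNTER_MAX) {
			DEFINE_WAIT(w);
			/* Queue ourselves before dropping the lock: a
			 * wake_up racing with the unlock still sees this
			 * waiter, so the wakeup cannot be lost. */
			prepare_to_wait(&overflow_wait, &w, TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&lock);
			schedule();
			finish_wait(&overflow_wait, &w);
			spin_lock_irq(&lock);	/* re-test under the lock */
		}
		counter++;
		spin_unlock_irq(&lock);
	}
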
@@ -1207,6 +1223,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 		if (!success && ! (*bmc & NEEDED_MASK))
 			*bmc |= NEEDED_MASK;
 
+		if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+			wake_up(&bitmap->overflow_wait);
+
 		(*bmc)--;
 		if (*bmc <= 2) {
 			set_page_attr(bitmap,
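
The bitmap_endwrite() hunk is the wake side of the same pattern: the test runs while bitmap->lock is still held and before the decrement, so it matches the sleeper's saturation check exactly and fires precisely on the transition away from COUNTER_MAX. Continuing the sketch above:

	static void end_write(void)
	{
		spin_lock_irq(&lock);
		if ((counter & COUNTER_MAX) == COUNTER_MAX)
			wake_up(&overflow_wait);	/* about to leave saturation */
		counter--;
		spin_unlock_irq(&lock);
	}
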
@@ -1431,6 +1450,7 @@ int bitmap_create(mddev_t *mddev)
 	spin_lock_init(&bitmap->lock);
 	atomic_set(&bitmap->pending_writes, 0);
 	init_waitqueue_head(&bitmap->write_wait);
+	init_waitqueue_head(&bitmap->overflow_wait);
 
 	bitmap->mddev = mddev;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 467c16982d02..11c3d7bfa797 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2620,7 +2620,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
 	}
 	bi = conf->retry_read_aligned_list;
 	if(bi) {
-		conf->retry_read_aligned = bi->bi_next;
+		conf->retry_read_aligned_list = bi->bi_next;
 		bi->bi_next = NULL;
 		bi->bi_phys_segments = 1; /* biased count of active stripes */
 		bi->bi_hw_segments = 0; /* count of processed stripes */
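
This one-word fix matters because raid5 keeps two retry pointers: conf->retry_read_aligned holds a single partially-processed bio (see the retry_aligned_read hunk further below), while conf->retry_read_aligned_list is a queue chained through bi_next. Storing the next pointer into the wrong field clobbered any parked bio and leaked the rest of the queue. A condensed sketch of the intended pop discipline; the real remove_bio_from_retry() additionally re-biases the segment counters as shown above:

	static struct bio *pop_retry(raid5_conf_t *conf)
	{
		struct bio *bi = conf->retry_read_aligned;

		if (bi) {				/* half-done bio takes priority */
			conf->retry_read_aligned = NULL;
			return bi;
		}
		bi = conf->retry_read_aligned_list;	/* otherwise pop the queue head */
		if (bi) {
			conf->retry_read_aligned_list = bi->bi_next;
			bi->bi_next = NULL;
		}
		return bi;
	}
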
@@ -2669,6 +2669,27 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
 	return 0;
 }
 
+static int bio_fits_rdev(struct bio *bi)
+{
+	request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+
+	if ((bi->bi_size>>9) > q->max_sectors)
+		return 0;
+	blk_recount_segments(q, bi);
+	if (bi->bi_phys_segments > q->max_phys_segments ||
+	    bi->bi_hw_segments > q->max_hw_segments)
+		return 0;
+
+	if (q->merge_bvec_fn)
+		/* it's too hard to apply the merge_bvec_fn at this stage,
+		 * just give up
+		 */
+		return 0;
+
+	return 1;
+}
+
+
 static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
@@ -2715,6 +2736,13 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 		align_bi->bi_sector += rdev->data_offset;
 
+		if (!bio_fits_rdev(align_bi)) {
+			/* too big in some way */
+			bio_put(align_bi);
+			rdev_dec_pending(rdev, mddev);
+			return 0;
+		}
+
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,
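
An aligned read bypasses the stripe cache entirely, so the cloned bio must obey the member device's queue limits. The new guard sits immediately after the clone has been retargeted at that device (bi_bdev and bi_sector rewritten, BIO_SEG_VALID cleared), which is the earliest point where bio_fits_rdev()'s blk_recount_segments() answers for the right queue. On a misfit the clone is dropped, the rdev reference is released, and 0 is returned so the caller falls back to the stripe-cache path. A hedged sketch of that caller contract; the actual make_request() condition in this kernel carries additional checks:

	static int make_request_sketch(request_queue_t *q, struct bio *bi)
	{
		/* chunk_aligned_read() returns nonzero only if it took
		 * ownership of the read and dispatched it itself */
		if (bio_data_dir(bi) == READ && chunk_aligned_read(q, bi))
			return 0;

		/* otherwise fall through to the stripe-cache path ... */
		return 0;
	}
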
@@ -3107,7 +3135,9 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
 
 	for (; logical_sector < last_sector;
-	     logical_sector += STRIPE_SECTORS, scnt++) {
+	     logical_sector += STRIPE_SECTORS,
+		     sector += STRIPE_SECTORS,
+		     scnt++) {
 
 		if (scnt < raid_bio->bi_hw_segments)
 			/* already done this stripe */
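
Previously only logical_sector and scnt advanced, so sector (the position on the member disk, computed once before the loop) pointed every retried stripe at the first stripe's location. Advancing it in lockstep is valid because an aligned read stays within one chunk on one device, so the device-relative sector grows linearly with the logical one: with 4K pages (STRIPE_SECTORS == 8), a three-stripe retry now probes sector, sector+8 and sector+16 instead of sector three times.
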
@@ -3123,7 +3153,13 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 		}
 
 		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
-		add_stripe_bio(sh, raid_bio, dd_idx, 0);
+		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+			release_stripe(sh);
+			raid_bio->bi_hw_segments = scnt;
+			conf->retry_read_aligned = raid_bio;
+			return handled;
+		}
+
 		handle_stripe(sh, NULL);
 		release_stripe(sh);
 		handled++;
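
add_stripe_bio() can fail, typically because the stripe already carries an overlapping bio for that device; the old code ignored the return value and simply lost the read. Now the stripe is released, progress is recorded in bi_hw_segments (the "count of processed stripes" set up in remove_bio_from_retry() above), and the bio is parked in conf->retry_read_aligned for a later pass; the scnt guard at the top of the loop then skips the stripes already handled. A condensed skeleton of that resume logic, pulled together from the hunks above:

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS, sector += STRIPE_SECTORS, scnt++) {
		if (scnt < raid_bio->bi_hw_segments)
			continue;	/* finished on an earlier pass */
		/* ... get the stripe and attach the bio; on failure,
		 * save scnt in bi_hw_segments and park the bio ... */
	}
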