Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 15 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d29215d966da..e84204eb12df 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2947,6 +2947,7 @@ static void handle_stripe5(struct stripe_head *sh)
 	struct r5dev *dev;
 	mdk_rdev_t *blocked_rdev = NULL;
 	int prexor;
+	int dec_preread_active = 0;
 
 	memset(&s, 0, sizeof(s));
 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3096,12 +3097,8 @@ static void handle_stripe5(struct stripe_head *sh)
 				set_bit(STRIPE_INSYNC, &sh->state);
 			}
 		}
-		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-			atomic_dec(&conf->preread_active_stripes);
-			if (atomic_read(&conf->preread_active_stripes) <
-			    IO_THRESHOLD)
-				md_wakeup_thread(conf->mddev->thread);
-		}
+		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+			dec_preread_active = 1;
 	}
 
 	/* Now to consider new write requests and what else, if anything
@@ -3208,6 +3205,16 @@ static void handle_stripe5(struct stripe_head *sh)
 
 	ops_run_io(sh, &s);
 
+	if (dec_preread_active) {
+		/* We delay this until after ops_run_io so that if make_request
+		 * is waiting on a barrier, it won't continue until the writes
+		 * have actually been submitted.
+		 */
+		atomic_dec(&conf->preread_active_stripes);
+		if (atomic_read(&conf->preread_active_stripes) <
+		    IO_THRESHOLD)
+			md_wakeup_thread(conf->mddev->thread);
+	}
 	return_io(return_bi);
 }
 
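Note: the deferral in this hunk (mirrored in handle_stripe6 below) is an ordering fix, not a style change: the counter must not reach zero until ops_run_io() has actually submitted the stripe's writes, because a barrier request in make_request() treats the count draining to zero as proof that all prior writes are in flight. A minimal userspace model of the pattern, with hypothetical names standing in for the kernel helpers (this is not kernel code):

    /* Userspace model only; these are not kernel APIs. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int preread_active_stripes = 1;  /* this stripe is counted */

    static void ops_run_io_model(void)
    {
            puts("writes submitted");       /* stands in for ops_run_io() */
    }

    static void handle_stripe_model(void)
    {
            int dec_preread_active = 1;     /* decided early ... */

            ops_run_io_model();             /* ... applied only after submission */

            if (dec_preread_active)
                    atomic_fetch_sub(&preread_active_stripes, 1);
            /* only now can a barrier waiter see the count hit zero */
    }

    int main(void)
    {
            handle_stripe_model();
            printf("count = %d\n", atomic_load(&preread_active_stripes));
            return 0;
    }
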
@@ -3221,6 +3228,7 @@ static void handle_stripe6(struct stripe_head *sh)
 	struct r6_state r6s;
 	struct r5dev *dev, *pdev, *qdev;
 	mdk_rdev_t *blocked_rdev = NULL;
+	int dec_preread_active = 0;
 
 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
 		"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3358,7 +3366,6 @@ static void handle_stripe6(struct stripe_head *sh)
 	 * completed
 	 */
 	if (sh->reconstruct_state == reconstruct_state_drain_result) {
-		int qd_idx = sh->qd_idx;
 
 		sh->reconstruct_state = reconstruct_state_idle;
 		/* All the 'written' buffers and the parity blocks are ready to
@@ -3380,12 +3387,8 @@ static void handle_stripe6(struct stripe_head *sh)
 				set_bit(STRIPE_INSYNC, &sh->state);
 			}
 		}
-		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-			atomic_dec(&conf->preread_active_stripes);
-			if (atomic_read(&conf->preread_active_stripes) <
-			    IO_THRESHOLD)
-				md_wakeup_thread(conf->mddev->thread);
-		}
+		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+			dec_preread_active = 1;
 	}
 
 	/* Now to consider new write requests and what else, if anything
@@ -3494,6 +3497,18 @@ static void handle_stripe6(struct stripe_head *sh)
 
 	ops_run_io(sh, &s);
 
+
+	if (dec_preread_active) {
+		/* We delay this until after ops_run_io so that if make_request
+		 * is waiting on a barrier, it won't continue until the writes
+		 * have actually been submitted.
+		 */
+		atomic_dec(&conf->preread_active_stripes);
+		if (atomic_read(&conf->preread_active_stripes) <
+		    IO_THRESHOLD)
+			md_wakeup_thread(conf->mddev->thread);
+	}
+
 	return_io(return_bi);
 }
 
@@ -3741,7 +3756,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev->private;
-	unsigned int dd_idx;
+	int dd_idx;
 	struct bio* align_bi;
 	mdk_rdev_t *rdev;
 
@@ -3866,7 +3881,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	int cpu, remaining;
 
 	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
-		bio_endio(bi, -EOPNOTSUPP);
+		/* Drain all pending writes.  We only really need
+		 * to ensure they have been submitted, but this is
+		 * easier.
+		 */
+		mddev->pers->quiesce(mddev, 1);
+		mddev->pers->quiesce(mddev, 0);
+		md_barrier_request(mddev, bi);
 		return 0;
 	}
 
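Note: instead of rejecting barriers with -EOPNOTSUPP, the new path drains the array: pers->quiesce(mddev, 1) blocks until the personality has no in-flight requests, quiesce(mddev, 0) reopens the gate, and md_barrier_request() hands the barrier bio back to the md core (a helper added in md.c by the same series; treat the call as an assumption of that context). The quiesce handshake follows a standard gate-and-drain shape; a hypothetical userspace model using pthreads (the kernel uses its own primitives, not these):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int quiesced;            /* gate closed to new requests? */
    static int active;              /* requests currently in flight */

    void quiesce_enter(void)        /* models pers->quiesce(mddev, 1) */
    {
            pthread_mutex_lock(&lock);
            quiesced = 1;
            while (active > 0)      /* drain everything already admitted */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    void quiesce_leave(void)        /* models pers->quiesce(mddev, 0) */
    {
            pthread_mutex_lock(&lock);
            quiesced = 0;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }

    void request_start(void)        /* admission check for new requests */
    {
            pthread_mutex_lock(&lock);
            while (quiesced)
                    pthread_cond_wait(&cond, &lock);
            active++;
            pthread_mutex_unlock(&lock);
    }

    void request_end(void)
    {
            pthread_mutex_lock(&lock);
            if (--active == 0)
                    pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }
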
@@ -3990,6 +4011,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
 			finish_wait(&conf->wait_for_overlap, &w);
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
+			if (mddev->barrier &&
+			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+				atomic_inc(&conf->preread_active_stripes);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
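Note: while a barrier is pending (mddev->barrier set), every stripe the bio touches is marked STRIPE_PREREAD_ACTIVE so it contributes to preread_active_stripes; the test_and_set_bit() guard makes the increment idempotent, so a stripe already carrying the flag is not counted twice. The generic shape of that "count each object at most once" pattern, as a hypothetical C sketch unrelated to the kernel's bitops:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int counted;      /* how many distinct objects are pending */

    /* The per-object flag is both state and guard: only the caller that
     * flips it from clear to set gets to bump the counter. */
    static void count_once(atomic_bool *flagged)
    {
            bool expected = false;
            if (atomic_compare_exchange_strong(flagged, &expected, true))
                    atomic_fetch_add(&counted, 1);
    }
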
@@ -4009,6 +4033,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
 		bio_endio(bi, 0);
 	}
+
+	if (mddev->barrier) {
+		/* We need to wait for the stripes to all be handled.
+		 * So: wait for preread_active_stripes to drop to 0.
+		 */
+		wait_event(mddev->thread->wqueue,
+			   atomic_read(&conf->preread_active_stripes) == 0);
+	}
 	return 0;
 }
 
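Note: this wait pairs with the deferred decrement in handle_stripe5()/handle_stripe6() above: md_wakeup_thread() wakes mddev->thread->wqueue, the very waitqueue this wait_event() sleeps on, so the barrier submitter is woken each time a counted stripe finishes submitting its writes and returns only once the count reaches zero. A hypothetical userspace equivalent of that wait/wake pairing (the kernel's wait_event() is lock-free on the fast path; this condvar version is only a model):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wqueue = PTHREAD_COND_INITIALIZER; /* models mddev->thread->wqueue */
    static int preread_active_stripes;

    void barrier_drain(void)        /* models the wait_event() above */
    {
            pthread_mutex_lock(&lock);
            while (preread_active_stripes != 0)
                    pthread_cond_wait(&wqueue, &lock);
            pthread_mutex_unlock(&lock);
    }

    void stripe_submitted(void)     /* models the decrement + md_wakeup_thread() */
    {
            pthread_mutex_lock(&lock);
            if (--preread_active_stripes == 0)
                    pthread_cond_broadcast(&wqueue);
            pthread_mutex_unlock(&lock);
    }
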
@@ -5860,6 +5892,7 @@ static void raid5_exit(void)
 module_init(raid5_init);
 module_exit(raid5_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
 MODULE_ALIAS("md-personality-4"); /* RAID5 */
 MODULE_ALIAS("md-raid5");
 MODULE_ALIAS("md-raid4");