about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2011-07-26 21:00:36 -0400
committerNeilBrown <neilb@suse.de>2011-07-26 21:00:36 -0400
commit3687c061886dd0bfec07e131ad12f916ef0abc62 (patch)
treec8f65f4895db3f9c1554f766a430829d40fe7c19 /drivers
parent86c374ba9f6726a79a032ede741dc66d219b166e (diff)
md/raid5: Move code for finishing a reconstruction into handle_stripe.
Prior to commit ab69ae12ceef7, the code in handle_stripe5 and handle_stripe6 to "Finish reconstruct operations initiated by the expansion process" was identical. That commit added an identical stanza of code to each function, but in different places. That was careless. The raid5 code was correct, so move that out into handle_stripe and remove the raid6 version. Signed-off-by: NeilBrown <neilb@suse.de> Reviewed-by: Namhyung Kim <namhyung@gmail.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/raid5.c153
1 file changed, 57 insertions, 96 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 793dd76aeae0..cd6f04f145e6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2998,7 +2998,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2998 * 2998 *
2999 */ 2999 */
3000 3000
3001static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s) 3001static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s)
3002{ 3002{
3003 raid5_conf_t *conf = sh->raid_conf; 3003 raid5_conf_t *conf = sh->raid_conf;
3004 int disks = sh->disks, i; 3004 int disks = sh->disks, i;
@@ -3080,7 +3080,7 @@ static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s)
3080 if (s->syncing || s->expanding || s->expanded || 3080 if (s->syncing || s->expanding || s->expanded ||
3081 s->to_write || s->written) { 3081 s->to_write || s->written) {
3082 set_bit(STRIPE_HANDLE, &sh->state); 3082 set_bit(STRIPE_HANDLE, &sh->state);
3083 return; 3083 return 1;
3084 } 3084 }
3085 /* There is nothing for the blocked_rdev to block */ 3085 /* There is nothing for the blocked_rdev to block */
3086 rdev_dec_pending(s->blocked_rdev, conf->mddev); 3086 rdev_dec_pending(s->blocked_rdev, conf->mddev);
@@ -3204,54 +3204,10 @@ static void handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s)
3204 s->locked++; 3204 s->locked++;
3205 } 3205 }
3206 } 3206 }
3207 3207 return 0;
3208 /* Finish reconstruct operations initiated by the expansion process */
3209 if (sh->reconstruct_state == reconstruct_state_result) {
3210 struct stripe_head *sh2
3211 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3212 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3213 /* sh cannot be written until sh2 has been read.
3214 * so arrange for sh to be delayed a little
3215 */
3216 set_bit(STRIPE_DELAYED, &sh->state);
3217 set_bit(STRIPE_HANDLE, &sh->state);
3218 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3219 &sh2->state))
3220 atomic_inc(&conf->preread_active_stripes);
3221 release_stripe(sh2);
3222 return;
3223 }
3224 if (sh2)
3225 release_stripe(sh2);
3226
3227 sh->reconstruct_state = reconstruct_state_idle;
3228 clear_bit(STRIPE_EXPANDING, &sh->state);
3229 for (i = conf->raid_disks; i--; ) {
3230 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3231 set_bit(R5_LOCKED, &sh->dev[i].flags);
3232 s->locked++;
3233 }
3234 }
3235
3236 if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3237 !sh->reconstruct_state) {
3238 /* Need to write out all blocks after computing parity */
3239 sh->disks = conf->raid_disks;
3240 stripe_set_idx(sh->sector, conf, 0, sh);
3241 schedule_reconstruction(sh, s, 1, 1);
3242 } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) {
3243 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3244 atomic_dec(&conf->reshape_stripes);
3245 wake_up(&conf->wait_for_overlap);
3246 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3247 }
3248
3249 if (s->expanding && s->locked == 0 &&
3250 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3251 handle_stripe_expansion(conf, sh);
3252} 3208}
3253 3209
3254static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s) 3210static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s)
3255{ 3211{
3256 raid5_conf_t *conf = sh->raid_conf; 3212 raid5_conf_t *conf = sh->raid_conf;
3257 int disks = sh->disks; 3213 int disks = sh->disks;
@@ -3334,7 +3290,7 @@ static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s)
3334 if (s->syncing || s->expanding || s->expanded || 3290 if (s->syncing || s->expanding || s->expanded ||
3335 s->to_write || s->written) { 3291 s->to_write || s->written) {
3336 set_bit(STRIPE_HANDLE, &sh->state); 3292 set_bit(STRIPE_HANDLE, &sh->state);
3337 return; 3293 return 1;
3338 } 3294 }
3339 /* There is nothing for the blocked_rdev to block */ 3295 /* There is nothing for the blocked_rdev to block */
3340 rdev_dec_pending(s->blocked_rdev, conf->mddev); 3296 rdev_dec_pending(s->blocked_rdev, conf->mddev);
@@ -3467,56 +3423,14 @@ static void handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s)
3467 } 3423 }
3468 } 3424 }
3469 } 3425 }
3470 3426 return 0;
3471 /* Finish reconstruct operations initiated by the expansion process */
3472 if (sh->reconstruct_state == reconstruct_state_result) {
3473 sh->reconstruct_state = reconstruct_state_idle;
3474 clear_bit(STRIPE_EXPANDING, &sh->state);
3475 for (i = conf->raid_disks; i--; ) {
3476 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3477 set_bit(R5_LOCKED, &sh->dev[i].flags);
3478 s->locked++;
3479 }
3480 }
3481
3482 if (s->expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3483 !sh->reconstruct_state) {
3484 struct stripe_head *sh2
3485 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3486 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3487 /* sh cannot be written until sh2 has been read.
3488 * so arrange for sh to be delayed a little
3489 */
3490 set_bit(STRIPE_DELAYED, &sh->state);
3491 set_bit(STRIPE_HANDLE, &sh->state);
3492 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3493 &sh2->state))
3494 atomic_inc(&conf->preread_active_stripes);
3495 release_stripe(sh2);
3496 return;
3497 }
3498 if (sh2)
3499 release_stripe(sh2);
3500
3501 /* Need to write out all blocks after computing P&Q */
3502 sh->disks = conf->raid_disks;
3503 stripe_set_idx(sh->sector, conf, 0, sh);
3504 schedule_reconstruction(sh, s, 1, 1);
3505 } else if (s->expanded && !sh->reconstruct_state && s->locked == 0) {
3506 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3507 atomic_dec(&conf->reshape_stripes);
3508 wake_up(&conf->wait_for_overlap);
3509 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3510 }
3511
3512 if (s->expanding && s->locked == 0 &&
3513 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3514 handle_stripe_expansion(conf, sh);
3515} 3427}
3516 3428
3517static void handle_stripe(struct stripe_head *sh) 3429static void handle_stripe(struct stripe_head *sh)
3518{ 3430{
3519 struct stripe_head_state s; 3431 struct stripe_head_state s;
3432 int done;
3433 int i;
3520 raid5_conf_t *conf = sh->raid_conf; 3434 raid5_conf_t *conf = sh->raid_conf;
3521 3435
3522 clear_bit(STRIPE_HANDLE, &sh->state); 3436 clear_bit(STRIPE_HANDLE, &sh->state);
@@ -3545,11 +3459,58 @@ static void handle_stripe(struct stripe_head *sh)
3545 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 3459 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3546 3460
3547 if (conf->level == 6) 3461 if (conf->level == 6)
3548 handle_stripe6(sh, &s); 3462 done = handle_stripe6(sh, &s);
3549 else 3463 else
3550 handle_stripe5(sh, &s); 3464 done = handle_stripe5(sh, &s);
3465
3466 if (done)
3467 goto finish;
3468 /* Finish reconstruct operations initiated by the expansion process */
3469 if (sh->reconstruct_state == reconstruct_state_result) {
3470 struct stripe_head *sh_src
3471 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3472 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3473 /* sh cannot be written until sh_src has been read.
3474 * so arrange for sh to be delayed a little
3475 */
3476 set_bit(STRIPE_DELAYED, &sh->state);
3477 set_bit(STRIPE_HANDLE, &sh->state);
3478 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3479 &sh_src->state))
3480 atomic_inc(&conf->preread_active_stripes);
3481 release_stripe(sh_src);
3482 goto finish;
3483 }
3484 if (sh_src)
3485 release_stripe(sh_src);
3486
3487 sh->reconstruct_state = reconstruct_state_idle;
3488 clear_bit(STRIPE_EXPANDING, &sh->state);
3489 for (i = conf->raid_disks; i--; ) {
3490 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3491 set_bit(R5_LOCKED, &sh->dev[i].flags);
3492 s.locked++;
3493 }
3494 }
3551 3495
3496 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3497 !sh->reconstruct_state) {
3498 /* Need to write out all blocks after computing parity */
3499 sh->disks = conf->raid_disks;
3500 stripe_set_idx(sh->sector, conf, 0, sh);
3501 schedule_reconstruction(sh, &s, 1, 1);
3502 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3503 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3504 atomic_dec(&conf->reshape_stripes);
3505 wake_up(&conf->wait_for_overlap);
3506 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3507 }
3508
3509 if (s.expanding && s.locked == 0 &&
3510 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3511 handle_stripe_expansion(conf, sh);
3552 3512
3513finish:
3553 /* wait for this device to become unblocked */ 3514 /* wait for this device to become unblocked */
3554 if (unlikely(s.blocked_rdev)) 3515 if (unlikely(s.blocked_rdev))
3555 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3516 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);