aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2011-07-25 21:35:20 -0400
committerNeilBrown <neilb@suse.de>2011-07-25 21:35:20 -0400
commitc5709ef6a094c72b56355590bfa55cc107e98376 (patch)
tree60e6208feff1ca8da4ce9a90cae3ef9d028d72fc /drivers/md/raid5.c
parentf2b3b44deee1524ca4f006048e0569f47eefdb74 (diff)
md/raid5: add some more fields to stripe_head_state
Adding these three fields will allow more common code to be moved to handle_stripe(). Struct field rearrangement by Namhyung Kim. Signed-off-by: NeilBrown <neilb@suse.de> Reviewed-by: Namhyung Kim <namhyung@gmail.com>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c54
1 file changed, 24 insertions, 30 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bbc7792f013c..bc15f48be78d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3003,12 +3003,9 @@ static void handle_stripe5(struct stripe_head *sh)
3003{ 3003{
3004 raid5_conf_t *conf = sh->raid_conf; 3004 raid5_conf_t *conf = sh->raid_conf;
3005 int disks = sh->disks, i; 3005 int disks = sh->disks, i;
3006 struct bio *return_bi = NULL;
3007 struct stripe_head_state s; 3006 struct stripe_head_state s;
3008 struct r5dev *dev; 3007 struct r5dev *dev;
3009 mdk_rdev_t *blocked_rdev = NULL;
3010 int prexor; 3008 int prexor;
3011 int dec_preread_active = 0;
3012 3009
3013 memset(&s, 0, sizeof(s)); 3010 memset(&s, 0, sizeof(s));
3014 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 3011 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3058,9 +3055,9 @@ static void handle_stripe5(struct stripe_head *sh)
3058 if (dev->written) 3055 if (dev->written)
3059 s.written++; 3056 s.written++;
3060 rdev = rcu_dereference(conf->disks[i].rdev); 3057 rdev = rcu_dereference(conf->disks[i].rdev);
3061 if (blocked_rdev == NULL && 3058 if (s.blocked_rdev == NULL &&
3062 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3059 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3063 blocked_rdev = rdev; 3060 s.blocked_rdev = rdev;
3064 atomic_inc(&rdev->nr_pending); 3061 atomic_inc(&rdev->nr_pending);
3065 } 3062 }
3066 clear_bit(R5_Insync, &dev->flags); 3063 clear_bit(R5_Insync, &dev->flags);
@@ -3088,15 +3085,15 @@ static void handle_stripe5(struct stripe_head *sh)
3088 spin_unlock_irq(&conf->device_lock); 3085 spin_unlock_irq(&conf->device_lock);
3089 rcu_read_unlock(); 3086 rcu_read_unlock();
3090 3087
3091 if (unlikely(blocked_rdev)) { 3088 if (unlikely(s.blocked_rdev)) {
3092 if (s.syncing || s.expanding || s.expanded || 3089 if (s.syncing || s.expanding || s.expanded ||
3093 s.to_write || s.written) { 3090 s.to_write || s.written) {
3094 set_bit(STRIPE_HANDLE, &sh->state); 3091 set_bit(STRIPE_HANDLE, &sh->state);
3095 goto unlock; 3092 goto unlock;
3096 } 3093 }
3097 /* There is nothing for the blocked_rdev to block */ 3094 /* There is nothing for the blocked_rdev to block */
3098 rdev_dec_pending(blocked_rdev, conf->mddev); 3095 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3099 blocked_rdev = NULL; 3096 s.blocked_rdev = NULL;
3100 } 3097 }
3101 3098
3102 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3099 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -3112,7 +3109,7 @@ static void handle_stripe5(struct stripe_head *sh)
3112 * need to be failed 3109 * need to be failed
3113 */ 3110 */
3114 if (s.failed > 1 && s.to_read+s.to_write+s.written) 3111 if (s.failed > 1 && s.to_read+s.to_write+s.written)
3115 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3112 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3116 if (s.failed > 1 && s.syncing) { 3113 if (s.failed > 1 && s.syncing) {
3117 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3114 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3118 clear_bit(STRIPE_SYNCING, &sh->state); 3115 clear_bit(STRIPE_SYNCING, &sh->state);
@@ -3128,7 +3125,7 @@ static void handle_stripe5(struct stripe_head *sh)
3128 !test_bit(R5_LOCKED, &dev->flags) && 3125 !test_bit(R5_LOCKED, &dev->flags) &&
3129 test_bit(R5_UPTODATE, &dev->flags)) || 3126 test_bit(R5_UPTODATE, &dev->flags)) ||
3130 (s.failed == 1 && s.failed_num[0] == sh->pd_idx))) 3127 (s.failed == 1 && s.failed_num[0] == sh->pd_idx)))
3131 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3128 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3132 3129
3133 /* Now we might consider reading some blocks, either to check/generate 3130 /* Now we might consider reading some blocks, either to check/generate
3134 * parity, or to satisfy requests 3131 * parity, or to satisfy requests
@@ -3166,7 +3163,7 @@ static void handle_stripe5(struct stripe_head *sh)
3166 } 3163 }
3167 } 3164 }
3168 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3165 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3169 dec_preread_active = 1; 3166 s.dec_preread_active = 1;
3170 } 3167 }
3171 3168
3172 /* Now to consider new write requests and what else, if anything 3169 /* Now to consider new write requests and what else, if anything
@@ -3264,15 +3261,15 @@ static void handle_stripe5(struct stripe_head *sh)
3264 unlock: 3261 unlock:
3265 3262
3266 /* wait for this device to become unblocked */ 3263 /* wait for this device to become unblocked */
3267 if (unlikely(blocked_rdev)) 3264 if (unlikely(s.blocked_rdev))
3268 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3265 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3269 3266
3270 if (s.ops_request) 3267 if (s.ops_request)
3271 raid_run_ops(sh, s.ops_request); 3268 raid_run_ops(sh, s.ops_request);
3272 3269
3273 ops_run_io(sh, &s); 3270 ops_run_io(sh, &s);
3274 3271
3275 if (dec_preread_active) { 3272 if (s.dec_preread_active) {
3276 /* We delay this until after ops_run_io so that if make_request 3273 /* We delay this until after ops_run_io so that if make_request
3277 * is waiting on a flush, it won't continue until the writes 3274 * is waiting on a flush, it won't continue until the writes
3278 * have actually been submitted. 3275 * have actually been submitted.
@@ -3282,19 +3279,16 @@ static void handle_stripe5(struct stripe_head *sh)
3282 IO_THRESHOLD) 3279 IO_THRESHOLD)
3283 md_wakeup_thread(conf->mddev->thread); 3280 md_wakeup_thread(conf->mddev->thread);
3284 } 3281 }
3285 return_io(return_bi); 3282 return_io(s.return_bi);
3286} 3283}
3287 3284
3288static void handle_stripe6(struct stripe_head *sh) 3285static void handle_stripe6(struct stripe_head *sh)
3289{ 3286{
3290 raid5_conf_t *conf = sh->raid_conf; 3287 raid5_conf_t *conf = sh->raid_conf;
3291 int disks = sh->disks; 3288 int disks = sh->disks;
3292 struct bio *return_bi = NULL;
3293 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 3289 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
3294 struct stripe_head_state s; 3290 struct stripe_head_state s;
3295 struct r5dev *dev, *pdev, *qdev; 3291 struct r5dev *dev, *pdev, *qdev;
3296 mdk_rdev_t *blocked_rdev = NULL;
3297 int dec_preread_active = 0;
3298 3292
3299 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3293 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3300 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 3294 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3345,9 +3339,9 @@ static void handle_stripe6(struct stripe_head *sh)
3345 if (dev->written) 3339 if (dev->written)
3346 s.written++; 3340 s.written++;
3347 rdev = rcu_dereference(conf->disks[i].rdev); 3341 rdev = rcu_dereference(conf->disks[i].rdev);
3348 if (blocked_rdev == NULL && 3342 if (s.blocked_rdev == NULL &&
3349 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3343 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3350 blocked_rdev = rdev; 3344 s.blocked_rdev = rdev;
3351 atomic_inc(&rdev->nr_pending); 3345 atomic_inc(&rdev->nr_pending);
3352 } 3346 }
3353 clear_bit(R5_Insync, &dev->flags); 3347 clear_bit(R5_Insync, &dev->flags);
@@ -3376,15 +3370,15 @@ static void handle_stripe6(struct stripe_head *sh)
3376 spin_unlock_irq(&conf->device_lock); 3370 spin_unlock_irq(&conf->device_lock);
3377 rcu_read_unlock(); 3371 rcu_read_unlock();
3378 3372
3379 if (unlikely(blocked_rdev)) { 3373 if (unlikely(s.blocked_rdev)) {
3380 if (s.syncing || s.expanding || s.expanded || 3374 if (s.syncing || s.expanding || s.expanded ||
3381 s.to_write || s.written) { 3375 s.to_write || s.written) {
3382 set_bit(STRIPE_HANDLE, &sh->state); 3376 set_bit(STRIPE_HANDLE, &sh->state);
3383 goto unlock; 3377 goto unlock;
3384 } 3378 }
3385 /* There is nothing for the blocked_rdev to block */ 3379 /* There is nothing for the blocked_rdev to block */
3386 rdev_dec_pending(blocked_rdev, conf->mddev); 3380 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3387 blocked_rdev = NULL; 3381 s.blocked_rdev = NULL;
3388 } 3382 }
3389 3383
3390 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3384 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -3400,7 +3394,7 @@ static void handle_stripe6(struct stripe_head *sh)
3400 * might need to be failed 3394 * might need to be failed
3401 */ 3395 */
3402 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3396 if (s.failed > 2 && s.to_read+s.to_write+s.written)
3403 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3397 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3404 if (s.failed > 2 && s.syncing) { 3398 if (s.failed > 2 && s.syncing) {
3405 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3399 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3406 clear_bit(STRIPE_SYNCING, &sh->state); 3400 clear_bit(STRIPE_SYNCING, &sh->state);
@@ -3425,7 +3419,7 @@ static void handle_stripe6(struct stripe_head *sh)
3425 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3419 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3426 && !test_bit(R5_LOCKED, &qdev->flags) 3420 && !test_bit(R5_LOCKED, &qdev->flags)
3427 && test_bit(R5_UPTODATE, &qdev->flags))))) 3421 && test_bit(R5_UPTODATE, &qdev->flags)))))
3428 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3422 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3429 3423
3430 /* Now we might consider reading some blocks, either to check/generate 3424 /* Now we might consider reading some blocks, either to check/generate
3431 * parity, or to satisfy requests 3425 * parity, or to satisfy requests
@@ -3461,7 +3455,7 @@ static void handle_stripe6(struct stripe_head *sh)
3461 } 3455 }
3462 } 3456 }
3463 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3457 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3464 dec_preread_active = 1; 3458 s.dec_preread_active = 1;
3465 } 3459 }
3466 3460
3467 /* Now to consider new write requests and what else, if anything 3461 /* Now to consider new write requests and what else, if anything
@@ -3561,8 +3555,8 @@ static void handle_stripe6(struct stripe_head *sh)
3561 unlock: 3555 unlock:
3562 3556
3563 /* wait for this device to become unblocked */ 3557 /* wait for this device to become unblocked */
3564 if (unlikely(blocked_rdev)) 3558 if (unlikely(s.blocked_rdev))
3565 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3559 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3566 3560
3567 if (s.ops_request) 3561 if (s.ops_request)
3568 raid_run_ops(sh, s.ops_request); 3562 raid_run_ops(sh, s.ops_request);
@@ -3570,7 +3564,7 @@ static void handle_stripe6(struct stripe_head *sh)
3570 ops_run_io(sh, &s); 3564 ops_run_io(sh, &s);
3571 3565
3572 3566
3573 if (dec_preread_active) { 3567 if (s.dec_preread_active) {
3574 /* We delay this until after ops_run_io so that if make_request 3568 /* We delay this until after ops_run_io so that if make_request
3575 * is waiting on a flush, it won't continue until the writes 3569 * is waiting on a flush, it won't continue until the writes
3576 * have actually been submitted. 3570 * have actually been submitted.
@@ -3581,7 +3575,7 @@ static void handle_stripe6(struct stripe_head *sh)
3581 md_wakeup_thread(conf->mddev->thread); 3575 md_wakeup_thread(conf->mddev->thread);
3582 } 3576 }
3583 3577
3584 return_io(return_bi); 3578 return_io(s.return_bi);
3585} 3579}
3586 3580
3587static void handle_stripe(struct stripe_head *sh) 3581static void handle_stripe(struct stripe_head *sh)