 drivers/md/raid5.c | 38 ++++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 35031c8b2d02..5601dda1bc40 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2283,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 	int level = conf->level;
 
 	if (rcw) {
-		/* if we are not expanding this is a proper write request, and
-		 * there will be bios with new data to be drained into the
-		 * stripe cache
-		 */
-		if (!expand) {
-			sh->reconstruct_state = reconstruct_state_drain_run;
-			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-		} else
-			sh->reconstruct_state = reconstruct_state_run;
-
-		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -2306,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				s->locked++;
 			}
 		}
+		/* if we are not expanding this is a proper write request, and
+		 * there will be bios with new data to be drained into the
+		 * stripe cache
+		 */
+		if (!expand) {
+			if (!s->locked)
+				/* False alarm, nothing to do */
+				return;
+			sh->reconstruct_state = reconstruct_state_drain_run;
+			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+		} else
+			sh->reconstruct_state = reconstruct_state_run;
+
+		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
+
 		if (s->locked + conf->max_degraded == disks)
 			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
 				atomic_inc(&conf->pending_full_writes);
@@ -2314,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
-		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
-		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
-
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if (i == pd_idx)
@@ -2333,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				s->locked++;
 			}
 		}
+		if (!s->locked)
+			/* False alarm - nothing to do */
+			return;
+		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 	}
 
 	/* keep the parity disk(s) locked while asynchronous operations