author		Yuri Tikhonov <yur@emcraft.com>		2009-08-29 22:13:12 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-08-29 22:13:12 -0400
commit		c0f7bddbe60f43578dccf4ffb8d4bff88f625ea7 (patch)
tree		4100908f0a90821b6b1e42b725f25d279525799f /drivers/md
parent		ac6b53b6e6acab27e4f3e2383f9ac1f0d7c6200b (diff)
md/raid5,6: common schedule_reconstruction for raid5/6
Extend schedule_reconstruction5 for reuse by the raid6 path.  Add
support for generating Q and BUG() if a request is made to perform
'prexor'.

Signed-off-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
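As an aside, the generalized bookkeeping can be summarized outside of kernel context: lock the data devices being written, detect a full-stripe write with "locked + max_degraded == disks", then keep P locked and, for RAID-6, Q as well. The sketch below is a minimal user-space model of that arithmetic; the toy_conf/toy_stripe types, the toy_schedule_reconstruction helper, and the disk counts are illustrative stand-ins, not the kernel's structures or this patch's code.

#include <stdbool.h>
#include <stdio.h>

struct toy_conf {
	int level;		/* 5 or 6 */
	int max_degraded;	/* 1 for raid5, 2 for raid6 */
};

struct toy_stripe {
	int disks;		/* total devices in the stripe */
	int locked;		/* devices locked for in-flight ops */
	bool p_locked;
	bool q_locked;
};

/* Models the shape of the rcw path after the patch: lock the data
 * devices being written, note whether this is a full-stripe write,
 * then keep the parity disk(s) locked while async ops are in flight. */
static bool toy_schedule_reconstruction(struct toy_conf *conf,
					struct toy_stripe *sh,
					int data_blocks_to_write)
{
	bool full_write;

	sh->locked += data_blocks_to_write;

	/* The old "s->locked + 1 == disks" test generalized: a full
	 * write leaves only the parity device(s) unlocked so far. */
	full_write = (sh->locked + conf->max_degraded == sh->disks);

	/* P is always locked while reconstruction is in flight. */
	sh->p_locked = true;
	sh->locked++;

	/* RAID-6 additionally locks Q, as the patch adds. */
	if (conf->level == 6) {
		sh->q_locked = true;
		sh->locked++;
	}

	return full_write;
}

int main(void)
{
	struct toy_conf raid6 = { .level = 6, .max_degraded = 2 };
	struct toy_stripe sh = { .disks = 6 };	/* 4 data + P + Q */

	/* Writing all four data blocks is a full-stripe write;
	 * all six devices end up locked. */
	bool full = toy_schedule_reconstruction(&raid6, &sh, 4);
	printf("full_write=%d locked=%d\n", full, sh.locked);
	return 0;
}

With max_degraded = 1 and level = 5 the check reduces to the original raid5-only "s->locked + 1 == disks" test, which is why a single helper can serve both paths.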
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid5.c	24
1 files changed, 18 insertions, 6 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e68616ed3e78..a1245cf99957 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2175,10 +2175,12 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
 }
 
 static void
-schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
+schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
 {
 	int i, pd_idx = sh->pd_idx, disks = sh->disks;
+	raid5_conf_t *conf = sh->raid_conf;
+	int level = conf->level;
 
 	if (rcw) {
 		/* if we are not expanding this is a proper write request, and
@@ -2204,10 +2206,11 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
 				s->locked++;
 			}
 		}
-		if (s->locked + 1 == disks)
+		if (s->locked + conf->max_degraded == disks)
 			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
-				atomic_inc(&sh->raid_conf->pending_full_writes);
+				atomic_inc(&conf->pending_full_writes);
 	} else {
+		BUG_ON(level == 6);
 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
@@ -2232,13 +2235,22 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
 		}
 	}
 
-	/* keep the parity disk locked while asynchronous operations
+	/* keep the parity disk(s) locked while asynchronous operations
 	 * are in flight
 	 */
 	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
 	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
 	s->locked++;
 
+	if (level == 6) {
+		int qd_idx = sh->qd_idx;
+		struct r5dev *dev = &sh->dev[qd_idx];
+
+		set_bit(R5_LOCKED, &dev->flags);
+		clear_bit(R5_UPTODATE, &dev->flags);
+		s->locked++;
+	}
+
 	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
 		__func__, (unsigned long long)sh->sector,
 		s->locked, s->ops_request);
@@ -2704,7 +2716,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
 	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
 	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
 	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
-		schedule_reconstruction5(sh, s, rcw == 0, 0);
+		schedule_reconstruction(sh, s, rcw == 0, 0);
 }
 
 static void handle_stripe_dirtying6(raid5_conf_t *conf,
@@ -3309,7 +3321,7 @@ static bool handle_stripe5(struct stripe_head *sh)
 		/* Need to write out all blocks after computing parity */
 		sh->disks = conf->raid_disks;
 		stripe_set_idx(sh->sector, conf, 0, sh);
-		schedule_reconstruction5(sh, &s, 1, 1);
+		schedule_reconstruction(sh, &s, 1, 1);
 	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
 		atomic_dec(&conf->reshape_stripes);