Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 1494 ++++++++++++++++++++++++++++------------
 1 file changed, 1055 insertions(+), 439 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a5ba080d303b..3bbc6d647044 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -43,11 +43,14 @@
  * miss any bits.
  */
 
+#include <linux/blkdev.h>
 #include <linux/kthread.h>
-#include "raid6.h"
-
-#include <linux/raid/bitmap.h>
+#include <linux/raid/pq.h>
 #include <linux/async_tx.h>
+#include <linux/seq_file.h>
+#include "md.h"
+#include "raid5.h"
+#include "bitmap.h"
 
 /*
  * Stripe cache
@@ -91,11 +94,6 @@
 
 #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
 
-#if !RAID6_USE_EMPTY_ZERO_PAGE
-/* In .bss so it's zeroed */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
-#endif
-
 /*
  * We maintain a biased count of active stripes in the bottom 16 bits of
  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
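
An aside on the biased-count scheme described above: both counters share one 32-bit bio field, active stripes in the low half, processed stripes in the high half. The standalone sketch below is illustrative userspace C, not kernel code; the helper names only loosely mirror the driver's raid5_bi_phys_segments accessors. Note that the raid5_set_bi_hw_segments context line further down combines the halves with the logical operator '||', which looks like a typo for bitwise '|'; the arithmetic only works with the bitwise form, which this sketch uses.

    #include <assert.h>
    #include <stdio.h>

    /* Two 16-bit counters packed into one 32-bit word, mirroring how raid5
     * reuses bio->bi_phys_segments: low 16 bits = active stripe count,
     * high 16 bits = processed count. */
    static unsigned int phys_segments(unsigned int packed)
    {
    	return packed & 0xffff;         /* active count, bottom 16 bits */
    }

    static unsigned int hw_segments(unsigned int packed)
    {
    	return (packed >> 16) & 0xffff; /* processed count, upper 16 bits */
    }

    static unsigned int set_hw_segments(unsigned int packed, unsigned int cnt)
    {
    	/* bitwise OR is required; a logical '||' would collapse to 0 or 1 */
    	return phys_segments(packed) | (cnt << 16);
    }

    int main(void)
    {
    	unsigned int packed = 5;             /* 5 active, 0 processed */
    	packed = set_hw_segments(packed, 3); /* mark 3 processed */
    	assert(phys_segments(packed) == 5 && hw_segments(packed) == 3);
    	printf("active=%u processed=%u\n",
    	       phys_segments(packed), hw_segments(packed));
    	return 0;
    }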
@@ -130,12 +128,42 @@ static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 	bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
 }
 
+/* Find first data disk in a raid6 stripe */
+static inline int raid6_d0(struct stripe_head *sh)
+{
+	if (sh->ddf_layout)
+		/* ddf always starts from first device */
+		return 0;
+	/* md starts just after Q block */
+	if (sh->qd_idx == sh->disks - 1)
+		return 0;
+	else
+		return sh->qd_idx + 1;
+}
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
 	disk++;
 	return (disk < raid_disks) ? disk : 0;
 }
 
+/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
+ * we need to map each disk to a 'slot', where the data disks are slot
+ * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
+ * is raid_disks-1.  This helper does that mapping.
+ */
+static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
+			     int *count, int syndrome_disks)
+{
+	int slot;
+
+	if (idx == sh->pd_idx)
+		return syndrome_disks;
+	if (idx == sh->qd_idx)
+		return syndrome_disks + 1;
+	slot = (*count)++;
+	return slot;
+}
+
 static void return_io(struct bio *return_bi)
 {
 	struct bio *bi = return_bi;
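
For reference, the slot mapping that raid6_d0() and raid6_idx_to_slot() establish can be demonstrated with a small standalone program. The values below describe a hypothetical 5-disk md-layout raid6 with P on disk 3 and Q on disk 4; this is an illustrative re-implementation, not driver code:

    #include <stdio.h>

    /* Data disks land in slots 0..2, P in slot 3 (= syndrome_disks),
     * Q in slot 4 (= syndrome_disks + 1). */
    static int idx_to_slot(int idx, int pd_idx, int qd_idx,
    		       int *count, int syndrome_disks)
    {
    	if (idx == pd_idx)
    		return syndrome_disks;
    	if (idx == qd_idx)
    		return syndrome_disks + 1;
    	return (*count)++;
    }

    int main(void)
    {
    	int disks = 5, pd_idx = 3, qd_idx = 4;
    	int syndrome_disks = disks - 2;                  /* non-ddf: 3 */
    	int d0 = (qd_idx == disks - 1) ? 0 : qd_idx + 1; /* raid6_d0 */
    	int count = 0, i = d0;

    	do {
    		printf("disk %d -> slot %d\n", i,
    		       idx_to_slot(i, pd_idx, qd_idx, &count, syndrome_disks));
    		i = (i + 1 < disks) ? i + 1 : 0;         /* raid6_next_disk */
    	} while (i != d0);
    	return 0;
    }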
@@ -193,6 +221,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 		}
 	}
 }
+
 static void release_stripe(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
@@ -270,9 +299,11 @@ static int grow_buffers(struct stripe_head *sh, int num)
 	return 0;
 }
 
-static void raid5_build_block(struct stripe_head *sh, int i);
+static void raid5_build_block(struct stripe_head *sh, int i, int previous);
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+			    struct stripe_head *sh);
 
-static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
@@ -287,11 +318,12 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 
 	remove_hash(sh);
 
+	sh->generation = conf->generation - previous;
+	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 	sh->sector = sector;
-	sh->pd_idx = pd_idx;
+	stripe_set_idx(sector, conf, previous, sh);
 	sh->state = 0;
 
-	sh->disks = disks;
 
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
@@ -305,12 +337,13 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 			BUG();
 		}
 		dev->flags = 0;
-		raid5_build_block(sh, i);
+		raid5_build_block(sh, i, previous);
 	}
 	insert_hash(conf, sh);
 }
 
-static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
+static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
+					 short generation)
 {
 	struct stripe_head *sh;
 	struct hlist_node *hn;
@@ -318,7 +351,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
 	CHECK_DEVLOCK();
 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
-		if (sh->sector == sector && sh->disks == disks)
+		if (sh->sector == sector && sh->generation == generation)
 			return sh;
 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 	return NULL;
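
Why "conf->generation - previous" works as a lookup key: the generation number is bumped when a reshape changes the array geometry, so a cached stripe initialised under the old geometry only matches lookups that explicitly ask for the previous generation. A toy sketch with hypothetical values, purely illustrative:

    #include <stdio.h>

    int main(void)
    {
    	short generation = 4;            /* bumped once per reshape start */
    	short want_old = generation - 1; /* previous = 1: old geometry */
    	short want_new = generation - 0; /* previous = 0: current geometry */

    	/* a stripe cached under the last geometry carries generation 3
    	 * and only matches lookups that ask for it */
    	short sh_generation = 3;
    	printf("old-geometry lookup matches: %d\n", sh_generation == want_old);
    	printf("new-geometry lookup matches: %d\n", sh_generation == want_new);
    	return 0;
    }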
@@ -327,8 +360,9 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
 static void unplug_slaves(mddev_t *mddev);
 static void raid5_unplug_device(struct request_queue *q);
 
-static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
-					     int pd_idx, int noblock)
+static struct stripe_head *
+get_active_stripe(raid5_conf_t *conf, sector_t sector,
+		  int previous, int noblock)
 {
 	struct stripe_head *sh;
 
@@ -340,7 +374,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
 	wait_event_lock_irq(conf->wait_for_stripe,
 			    conf->quiesce == 0,
 			    conf->device_lock, /* nothing */);
-	sh = __find_stripe(conf, sector, disks);
+	sh = __find_stripe(conf, sector, conf->generation - previous);
 	if (!sh) {
 		if (!conf->inactive_blocked)
 			sh = get_free_stripe(conf);
@@ -358,10 +392,11 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
 					);
 			conf->inactive_blocked = 0;
 		} else
-			init_stripe(sh, sector, pd_idx, disks);
+			init_stripe(sh, sector, previous);
 	} else {
 		if (atomic_read(&sh->count)) {
-			BUG_ON(!list_empty(&sh->lru));
+			BUG_ON(!list_empty(&sh->lru)
+			       && !test_bit(STRIPE_EXPANDING, &sh->state));
 		} else {
 			if (!test_bit(STRIPE_HANDLE, &sh->state))
 				atomic_inc(&conf->active_stripes);
@@ -895,8 +930,10 @@ static int grow_stripes(raid5_conf_t *conf, int num)
 	struct kmem_cache *sc;
 	int devs = conf->raid_disks;
 
-	sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
-	sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
+	sprintf(conf->cache_name[0],
+		"raid%d-%s", conf->level, mdname(conf->mddev));
+	sprintf(conf->cache_name[1],
+		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
 	conf->active_name = 0;
 	sc = kmem_cache_create(conf->cache_name[conf->active_name],
 		       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -911,7 +948,6 @@ static int grow_stripes(raid5_conf_t *conf, int num)
 	return 0;
 }
 
-#ifdef CONFIG_MD_RAID5_RESHAPE
 static int resize_stripes(raid5_conf_t *conf, int newsize)
 {
 	/* Make all the stripes able to hold 'newsize' devices.
@@ -1036,7 +1072,6 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 	conf->pool_size = newsize;
 	return err;
 }
-#endif
 
 static int drop_one_stripe(raid5_conf_t *conf)
 {
@@ -1066,7 +1101,7 @@ static void shrink_stripes(raid5_conf_t *conf)
 
 static void raid5_end_read_request(struct bio * bi, int error)
 {
- 	struct stripe_head *sh = bi->bi_private;
+	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks, i;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -1148,7 +1183,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 
 static void raid5_end_write_request(struct bio *bi, int error)
 {
- 	struct stripe_head *sh = bi->bi_private;
+	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks, i;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -1176,9 +1211,9 @@ static void raid5_end_write_request(struct bio *bi, int error)
 }
 
 
-static sector_t compute_blocknr(struct stripe_head *sh, int i);
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
 
-static void raid5_build_block(struct stripe_head *sh, int i)
+static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 	struct r5dev *dev = &sh->dev[i];
 
@@ -1194,7 +1229,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 	dev->req.bi_private = sh;
 
 	dev->flags = 0;
-	dev->sector = compute_blocknr(sh, i);
+	dev->sector = compute_blocknr(sh, i, previous);
 }
 
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -1227,15 +1262,23 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
  * Input: a 'big' sector number,
  * Output: index of the data and parity disk, and the sector # in them.
  */
-static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
-			unsigned int data_disks, unsigned int * dd_idx,
-			unsigned int * pd_idx, raid5_conf_t *conf)
+static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
+				     int previous, int *dd_idx,
+				     struct stripe_head *sh)
 {
 	long stripe;
 	unsigned long chunk_number;
 	unsigned int chunk_offset;
+	int pd_idx, qd_idx;
+	int ddf_layout = 0;
 	sector_t new_sector;
-	int sectors_per_chunk = conf->chunk_size >> 9;
+	int algorithm = previous ? conf->prev_algo
+				 : conf->algorithm;
+	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+					 : (conf->chunk_size >> 9);
+	int raid_disks = previous ? conf->previous_raid_disks
+				  : conf->raid_disks;
+	int data_disks = raid_disks - conf->max_degraded;
 
 	/* First compute the information on this sector */
 
@@ -1259,68 +1302,170 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
+	pd_idx = qd_idx = ~0;
 	switch(conf->level) {
 	case 4:
-		*pd_idx = data_disks;
+		pd_idx = data_disks;
 		break;
 	case 5:
-		switch (conf->algorithm) {
+		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			*pd_idx = data_disks - stripe % raid_disks;
-			if (*dd_idx >= *pd_idx)
+			pd_idx = data_disks - stripe % raid_disks;
+			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			if (*dd_idx >= *pd_idx)
+			pd_idx = stripe % raid_disks;
+			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			*pd_idx = data_disks - stripe % raid_disks;
-			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+			pd_idx = data_disks - stripe % raid_disks;
+			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
+			pd_idx = stripe % raid_disks;
+			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
+			break;
+		case ALGORITHM_PARITY_0:
+			pd_idx = 0;
+			(*dd_idx)++;
+			break;
+		case ALGORITHM_PARITY_N:
+			pd_idx = data_disks;
 			break;
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
-				conf->algorithm);
+				algorithm);
+			BUG();
 		}
 		break;
 	case 6:
 
-		/**** FIX THIS ****/
-		switch (conf->algorithm) {
+		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
-			if (*pd_idx == raid_disks-1)
-				(*dd_idx)++;	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
+			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			qd_idx = pd_idx + 1;
+			if (pd_idx == raid_disks-1) {
+				(*dd_idx)++;	/* Q D D D P */
+				qd_idx = 0;
+			} else if (*dd_idx >= pd_idx)
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			if (*pd_idx == raid_disks-1)
-				(*dd_idx)++;	/* Q D D D P */
-			else if (*dd_idx >= *pd_idx)
+			pd_idx = stripe % raid_disks;
+			qd_idx = pd_idx + 1;
+			if (pd_idx == raid_disks-1) {
+				(*dd_idx)++;	/* Q D D D P */
+				qd_idx = 0;
+			} else if (*dd_idx >= pd_idx)
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
-			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			qd_idx = (pd_idx + 1) % raid_disks;
+			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			*pd_idx = stripe % raid_disks;
-			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+			pd_idx = stripe % raid_disks;
+			qd_idx = (pd_idx + 1) % raid_disks;
+			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
+			break;
+
+		case ALGORITHM_PARITY_0:
+			pd_idx = 0;
+			qd_idx = 1;
+			(*dd_idx) += 2;
+			break;
+		case ALGORITHM_PARITY_N:
+			pd_idx = data_disks;
+			qd_idx = data_disks + 1;
 			break;
+
+		case ALGORITHM_ROTATING_ZERO_RESTART:
+			/* Exactly the same as RIGHT_ASYMMETRIC, but order
+			 * of blocks for computing Q is different.
+			 */
+			pd_idx = stripe % raid_disks;
+			qd_idx = pd_idx + 1;
+			if (pd_idx == raid_disks-1) {
+				(*dd_idx)++;	/* Q D D D P */
+				qd_idx = 0;
+			} else if (*dd_idx >= pd_idx)
+				(*dd_idx) += 2; /* D D P Q D */
+			ddf_layout = 1;
+			break;
+
+		case ALGORITHM_ROTATING_N_RESTART:
+			/* Same as left_asymmetric, but the first stripe is
+			 * D D D P Q  rather than
+			 * Q D D D P
+			 */
+			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+			qd_idx = pd_idx + 1;
+			if (pd_idx == raid_disks-1) {
+				(*dd_idx)++;	/* Q D D D P */
+				qd_idx = 0;
+			} else if (*dd_idx >= pd_idx)
+				(*dd_idx) += 2; /* D D P Q D */
+			ddf_layout = 1;
+			break;
+
+		case ALGORITHM_ROTATING_N_CONTINUE:
+			/* Same as left_symmetric but Q is before P */
+			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
+			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
+			ddf_layout = 1;
+			break;
+
+		case ALGORITHM_LEFT_ASYMMETRIC_6:
+			/* RAID5 left_asymmetric, with Q on last device */
+			pd_idx = data_disks - stripe % (raid_disks-1);
+			if (*dd_idx >= pd_idx)
+				(*dd_idx)++;
+			qd_idx = raid_disks - 1;
+			break;
+
+		case ALGORITHM_RIGHT_ASYMMETRIC_6:
+			pd_idx = stripe % (raid_disks-1);
+			if (*dd_idx >= pd_idx)
+				(*dd_idx)++;
+			qd_idx = raid_disks - 1;
+			break;
+
+		case ALGORITHM_LEFT_SYMMETRIC_6:
+			pd_idx = data_disks - stripe % (raid_disks-1);
+			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
+			qd_idx = raid_disks - 1;
+			break;
+
+		case ALGORITHM_RIGHT_SYMMETRIC_6:
+			pd_idx = stripe % (raid_disks-1);
+			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
+			qd_idx = raid_disks - 1;
+			break;
+
+		case ALGORITHM_PARITY_0_6:
+			pd_idx = 0;
+			(*dd_idx)++;
+			qd_idx = raid_disks - 1;
+			break;
+
+
 		default:
 			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
-				conf->algorithm);
+			       algorithm);
+			BUG();
 		}
 		break;
 	}
 
+	if (sh) {
+		sh->pd_idx = pd_idx;
+		sh->qd_idx = qd_idx;
+		sh->ddf_layout = ddf_layout;
+	}
 	/*
 	 * Finally, compute the new sector number
 	 */
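
A worked example of the parity-placement arithmetic above, for raid6 ALGORITHM_LEFT_SYMMETRIC on a hypothetical 5-disk array. This is illustrative userspace code following the same formulas, not the driver:

    #include <stdio.h>

    int main(void)
    {
    	int raid_disks = 5;
    	long stripe;

    	for (stripe = 0; stripe < 5; stripe++) {
    		/* same arithmetic as the LEFT_SYMMETRIC case above */
    		int pd_idx = raid_disks - 1 - (stripe % raid_disks);
    		int qd_idx = (pd_idx + 1) % raid_disks;
    		printf("stripe %ld: P on disk %d, Q on disk %d\n",
    		       stripe, pd_idx, qd_idx);
    	}
    	return 0;
    }

For stripe 0 this prints P on disk 4 with Q wrapping to disk 0, and the pair rotates left one disk per stripe, which is exactly why the qd_idx computation needs the modulo.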
@@ -1329,17 +1474,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 }
 
 
-static sector_t compute_blocknr(struct stripe_head *sh, int i)
+static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int raid_disks = sh->disks;
 	int data_disks = raid_disks - conf->max_degraded;
 	sector_t new_sector = sh->sector, check;
-	int sectors_per_chunk = conf->chunk_size >> 9;
+	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
+					 : (conf->chunk_size >> 9);
+	int algorithm = previous ? conf->prev_algo
+				 : conf->algorithm;
 	sector_t stripe;
 	int chunk_offset;
-	int chunk_number, dummy1, dummy2, dd_idx = i;
+	int chunk_number, dummy1, dd_idx = i;
 	sector_t r_sector;
+	struct stripe_head sh2;
 
 
 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
@@ -1351,7 +1500,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 	switch(conf->level) {
 	case 4: break;
 	case 5:
-		switch (conf->algorithm) {
+		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 		case ALGORITHM_RIGHT_ASYMMETRIC:
 			if (i > sh->pd_idx)
@@ -1363,19 +1512,27 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 				i += raid_disks;
 			i -= (sh->pd_idx + 1);
 			break;
+		case ALGORITHM_PARITY_0:
+			i -= 1;
+			break;
+		case ALGORITHM_PARITY_N:
+			break;
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
-				conf->algorithm);
+				algorithm);
+			BUG();
 		}
 		break;
 	case 6:
-		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
+		if (i == sh->qd_idx)
 			return 0; /* It is the Q disk */
-		switch (conf->algorithm) {
+		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			if (sh->pd_idx == raid_disks-1)
-				i--;	/* Q D D D P */
+		case ALGORITHM_ROTATING_ZERO_RESTART:
+		case ALGORITHM_ROTATING_N_RESTART:
+			if (sh->pd_idx == raid_disks-1)
+				i--;	/* Q D D D P */
 			else if (i > sh->pd_idx)
 				i -= 2; /* D D P Q D */
 			break;
@@ -1390,9 +1547,35 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 				i -= (sh->pd_idx + 2);
 			}
 			break;
+		case ALGORITHM_PARITY_0:
+			i -= 2;
+			break;
+		case ALGORITHM_PARITY_N:
+			break;
+		case ALGORITHM_ROTATING_N_CONTINUE:
+			if (sh->pd_idx == 0)
+				i--;	/* P D D D Q */
+			else if (i > sh->pd_idx)
+				i -= 2; /* D D Q P D */
+			break;
+		case ALGORITHM_LEFT_ASYMMETRIC_6:
+		case ALGORITHM_RIGHT_ASYMMETRIC_6:
+			if (i > sh->pd_idx)
+				i--;
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC_6:
+		case ALGORITHM_RIGHT_SYMMETRIC_6:
+			if (i < sh->pd_idx)
+				i += data_disks + 1;
+			i -= (sh->pd_idx + 1);
+			break;
+		case ALGORITHM_PARITY_0_6:
+			i -= 1;
+			break;
 		default:
 			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
-				conf->algorithm);
+			       algorithm);
+			BUG();
 		}
 		break;
 	}
@@ -1400,8 +1583,10 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 	chunk_number = stripe * data_disks + i;
 	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
 
-	check = raid5_compute_sector(r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
-	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
+	check = raid5_compute_sector(conf, r_sector,
+				     previous, &dummy1, &sh2);
+	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
+	    || sh2.qd_idx != sh->qd_idx) {
 		printk(KERN_ERR "compute_blocknr: map not correct\n");
 		return 0;
 	}
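
compute_blocknr() is the inverse of raid5_compute_sector(), and the check at the end verifies the round trip. The inversion for the raid5 LEFT_SYMMETRIC case can be demonstrated standalone. The code below assumes a hypothetical 4-disk raid5 with one sector per chunk; it is illustrative only:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	int raid_disks = 4, data_disks = 3;
    	long r_sector;

    	for (r_sector = 0; r_sector < 12; r_sector++) {
    		/* forward map, as in raid5_compute_sector */
    		long stripe = r_sector / data_disks;
    		int dd_idx = r_sector % data_disks;
    		int pd_idx = data_disks - stripe % raid_disks;
    		dd_idx = (pd_idx + 1 + dd_idx) % raid_disks;

    		/* inverse map, as in compute_blocknr */
    		int i = dd_idx;
    		if (i < pd_idx)
    			i += raid_disks;
    		i -= (pd_idx + 1);
    		long back = stripe * data_disks + i;

    		assert(back == r_sector);  /* the round-trip check */
    	}
    	printf("round trip ok\n");
    	return 0;
    }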
@@ -1468,14 +1653,16 @@ static void copy_data(int frombio, struct bio *bio,
 
 static void compute_parity6(struct stripe_head *sh, int method)
 {
-	raid6_conf_t *conf = sh->raid_conf;
-	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
+	raid5_conf_t *conf = sh->raid_conf;
+	int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
+	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
 	struct bio *chosen;
 	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
-	void *ptrs[disks];
+	void *ptrs[syndrome_disks+2];
 
-	qd_idx = raid6_next_disk(pd_idx, disks);
-	d0_idx = raid6_next_disk(qd_idx, disks);
+	pd_idx = sh->pd_idx;
+	qd_idx = sh->qd_idx;
+	d0_idx = raid6_d0(sh);
 
 	pr_debug("compute_parity, stripe %llu, method %d\n",
 		(unsigned long long)sh->sector, method);
@@ -1513,24 +1700,29 @@ static void compute_parity6(struct stripe_head *sh, int method)
 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
 	}
 
-//	switch(method) {
-//	case RECONSTRUCT_WRITE:
-//	case CHECK_PARITY:
-//	case UPDATE_PARITY:
-		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
-		/* FIX: Is this ordering of drives even remotely optimal? */
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("block %d/%d not uptodate on parity calc\n", i,count);
-			i = raid6_next_disk(i, disks);
-		} while ( i != d0_idx );
-//		break;
-//	}
-
-	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
+	/* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
+
+	for (i = 0; i < disks; i++)
+		ptrs[i] = (void *)raid6_empty_zero_page;
+
+	count = 0;
+	i = d0_idx;
+	do {
+		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+		ptrs[slot] = page_address(sh->dev[i].page);
+		if (slot < syndrome_disks &&
+		    !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
+			printk(KERN_ERR "block %d/%d not uptodate "
+			       "on parity calc\n", i, count);
+			BUG();
+		}
+
+		i = raid6_next_disk(i, disks);
+	} while (i != d0_idx);
+	BUG_ON(count != syndrome_disks);
+
+	raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);
 
 	switch(method) {
 	case RECONSTRUCT_WRITE:
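
The zero-page initialisation above is what makes the DDF layouts work: with ddf_layout set, syndrome_disks equals disks, so the slots where P and Q sit inside the rotation stay pointing at the zero page and contribute nothing to the syndrome. A standalone sketch of the table assembly for a hypothetical 5-disk DDF array follows; it is illustrative, not driver code:

    #include <stdio.h>

    static const char zero_page[16];  /* stand-in for raid6_empty_zero_page */

    int main(void)
    {
    	int disks = 5, pd_idx = 3, qd_idx = 4;
    	int syndrome_disks = disks;          /* ddf_layout */
    	const char *ptrs[5 + 2];
    	static const char data[5][16] = { {1}, {2}, {3}, {4}, {5} };
    	int i, count = 0, d0 = 0;            /* ddf starts at disk 0 */

    	for (i = 0; i < syndrome_disks + 2; i++)
    		ptrs[i] = zero_page;

    	i = d0;
    	do {
    		/* same mapping as raid6_idx_to_slot */
    		int slot = (i == pd_idx) ? syndrome_disks :
    			   (i == qd_idx) ? syndrome_disks + 1 : count++;
    		ptrs[slot] = data[i];
    		i = (i + 1 < disks) ? i + 1 : 0;
    	} while (i != d0);

    	for (i = 0; i < syndrome_disks + 2; i++)
    		printf("slot %d: %s\n", i,
    		       ptrs[i] == zero_page ? "zero page" : "disk data");
    	return 0;
    }

Slots 3 and 4, the P and Q positions inside the data rotation, remain the zero page; the real P and Q pages land in the last two slots, where gen_syndrome writes its output.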
@@ -1552,8 +1744,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
 {
 	int i, count, disks = sh->disks;
 	void *ptr[MAX_XOR_BLOCKS], *dest, *p;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
+	int qd_idx = sh->qd_idx;
 
 	pr_debug("compute_block_1, stripe %llu, idx %d\n",
 		(unsigned long long)sh->sector, dd_idx);
@@ -1589,63 +1780,65 @@
 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
 {
 	int i, count, disks = sh->disks;
-	int pd_idx = sh->pd_idx;
-	int qd_idx = raid6_next_disk(pd_idx, disks);
-	int d0_idx = raid6_next_disk(qd_idx, disks);
-	int faila, failb;
+	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
+	int d0_idx = raid6_d0(sh);
+	int faila = -1, failb = -1;
+	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
+	void *ptrs[syndrome_disks+2];
 
-	/* faila and failb are disk numbers relative to d0_idx */
-	/* pd_idx become disks-2 and qd_idx become disks-1 */
-	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
-	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
+	for (i = 0; i < disks ; i++)
+		ptrs[i] = (void *)raid6_empty_zero_page;
+	count = 0;
+	i = d0_idx;
+	do {
+		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+		ptrs[slot] = page_address(sh->dev[i].page);
+
+		if (i == dd_idx1)
+			faila = slot;
+		if (i == dd_idx2)
+			failb = slot;
+		i = raid6_next_disk(i, disks);
+	} while (i != d0_idx);
+	BUG_ON(count != syndrome_disks);
 
 	BUG_ON(faila == failb);
 	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
 
 	pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
-		 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
+		 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
+		 faila, failb);
 
-	if ( failb == disks-1 ) {
+	if (failb == syndrome_disks+1) {
 		/* Q disk is one of the missing disks */
-		if ( faila == disks-2 ) {
+		if (faila == syndrome_disks) {
 			/* Missing P+Q, just recompute */
 			compute_parity6(sh, UPDATE_PARITY);
 			return;
 		} else {
 			/* We're missing D+Q; recompute D from P */
-			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
+			compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
+					     dd_idx2 : dd_idx1),
+					0);
 			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
 			return;
 		}
 	}
 
-	/* We're missing D+P or D+D; build pointer table */
-	{
-		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
-		void *ptrs[disks];
-
-		count = 0;
-		i = d0_idx;
-		do {
-			ptrs[count++] = page_address(sh->dev[i].page);
-			i = raid6_next_disk(i, disks);
-			if (i != dd_idx1 && i != dd_idx2 &&
-			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
-				printk("compute_2 with missing block %d/%d\n", count, i);
-		} while ( i != d0_idx );
-
-		if ( failb == disks-2 ) {
-			/* We're missing D+P. */
-			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
-		} else {
-			/* We're missing D+D. */
-			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
-		}
-
-		/* Both the above update both missing blocks */
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
-		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
+	/* We're missing D+P or D+D; */
+	if (failb == syndrome_disks) {
+		/* We're missing D+P. */
+		raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
+	} else {
+		/* We're missing D+D. */
+		raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
+				  ptrs);
 	}
+
+	/* Both the above update both missing blocks */
+	set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
+	set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
 }
@@ -1800,17 +1993,21 @@ static int page_is_zero(struct page *p)
 		memcmp(a, a+4, STRIPE_SIZE-4)==0);
 }
 
-static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
+static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
+			    struct stripe_head *sh)
 {
-	int sectors_per_chunk = conf->chunk_size >> 9;
-	int pd_idx, dd_idx;
+	int sectors_per_chunk =
+		previous ? (conf->prev_chunk >> 9)
+			 : (conf->chunk_size >> 9);
+	int dd_idx;
 	int chunk_offset = sector_div(stripe, sectors_per_chunk);
+	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 
-	raid5_compute_sector(stripe * (disks - conf->max_degraded)
-			     *sectors_per_chunk + chunk_offset,
-			     disks, disks - conf->max_degraded,
-			     &dd_idx, &pd_idx, conf);
-	return pd_idx;
+	raid5_compute_sector(conf,
+			     stripe * (disks - conf->max_degraded)
+			     *sectors_per_chunk + chunk_offset,
+			     previous,
+			     &dd_idx, sh);
 }
 
@@ -2181,7 +2378,7 @@ static void handle_stripe_dirtying6(raid5_conf_t *conf,
 			  struct r6_state *r6s, int disks)
 {
 	int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
-	int qd_idx = r6s->qd_idx;
+	int qd_idx = sh->qd_idx;
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 		/* Would I have to read this buffer for reconstruct_write */
@@ -2371,7 +2568,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
 	int update_p = 0, update_q = 0;
 	struct r5dev *dev;
 	int pd_idx = sh->pd_idx;
-	int qd_idx = r6s->qd_idx;
+	int qd_idx = sh->qd_idx;
 
 	set_bit(STRIPE_HANDLE, &sh->state);
 
@@ -2467,17 +2664,14 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 	struct dma_async_tx_descriptor *tx = NULL;
 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 	for (i = 0; i < sh->disks; i++)
-		if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
-			int dd_idx, pd_idx, j;
+		if (i != sh->pd_idx && i != sh->qd_idx) {
+			int dd_idx, j;
 			struct stripe_head *sh2;
 
-			sector_t bn = compute_blocknr(sh, i);
-			sector_t s = raid5_compute_sector(bn, conf->raid_disks,
-						conf->raid_disks -
-						conf->max_degraded, &dd_idx,
-						&pd_idx, conf);
-			sh2 = get_active_stripe(conf, s, conf->raid_disks,
-						pd_idx, 1);
+			sector_t bn = compute_blocknr(sh, i, 1);
+			sector_t s = raid5_compute_sector(conf, bn, 0,
+							  &dd_idx, NULL);
+			sh2 = get_active_stripe(conf, s, 0, 1);
 			if (sh2 == NULL)
 				/* so far only the early blocks of this stripe
 				 * have been requested.  When later blocks
@@ -2500,8 +2694,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
 			for (j = 0; j < conf->raid_disks; j++)
 				if (j != sh2->pd_idx &&
-				    (!r6s || j != raid6_next_disk(sh2->pd_idx,
-								 sh2->disks)) &&
+				    (!r6s || j != sh2->qd_idx) &&
 				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
 					break;
 			if (j == conf->raid_disks) {
@@ -2750,6 +2943,23 @@ static bool handle_stripe5(struct stripe_head *sh)
 
 	/* Finish reconstruct operations initiated by the expansion process */
 	if (sh->reconstruct_state == reconstruct_state_result) {
+		struct stripe_head *sh2
+			= get_active_stripe(conf, sh->sector, 1, 1);
+		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
+			/* sh cannot be written until sh2 has been read.
+			 * so arrange for sh to be delayed a little
+			 */
+			set_bit(STRIPE_DELAYED, &sh->state);
+			set_bit(STRIPE_HANDLE, &sh->state);
+			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+					      &sh2->state))
+				atomic_inc(&conf->preread_active_stripes);
+			release_stripe(sh2);
+			goto unlock;
+		}
+		if (sh2)
+			release_stripe(sh2);
+
 		sh->reconstruct_state = reconstruct_state_idle;
 		clear_bit(STRIPE_EXPANDING, &sh->state);
 		for (i = conf->raid_disks; i--; ) {
@@ -2763,8 +2973,7 @@ static bool handle_stripe5(struct stripe_head *sh)
 	    !sh->reconstruct_state) {
 		/* Need to write out all blocks after computing parity */
 		sh->disks = conf->raid_disks;
-		sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
-			conf->raid_disks);
+		stripe_set_idx(sh->sector, conf, 0, sh);
 		schedule_reconstruction5(sh, &s, 1, 1);
 	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -2796,20 +3005,19 @@ static bool handle_stripe5(struct stripe_head *sh)
 
 static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 {
-	raid6_conf_t *conf = sh->raid_conf;
+	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks;
 	struct bio *return_bi = NULL;
-	int i, pd_idx = sh->pd_idx;
+	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
 	struct stripe_head_state s;
 	struct r6_state r6s;
 	struct r5dev *dev, *pdev, *qdev;
 	mdk_rdev_t *blocked_rdev = NULL;
 
-	r6s.qd_idx = raid6_next_disk(pd_idx, disks);
 	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
 		"pd_idx=%d, qd_idx=%d\n",
 	       (unsigned long long)sh->sector, sh->state,
-	       atomic_read(&sh->count), pd_idx, r6s.qd_idx);
+	       atomic_read(&sh->count), pd_idx, qd_idx);
 	memset(&s, 0, sizeof(s));
 
 	spin_lock(&sh->lock);
@@ -2920,9 +3128,9 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 	pdev = &sh->dev[pd_idx];
 	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
 		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
-	qdev = &sh->dev[r6s.qd_idx];
-	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx)
-		|| (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx);
+	qdev = &sh->dev[qd_idx];
+	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
+		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
 
 	if ( s.written &&
 	     ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
@@ -2980,10 +3188,26 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 	}
 
 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+		struct stripe_head *sh2
+			= get_active_stripe(conf, sh->sector, 1, 1);
+		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
+			/* sh cannot be written until sh2 has been read.
+			 * so arrange for sh to be delayed a little
+			 */
+			set_bit(STRIPE_DELAYED, &sh->state);
+			set_bit(STRIPE_HANDLE, &sh->state);
+			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+					      &sh2->state))
+				atomic_inc(&conf->preread_active_stripes);
+			release_stripe(sh2);
+			goto unlock;
+		}
+		if (sh2)
+			release_stripe(sh2);
+
 		/* Need to write out all blocks after computing P&Q */
 		sh->disks = conf->raid_disks;
-		sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
-					     conf->raid_disks);
+		stripe_set_idx(sh->sector, conf, 0, sh);
 		compute_parity6(sh, RECONSTRUCT_WRITE);
 		for (i = conf->raid_disks ; i-- ; ) {
 			set_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -3134,6 +3358,8 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 	if ((bvm->bi_rw & 1) == WRITE)
 		return biovec->bv_len; /* always allow writes to be mergeable */
 
+	if (mddev->new_chunk < mddev->chunk_size)
+		chunk_sectors = mddev->new_chunk >> 9;
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0;
 	if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3149,6 +3375,8 @@ static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
 	unsigned int bio_sectors = bio->bi_size >> 9;
 
+	if (mddev->new_chunk < mddev->chunk_size)
+		chunk_sectors = mddev->new_chunk >> 9;
 	return chunk_sectors >=
 		((sector & (chunk_sectors - 1)) + bio_sectors);
 }
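
Both helpers above clamp the effective chunk to the smaller of the old and new chunk sizes while a reshape is pending, so a bio judged "chunk aligned" fits inside a chunk of either geometry. A standalone illustrative sketch of the boundary test:

    #include <stdio.h>

    static int in_chunk_boundary(unsigned long long sector,
    			     unsigned int bio_sectors,
    			     unsigned int chunk_size,
    			     unsigned int new_chunk)
    {
    	unsigned int chunk_sectors = chunk_size >> 9;

    	if (new_chunk < chunk_size)
    		chunk_sectors = new_chunk >> 9;
    	return chunk_sectors >=
    		((sector & (chunk_sectors - 1)) + bio_sectors);
    }

    int main(void)
    {
    	/* old chunk 64K (128 sectors), reshaping down to 32K (64 sectors) */
    	printf("%d\n", in_chunk_boundary(0, 96, 65536, 32768)); /* 0: crosses new chunk */
    	printf("%d\n", in_chunk_boundary(0, 48, 65536, 32768)); /* 1: fits both */
    	return 0;
    }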
@@ -3255,9 +3483,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
-	const unsigned int raid_disks = conf->raid_disks;
-	const unsigned int data_disks = raid_disks - conf->max_degraded;
-	unsigned int dd_idx, pd_idx;
+	unsigned int dd_idx;
 	struct bio* align_bi;
 	mdk_rdev_t *rdev;
 
@@ -3266,7 +3492,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 		return 0;
 	}
 	/*
-	 * use bio_clone to make a copy of the bio 
+	 * use bio_clone to make a copy of the bio
 	 */
 	align_bi = bio_clone(raid_bio, GFP_NOIO);
 	if (!align_bi)
@@ -3280,12 +3506,9 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 	/*
 	 *	compute position
 	 */
-	align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector,
-					raid_disks,
-					data_disks,
-					&dd_idx,
-					&pd_idx,
-					conf);
+	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
+						   0,
+						   &dd_idx, NULL);
 
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
@@ -3377,7 +3600,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
-	unsigned int dd_idx, pd_idx;
+	int dd_idx;
 	sector_t new_sector;
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
@@ -3400,7 +3623,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	if (rw == READ &&
 	     mddev->reshape_position == MaxSector &&
 	     chunk_aligned_read(q,bi))
-            	return 0;
+		return 0;
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3410,26 +3633,31 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int disks, data_disks;
+		int previous;
 
 	retry:
+		previous = 0;
+		disks = conf->raid_disks;
 		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
-		if (likely(conf->expand_progress == MaxSector))
-			disks = conf->raid_disks;
-		else {
-			/* spinlock is needed as expand_progress may be
+		if (unlikely(conf->reshape_progress != MaxSector)) {
+			/* spinlock is needed as reshape_progress may be
 			 * 64bit on a 32bit platform, and so it might be
 			 * possible to see a half-updated value
-			 * Ofcourse expand_progress could change after
+			 * Of course reshape_progress could change after
 			 * the lock is dropped, so once we get a reference
 			 * to the stripe that we think it is, we will have
 			 * to check again.
 			 */
 			spin_lock_irq(&conf->device_lock);
-			disks = conf->raid_disks;
-			if (logical_sector >= conf->expand_progress)
+			if (mddev->delta_disks < 0
+			    ? logical_sector < conf->reshape_progress
+			    : logical_sector >= conf->reshape_progress) {
 				disks = conf->previous_raid_disks;
-			else {
-				if (logical_sector >= conf->expand_lo) {
+				previous = 1;
+			} else {
+				if (mddev->delta_disks < 0
+				    ? logical_sector < conf->reshape_safe
+				    : logical_sector >= conf->reshape_safe) {
 					spin_unlock_irq(&conf->device_lock);
 					schedule();
 					goto retry;
@@ -3439,15 +3667,17 @@ static int make_request(struct request_queue *q, struct bio * bi)
 		}
 		data_disks = disks - conf->max_degraded;
 
-		new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
-						  &dd_idx, &pd_idx, conf);
+		new_sector = raid5_compute_sector(conf, logical_sector,
+						  previous,
+						  &dd_idx, NULL);
 		pr_debug("raid5: make_request, sector %llu logical %llu\n",
 			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
-		sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
+		sh = get_active_stripe(conf, new_sector, previous,
+				       (bi->bi_rw&RWA_MASK));
 		if (sh) {
-			if (unlikely(conf->expand_progress != MaxSector)) {
+			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
 				 * stripe, so we must do the range check again.
 				 * Expansion could still move past after this
@@ -3458,8 +3688,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
 				 */
 				int must_retry = 0;
 				spin_lock_irq(&conf->device_lock);
-				if (logical_sector < conf->expand_progress &&
-				    disks == conf->previous_raid_disks)
+				if (mddev->delta_disks < 0
+				    ? logical_sector >= conf->reshape_progress
+				    : logical_sector < conf->reshape_progress)
 					/* mismatch, need to try again */
 					must_retry = 1;
 				spin_unlock_irq(&conf->device_lock);
@@ -3514,6 +3745,8 @@ static int make_request(struct request_queue *q, struct bio * bi) | |||
3514 | return 0; | 3745 | return 0; |
3515 | } | 3746 | } |
3516 | 3747 | ||
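[Editor's note: the `delta_disks < 0 ? ... : ...` tests introduced above all encode one idea: a growing reshape advances upward from sector 0, while a shrinking one walks downward from the end of the array, so every "has this sector been reshaped yet?" comparison must flip with the direction. A minimal stand-alone sketch of just that decision in plain C; the typedef and function name are editorial stand-ins, not code from this patch:]

    #include <stdbool.h>

    typedef unsigned long long sector_t;   /* stand-in for the kernel type */

    /* Growing: sectors below reshape_progress already use the new
     * geometry.  Shrinking: the front moves down from the end, so
     * sectors below reshape_progress still use the old geometry.
     */
    static bool uses_old_geometry(int delta_disks, sector_t logical,
                                  sector_t reshape_progress)
    {
            if (delta_disks < 0)
                    return logical < reshape_progress;
            return logical >= reshape_progress;
    }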
3748 | static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); | ||
3749 | |||
3517 | static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) | 3750 | static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) |
3518 | { | 3751 | { |
3519 | /* reshaping is quite different to recovery/resync so it is | 3752 | /* reshaping is quite different to recovery/resync so it is |
@@ -3527,61 +3760,118 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3527 | */ | 3760 | */ |
3528 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; | 3761 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
3529 | struct stripe_head *sh; | 3762 | struct stripe_head *sh; |
3530 | int pd_idx; | ||
3531 | sector_t first_sector, last_sector; | 3763 | sector_t first_sector, last_sector; |
3532 | int raid_disks = conf->previous_raid_disks; | 3764 | int raid_disks = conf->previous_raid_disks; |
3533 | int data_disks = raid_disks - conf->max_degraded; | 3765 | int data_disks = raid_disks - conf->max_degraded; |
3534 | int new_data_disks = conf->raid_disks - conf->max_degraded; | 3766 | int new_data_disks = conf->raid_disks - conf->max_degraded; |
3535 | int i; | 3767 | int i; |
3536 | int dd_idx; | 3768 | int dd_idx; |
3537 | sector_t writepos, safepos, gap; | 3769 | sector_t writepos, readpos, safepos; |
3538 | 3770 | sector_t stripe_addr; | |
3539 | if (sector_nr == 0 && | 3771 | int reshape_sectors; |
3540 | conf->expand_progress != 0) { | 3772 | struct list_head stripes; |
3541 | /* restarting in the middle, skip the initial sectors */ | 3773 | |
3542 | sector_nr = conf->expand_progress; | 3774 | if (sector_nr == 0) { |
3775 | /* If restarting in the middle, skip the initial sectors */ | ||
3776 | if (mddev->delta_disks < 0 && | ||
3777 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { | ||
3778 | sector_nr = raid5_size(mddev, 0, 0) | ||
3779 | - conf->reshape_progress; | ||
3780 | } else if (mddev->delta_disks > 0 && | ||
3781 | conf->reshape_progress > 0) | ||
3782 | sector_nr = conf->reshape_progress; | ||
3543 | sector_div(sector_nr, new_data_disks); | 3783 | sector_div(sector_nr, new_data_disks); |
3544 | *skipped = 1; | 3784 | if (sector_nr) { |
3545 | return sector_nr; | 3785 | *skipped = 1; |
3786 | return sector_nr; | ||
3787 | } | ||
3546 | } | 3788 | } |
3547 | 3789 | ||
3790 | /* We need to process a full chunk at a time. | ||
3791 | * If old and new chunk sizes differ, we need to process the | ||
3792 | * largest of these | ||
3793 | */ | ||
3794 | if (mddev->new_chunk > mddev->chunk_size) | ||
3795 | reshape_sectors = mddev->new_chunk / 512; | ||
3796 | else | ||
3797 | reshape_sectors = mddev->chunk_size / 512; | ||
3798 | |||
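[Editor's note: a compact restatement of the unit-size choice above, purely editorial: one reshape pass must cover a whole chunk of both the old and the new layout, so the larger chunk size wins:]

    /* Reshape unit in 512-byte sectors: the larger of the two chunk
     * sizes, so one pass covers a whole chunk of either layout.
     */
    static int reshape_unit_sectors(int chunk_bytes, int new_chunk_bytes)
    {
            int bigger = new_chunk_bytes > chunk_bytes ? new_chunk_bytes
                                                       : chunk_bytes;
            return bigger / 512;
    }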
3548 | /* we update the metadata when there is more than 3Meg | 3799 | /* we update the metadata when there is more than 3Meg |
3549 | * in the block range (that is rather arbitrary, should | 3800 | * in the block range (that is rather arbitrary, should |
3550 | * probably be time based) or when the data about to be | 3801 | * probably be time based) or when the data about to be |
3551 | * copied would over-write the source of the data at | 3802 | * copied would over-write the source of the data at |
3552 | * the front of the range. | 3803 | * the front of the range. |
3553 | * i.e. one new_stripe forward from expand_progress, in the new | 3804 | * i.e. one new_stripe along from reshape_progress, in the new |
3554 | * layout, maps to after where expand_lo maps in the old layout | 3805 | * layout, maps to after where reshape_safe maps in the old layout |
3555 | */ | 3806 | */ |
3556 | writepos = conf->expand_progress + | 3807 | writepos = conf->reshape_progress; |
3557 | conf->chunk_size/512*(new_data_disks); | ||
3558 | sector_div(writepos, new_data_disks); | 3808 | sector_div(writepos, new_data_disks); |
3559 | safepos = conf->expand_lo; | 3809 | readpos = conf->reshape_progress; |
3810 | sector_div(readpos, data_disks); | ||
3811 | safepos = conf->reshape_safe; | ||
3560 | sector_div(safepos, data_disks); | 3812 | sector_div(safepos, data_disks); |
3561 | gap = conf->expand_progress - conf->expand_lo; | 3813 | if (mddev->delta_disks < 0) { |
3814 | writepos -= reshape_sectors; | ||
3815 | readpos += reshape_sectors; | ||
3816 | safepos += reshape_sectors; | ||
3817 | } else { | ||
3818 | writepos += reshape_sectors; | ||
3819 | readpos -= reshape_sectors; | ||
3820 | safepos -= reshape_sectors; | ||
3821 | } | ||
3562 | 3822 | ||
3563 | if (writepos >= safepos || | 3823 | /* 'writepos' is the most advanced device address we might write. |
3564 | gap > (new_data_disks)*3000*2 /*3Meg*/) { | 3824 | * 'readpos' is the least advanced device address we might read. |
3825 | * 'safepos' is the least address recorded in the metadata as having | ||
3826 | * been reshaped. | ||
3827 | * If 'readpos' is behind 'writepos', then there is no way that we can | ||
3828 | * ensure safety in the face of a crash - that must be done by userspace | ||
3829 | * making a backup of the data. So in that case there is no particular | ||
3830 | * rush to update metadata. | ||
3831 | * Otherwise if 'safepos' is behind 'writepos', then we really need to | ||
3832 | * update the metadata to advance 'safepos' to match 'readpos' so that | ||
3833 | * we can be safe in the event of a crash. | ||
3834 | * So we insist on updating metadata if safepos is behind writepos and | ||
3835 | * readpos is beyond writepos. | ||
3836 | * In any case, update the metadata every 10 seconds. | ||
3837 | * Maybe that number should be configurable, but I'm not sure it is | ||
3838 | * worth it.... maybe it could be a multiple of safemode_delay??? | ||
3839 | */ | ||
3840 | if ((mddev->delta_disks < 0 | ||
3841 | ? (safepos > writepos && readpos < writepos) | ||
3842 | : (safepos < writepos && readpos > writepos)) || | ||
3843 | time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { | ||
3565 | /* Cannot proceed until we've updated the superblock... */ | 3844 | /* Cannot proceed until we've updated the superblock... */ |
3566 | wait_event(conf->wait_for_overlap, | 3845 | wait_event(conf->wait_for_overlap, |
3567 | atomic_read(&conf->reshape_stripes)==0); | 3846 | atomic_read(&conf->reshape_stripes)==0); |
3568 | mddev->reshape_position = conf->expand_progress; | 3847 | mddev->reshape_position = conf->reshape_progress; |
3848 | conf->reshape_checkpoint = jiffies; | ||
3569 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 3849 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
3570 | md_wakeup_thread(mddev->thread); | 3850 | md_wakeup_thread(mddev->thread); |
3571 | wait_event(mddev->sb_wait, mddev->flags == 0 || | 3851 | wait_event(mddev->sb_wait, mddev->flags == 0 || |
3572 | kthread_should_stop()); | 3852 | kthread_should_stop()); |
3573 | spin_lock_irq(&conf->device_lock); | 3853 | spin_lock_irq(&conf->device_lock); |
3574 | conf->expand_lo = mddev->reshape_position; | 3854 | conf->reshape_safe = mddev->reshape_position; |
3575 | spin_unlock_irq(&conf->device_lock); | 3855 | spin_unlock_irq(&conf->device_lock); |
3576 | wake_up(&conf->wait_for_overlap); | 3856 | wake_up(&conf->wait_for_overlap); |
3577 | } | 3857 | } |
3578 | 3858 | ||
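[Editor's note: the long comment above boils down to a single predicate. A compilable sketch of just that decision in plain C; `timer_expired` stands in for time_after(jiffies, reshape_checkpoint + 10*HZ), an editorial simplification rather than the patch's code:]

    #include <stdbool.h>

    typedef unsigned long long sector_t;

    /* Checkpoint when the copy is about to overrun the region recorded
     * as safe in the metadata (safepos behind writepos while readpos is
     * beyond writepos; the comparisons flip for a shrinking reshape),
     * or periodically on the 10-second timer.
     */
    static bool must_checkpoint(bool shrinking, sector_t writepos,
                                sector_t readpos, sector_t safepos,
                                bool timer_expired)
    {
            bool overrun = shrinking
                    ? (safepos > writepos && readpos < writepos)
                    : (safepos < writepos && readpos > writepos);
            return overrun || timer_expired;
    }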
3579 | for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { | 3859 | if (mddev->delta_disks < 0) { |
3860 | BUG_ON(conf->reshape_progress == 0); | ||
3861 | stripe_addr = writepos; | ||
3862 | BUG_ON((mddev->dev_sectors & | ||
3863 | ~((sector_t)reshape_sectors - 1)) | ||
3864 | - reshape_sectors - stripe_addr | ||
3865 | != sector_nr); | ||
3866 | } else { | ||
3867 | BUG_ON(writepos != sector_nr + reshape_sectors); | ||
3868 | stripe_addr = sector_nr; | ||
3869 | } | ||
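[Editor's note: the shrink-direction BUG_ON above is dense. For a shrink, sync_request still hands out sector_nr counting up from 0, while the copy front (stripe_addr = writepos) walks down from the last whole reshape unit of the device; reshape_sectors is a power of two, so the mask rounds down. A sketch of the identity being asserted, with editorial names:]

    typedef unsigned long long sector_t;

    /* stripe_addr for a shrinking reshape: mirror sector_nr about the
     * last whole reshape unit of the per-device range.
     */
    static sector_t shrink_stripe_addr(sector_t dev_sectors,
                                       sector_t reshape_sectors,
                                       sector_t sector_nr)
    {
            sector_t top = dev_sectors & ~(reshape_sectors - 1);
            return top - reshape_sectors - sector_nr;
    }

    /* e.g. 1000 sectors per device and a 128-sector unit: the first
     * pass (sector_nr == 0) copies device sectors [768, 896).
     */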
3870 | INIT_LIST_HEAD(&stripes); | ||
3871 | for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { | ||
3580 | int j; | 3872 | int j; |
3581 | int skipped = 0; | 3873 | int skipped = 0; |
3582 | pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); | 3874 | sh = get_active_stripe(conf, stripe_addr+i, 0, 0); |
3583 | sh = get_active_stripe(conf, sector_nr+i, | ||
3584 | conf->raid_disks, pd_idx, 0); | ||
3585 | set_bit(STRIPE_EXPANDING, &sh->state); | 3875 | set_bit(STRIPE_EXPANDING, &sh->state); |
3586 | atomic_inc(&conf->reshape_stripes); | 3876 | atomic_inc(&conf->reshape_stripes); |
3587 | /* If any of this stripe is beyond the end of the old | 3877 | /* If any of this stripe is beyond the end of the old |
@@ -3592,10 +3882,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3592 | if (j == sh->pd_idx) | 3882 | if (j == sh->pd_idx) |
3593 | continue; | 3883 | continue; |
3594 | if (conf->level == 6 && | 3884 | if (conf->level == 6 && |
3595 | j == raid6_next_disk(sh->pd_idx, sh->disks)) | 3885 | j == sh->qd_idx) |
3596 | continue; | 3886 | continue; |
3597 | s = compute_blocknr(sh, j); | 3887 | s = compute_blocknr(sh, j, 0); |
3598 | if (s < mddev->array_sectors) { | 3888 | if (s < raid5_size(mddev, 0, 0)) { |
3599 | skipped = 1; | 3889 | skipped = 1; |
3600 | continue; | 3890 | continue; |
3601 | } | 3891 | } |
@@ -3607,10 +3897,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3607 | set_bit(STRIPE_EXPAND_READY, &sh->state); | 3897 | set_bit(STRIPE_EXPAND_READY, &sh->state); |
3608 | set_bit(STRIPE_HANDLE, &sh->state); | 3898 | set_bit(STRIPE_HANDLE, &sh->state); |
3609 | } | 3899 | } |
3610 | release_stripe(sh); | 3900 | list_add(&sh->lru, &stripes); |
3611 | } | 3901 | } |
3612 | spin_lock_irq(&conf->device_lock); | 3902 | spin_lock_irq(&conf->device_lock); |
3613 | conf->expand_progress = (sector_nr + i) * new_data_disks; | 3903 | if (mddev->delta_disks < 0) |
3904 | conf->reshape_progress -= reshape_sectors * new_data_disks; | ||
3905 | else | ||
3906 | conf->reshape_progress += reshape_sectors * new_data_disks; | ||
3614 | spin_unlock_irq(&conf->device_lock); | 3907 | spin_unlock_irq(&conf->device_lock); |
3615 | /* Ok, those stripes are ready. We can start scheduling | 3908 | /* Ok, those stripes are ready. We can start scheduling |
3616 | * reads on the source stripes. | 3909 | * reads on the source stripes. |
@@ -3618,46 +3911,50 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3618 | * block on the destination stripes. | 3911 | * block on the destination stripes. |
3619 | */ | 3912 | */ |
3620 | first_sector = | 3913 | first_sector = |
3621 | raid5_compute_sector(sector_nr*(new_data_disks), | 3914 | raid5_compute_sector(conf, stripe_addr*(new_data_disks), |
3622 | raid_disks, data_disks, | 3915 | 1, &dd_idx, NULL); |
3623 | &dd_idx, &pd_idx, conf); | ||
3624 | last_sector = | 3916 | last_sector = |
3625 | raid5_compute_sector((sector_nr+conf->chunk_size/512) | 3917 | raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) |
3626 | *(new_data_disks) -1, | 3918 | *(new_data_disks) - 1), |
3627 | raid_disks, data_disks, | 3919 | 1, &dd_idx, NULL); |
3628 | &dd_idx, &pd_idx, conf); | 3920 | if (last_sector >= mddev->dev_sectors) |
3629 | if (last_sector >= (mddev->size<<1)) | 3921 | last_sector = mddev->dev_sectors - 1; |
3630 | last_sector = (mddev->size<<1)-1; | ||
3631 | while (first_sector <= last_sector) { | 3922 | while (first_sector <= last_sector) { |
3632 | pd_idx = stripe_to_pdidx(first_sector, conf, | 3923 | sh = get_active_stripe(conf, first_sector, 1, 0); |
3633 | conf->previous_raid_disks); | ||
3634 | sh = get_active_stripe(conf, first_sector, | ||
3635 | conf->previous_raid_disks, pd_idx, 0); | ||
3636 | set_bit(STRIPE_EXPAND_SOURCE, &sh->state); | 3924 | set_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
3637 | set_bit(STRIPE_HANDLE, &sh->state); | 3925 | set_bit(STRIPE_HANDLE, &sh->state); |
3638 | release_stripe(sh); | 3926 | release_stripe(sh); |
3639 | first_sector += STRIPE_SECTORS; | 3927 | first_sector += STRIPE_SECTORS; |
3640 | } | 3928 | } |
3929 | /* Now that the sources are clearly marked, we can release | ||
3930 | * the destination stripes | ||
3931 | */ | ||
3932 | while (!list_empty(&stripes)) { | ||
3933 | sh = list_entry(stripes.next, struct stripe_head, lru); | ||
3934 | list_del_init(&sh->lru); | ||
3935 | release_stripe(sh); | ||
3936 | } | ||
3641 | /* If this takes us to the resync_max point where we have to pause, | 3937 | /* If this takes us to the resync_max point where we have to pause, |
3642 | * then we need to write out the superblock. | 3938 | * then we need to write out the superblock. |
3643 | */ | 3939 | */ |
3644 | sector_nr += conf->chunk_size>>9; | 3940 | sector_nr += reshape_sectors; |
3645 | if (sector_nr >= mddev->resync_max) { | 3941 | if (sector_nr >= mddev->resync_max) { |
3646 | /* Cannot proceed until we've updated the superblock... */ | 3942 | /* Cannot proceed until we've updated the superblock... */ |
3647 | wait_event(conf->wait_for_overlap, | 3943 | wait_event(conf->wait_for_overlap, |
3648 | atomic_read(&conf->reshape_stripes) == 0); | 3944 | atomic_read(&conf->reshape_stripes) == 0); |
3649 | mddev->reshape_position = conf->expand_progress; | 3945 | mddev->reshape_position = conf->reshape_progress; |
3946 | conf->reshape_checkpoint = jiffies; | ||
3650 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 3947 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
3651 | md_wakeup_thread(mddev->thread); | 3948 | md_wakeup_thread(mddev->thread); |
3652 | wait_event(mddev->sb_wait, | 3949 | wait_event(mddev->sb_wait, |
3653 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) | 3950 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) |
3654 | || kthread_should_stop()); | 3951 | || kthread_should_stop()); |
3655 | spin_lock_irq(&conf->device_lock); | 3952 | spin_lock_irq(&conf->device_lock); |
3656 | conf->expand_lo = mddev->reshape_position; | 3953 | conf->reshape_safe = mddev->reshape_position; |
3657 | spin_unlock_irq(&conf->device_lock); | 3954 | spin_unlock_irq(&conf->device_lock); |
3658 | wake_up(&conf->wait_for_overlap); | 3955 | wake_up(&conf->wait_for_overlap); |
3659 | } | 3956 | } |
3660 | return conf->chunk_size>>9; | 3957 | return reshape_sectors; |
3661 | } | 3958 | } |
3662 | 3959 | ||
3663 | /* FIXME go_faster isn't used */ | 3960 | /* FIXME go_faster isn't used */ |
@@ -3665,9 +3962,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
3665 | { | 3962 | { |
3666 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; | 3963 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
3667 | struct stripe_head *sh; | 3964 | struct stripe_head *sh; |
3668 | int pd_idx; | 3965 | sector_t max_sector = mddev->dev_sectors; |
3669 | int raid_disks = conf->raid_disks; | ||
3670 | sector_t max_sector = mddev->size << 1; | ||
3671 | int sync_blocks; | 3966 | int sync_blocks; |
3672 | int still_degraded = 0; | 3967 | int still_degraded = 0; |
3673 | int i; | 3968 | int i; |
@@ -3675,6 +3970,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
3675 | if (sector_nr >= max_sector) { | 3970 | if (sector_nr >= max_sector) { |
3676 | /* just being told to finish up .. nothing much to do */ | 3971 | /* just being told to finish up .. nothing much to do */ |
3677 | unplug_slaves(mddev); | 3972 | unplug_slaves(mddev); |
3973 | |||
3678 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { | 3974 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
3679 | end_reshape(conf); | 3975 | end_reshape(conf); |
3680 | return 0; | 3976 | return 0; |
@@ -3705,7 +4001,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
3705 | */ | 4001 | */ |
3706 | if (mddev->degraded >= conf->max_degraded && | 4002 | if (mddev->degraded >= conf->max_degraded && |
3707 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | 4003 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
3708 | sector_t rv = (mddev->size << 1) - sector_nr; | 4004 | sector_t rv = mddev->dev_sectors - sector_nr; |
3709 | *skipped = 1; | 4005 | *skipped = 1; |
3710 | return rv; | 4006 | return rv; |
3711 | } | 4007 | } |
@@ -3721,10 +4017,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
3721 | 4017 | ||
3722 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); | 4018 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); |
3723 | 4019 | ||
3724 | pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); | 4020 | sh = get_active_stripe(conf, sector_nr, 0, 1); |
3725 | sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); | ||
3726 | if (sh == NULL) { | 4021 | if (sh == NULL) { |
3727 | sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); | 4022 | sh = get_active_stripe(conf, sector_nr, 0, 0); |
3728 | /* make sure we don't swamp the stripe cache if someone else | 4023 | /* make sure we don't swamp the stripe cache if someone else |
3729 | * is trying to get access | 4024 | * is trying to get access |
3730 | */ | 4025 | */ |
@@ -3766,19 +4061,15 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
3766 | * there will be only one 'dd_idx', so only one call to raid5_compute_sector is needed. | 4061 | * there will be only one 'dd_idx', so only one call to raid5_compute_sector is needed. |
3767 | */ | 4062 | */ |
3768 | struct stripe_head *sh; | 4063 | struct stripe_head *sh; |
3769 | int dd_idx, pd_idx; | 4064 | int dd_idx; |
3770 | sector_t sector, logical_sector, last_sector; | 4065 | sector_t sector, logical_sector, last_sector; |
3771 | int scnt = 0; | 4066 | int scnt = 0; |
3772 | int remaining; | 4067 | int remaining; |
3773 | int handled = 0; | 4068 | int handled = 0; |
3774 | 4069 | ||
3775 | logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 4070 | logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
3776 | sector = raid5_compute_sector( logical_sector, | 4071 | sector = raid5_compute_sector(conf, logical_sector, |
3777 | conf->raid_disks, | 4072 | 0, &dd_idx, NULL); |
3778 | conf->raid_disks - conf->max_degraded, | ||
3779 | &dd_idx, | ||
3780 | &pd_idx, | ||
3781 | conf); | ||
3782 | last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); | 4073 | last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); |
3783 | 4074 | ||
3784 | for (; logical_sector < last_sector; | 4075 | for (; logical_sector < last_sector; |
@@ -3790,7 +4081,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) | |||
3790 | /* already done this stripe */ | 4081 | /* already done this stripe */ |
3791 | continue; | 4082 | continue; |
3792 | 4083 | ||
3793 | sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); | 4084 | sh = get_active_stripe(conf, sector, 0, 1); |
3794 | 4085 | ||
3795 | if (!sh) { | 4086 | if (!sh) { |
3796 | /* failed to get a stripe - must wait */ | 4087 | /* failed to get a stripe - must wait */ |
@@ -3992,89 +4283,69 @@ static struct attribute_group raid5_attrs_group = { | |||
3992 | .attrs = raid5_attrs, | 4283 | .attrs = raid5_attrs, |
3993 | }; | 4284 | }; |
3994 | 4285 | ||
3995 | static int run(mddev_t *mddev) | 4286 | static sector_t |
4287 | raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | ||
4288 | { | ||
4289 | raid5_conf_t *conf = mddev_to_conf(mddev); | ||
4290 | |||
4291 | if (!sectors) | ||
4292 | sectors = mddev->dev_sectors; | ||
4293 | if (!raid_disks) { | ||
4294 | /* size is defined by the smallest of previous and new size */ | ||
4295 | if (conf->raid_disks < conf->previous_raid_disks) | ||
4296 | raid_disks = conf->raid_disks; | ||
4297 | else | ||
4298 | raid_disks = conf->previous_raid_disks; | ||
4299 | } | ||
4300 | |||
4301 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); | ||
4302 | sectors &= ~((sector_t)mddev->new_chunk/512 - 1); | ||
4303 | return sectors * (raid_disks - conf->max_degraded); | ||
4304 | } | ||
4305 | |||
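[Editor's note: a stand-alone rendering of the size rule in raid5_size() above: per-device sectors are rounded down to a multiple of both the old and the new chunk size (each a power of two, expressed in sectors), then multiplied by the number of data disks. Names and types are editorial stand-ins:]

    typedef unsigned long long sector_t;

    static sector_t usable_array_sectors(sector_t dev_sectors, int data_disks,
                                         sector_t chunk_sectors,
                                         sector_t new_chunk_sectors)
    {
            dev_sectors &= ~(chunk_sectors - 1);      /* old-layout multiple */
            dev_sectors &= ~(new_chunk_sectors - 1);  /* new-layout multiple */
            return dev_sectors * data_disks;
    }

    /* e.g. 1,000,000 sectors/device, 128- and 256-sector chunks, 3 data
     * disks: 999,936 usable sectors per device, 2,999,808 in the array.
     */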
4306 | static raid5_conf_t *setup_conf(mddev_t *mddev) | ||
3996 | { | 4307 | { |
3997 | raid5_conf_t *conf; | 4308 | raid5_conf_t *conf; |
3998 | int raid_disk, memory; | 4309 | int raid_disk, memory; |
3999 | mdk_rdev_t *rdev; | 4310 | mdk_rdev_t *rdev; |
4000 | struct disk_info *disk; | 4311 | struct disk_info *disk; |
4001 | int working_disks = 0; | ||
4002 | 4312 | ||
4003 | if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { | 4313 | if (mddev->new_level != 5 |
4314 | && mddev->new_level != 4 | ||
4315 | && mddev->new_level != 6) { | ||
4004 | printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", | 4316 | printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", |
4005 | mdname(mddev), mddev->level); | 4317 | mdname(mddev), mddev->new_level); |
4006 | return -EIO; | 4318 | return ERR_PTR(-EIO); |
4007 | } | 4319 | } |
4008 | 4320 | if ((mddev->new_level == 5 | |
4009 | if (mddev->chunk_size < PAGE_SIZE) { | 4321 | && !algorithm_valid_raid5(mddev->new_layout)) || |
4010 | printk(KERN_ERR "md/raid5: chunk_size must be at least " | 4322 | (mddev->new_level == 6 |
4011 | "PAGE_SIZE but %d < %ld\n", | 4323 | && !algorithm_valid_raid6(mddev->new_layout))) { |
4012 | mddev->chunk_size, PAGE_SIZE); | 4324 | printk(KERN_ERR "raid5: %s: layout %d not supported\n", |
4013 | return -EINVAL; | 4325 | mdname(mddev), mddev->new_layout); |
4326 | return ERR_PTR(-EIO); | ||
4014 | } | 4327 | } |
4015 | 4328 | if (mddev->new_level == 6 && mddev->raid_disks < 4) { | |
4016 | if (mddev->reshape_position != MaxSector) { | 4329 | printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", |
4017 | /* Check that we can continue the reshape. | 4330 | mdname(mddev), mddev->raid_disks); |
4018 | * Currently only disks can change, it must | 4331 | return ERR_PTR(-EINVAL); |
4019 | * increase, and we must be past the point where | ||
4020 | * a stripe over-writes itself | ||
4021 | */ | ||
4022 | sector_t here_new, here_old; | ||
4023 | int old_disks; | ||
4024 | int max_degraded = (mddev->level == 5 ? 1 : 2); | ||
4025 | |||
4026 | if (mddev->new_level != mddev->level || | ||
4027 | mddev->new_layout != mddev->layout || | ||
4028 | mddev->new_chunk != mddev->chunk_size) { | ||
4029 | printk(KERN_ERR "raid5: %s: unsupported reshape " | ||
4030 | "required - aborting.\n", | ||
4031 | mdname(mddev)); | ||
4032 | return -EINVAL; | ||
4033 | } | ||
4034 | if (mddev->delta_disks <= 0) { | ||
4035 | printk(KERN_ERR "raid5: %s: unsupported reshape " | ||
4036 | "(reduce disks) required - aborting.\n", | ||
4037 | mdname(mddev)); | ||
4038 | return -EINVAL; | ||
4039 | } | ||
4040 | old_disks = mddev->raid_disks - mddev->delta_disks; | ||
4041 | /* reshape_position must be on a new-stripe boundary, and one | ||
4042 | * further up in new geometry must map after here in old | ||
4043 | * geometry. | ||
4044 | */ | ||
4045 | here_new = mddev->reshape_position; | ||
4046 | if (sector_div(here_new, (mddev->chunk_size>>9)* | ||
4047 | (mddev->raid_disks - max_degraded))) { | ||
4048 | printk(KERN_ERR "raid5: reshape_position not " | ||
4049 | "on a stripe boundary\n"); | ||
4050 | return -EINVAL; | ||
4051 | } | ||
4052 | /* here_new is the stripe we will write to */ | ||
4053 | here_old = mddev->reshape_position; | ||
4054 | sector_div(here_old, (mddev->chunk_size>>9)* | ||
4055 | (old_disks-max_degraded)); | ||
4056 | /* here_old is the first stripe that we might need to read | ||
4057 | * from */ | ||
4058 | if (here_new >= here_old) { | ||
4059 | /* Reading from the same stripe as writing to - bad */ | ||
4060 | printk(KERN_ERR "raid5: reshape_position too early for " | ||
4061 | "auto-recovery - aborting.\n"); | ||
4062 | return -EINVAL; | ||
4063 | } | ||
4064 | printk(KERN_INFO "raid5: reshape will continue\n"); | ||
4065 | /* OK, we should be able to continue; */ | ||
4066 | } | 4332 | } |
4067 | 4333 | ||
4334 | if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { | ||
4335 | printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", | ||
4336 | mddev->new_chunk, mdname(mddev)); | ||
4337 | return ERR_PTR(-EINVAL); | ||
4338 | } | ||
4068 | 4339 | ||
4069 | mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); | 4340 | conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); |
4070 | if ((conf = mddev->private) == NULL) | 4341 | if (conf == NULL) |
4071 | goto abort; | 4342 | goto abort; |
4072 | if (mddev->reshape_position == MaxSector) { | 4343 | |
4073 | conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; | 4344 | conf->raid_disks = mddev->raid_disks; |
4074 | } else { | 4345 | if (mddev->reshape_position == MaxSector) |
4075 | conf->raid_disks = mddev->raid_disks; | 4346 | conf->previous_raid_disks = mddev->raid_disks; |
4347 | else | ||
4076 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; | 4348 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; |
4077 | } | ||
4078 | 4349 | ||
4079 | conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), | 4350 | conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), |
4080 | GFP_KERNEL); | 4351 | GFP_KERNEL); |
@@ -4086,13 +4357,12 @@ static int run(mddev_t *mddev) | |||
4086 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) | 4357 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
4087 | goto abort; | 4358 | goto abort; |
4088 | 4359 | ||
4089 | if (mddev->level == 6) { | 4360 | if (mddev->new_level == 6) { |
4090 | conf->spare_page = alloc_page(GFP_KERNEL); | 4361 | conf->spare_page = alloc_page(GFP_KERNEL); |
4091 | if (!conf->spare_page) | 4362 | if (!conf->spare_page) |
4092 | goto abort; | 4363 | goto abort; |
4093 | } | 4364 | } |
4094 | spin_lock_init(&conf->device_lock); | 4365 | spin_lock_init(&conf->device_lock); |
4095 | mddev->queue->queue_lock = &conf->device_lock; | ||
4096 | init_waitqueue_head(&conf->wait_for_stripe); | 4366 | init_waitqueue_head(&conf->wait_for_stripe); |
4097 | init_waitqueue_head(&conf->wait_for_overlap); | 4367 | init_waitqueue_head(&conf->wait_for_overlap); |
4098 | INIT_LIST_HEAD(&conf->handle_list); | 4368 | INIT_LIST_HEAD(&conf->handle_list); |
@@ -4121,47 +4391,134 @@ static int run(mddev_t *mddev) | |||
4121 | printk(KERN_INFO "raid5: device %s operational as raid" | 4391 | printk(KERN_INFO "raid5: device %s operational as raid" |
4122 | " disk %d\n", bdevname(rdev->bdev,b), | 4392 | " disk %d\n", bdevname(rdev->bdev,b), |
4123 | raid_disk); | 4393 | raid_disk); |
4124 | working_disks++; | ||
4125 | } else | 4394 | } else |
4126 | /* Cannot rely on bitmap to complete recovery */ | 4395 | /* Cannot rely on bitmap to complete recovery */ |
4127 | conf->fullsync = 1; | 4396 | conf->fullsync = 1; |
4128 | } | 4397 | } |
4129 | 4398 | ||
4130 | /* | 4399 | conf->chunk_size = mddev->new_chunk; |
4131 | * 0 for a fully functional array, 1 or 2 for a degraded array. | 4400 | conf->level = mddev->new_level; |
4132 | */ | ||
4133 | mddev->degraded = conf->raid_disks - working_disks; | ||
4134 | conf->mddev = mddev; | ||
4135 | conf->chunk_size = mddev->chunk_size; | ||
4136 | conf->level = mddev->level; | ||
4137 | if (conf->level == 6) | 4401 | if (conf->level == 6) |
4138 | conf->max_degraded = 2; | 4402 | conf->max_degraded = 2; |
4139 | else | 4403 | else |
4140 | conf->max_degraded = 1; | 4404 | conf->max_degraded = 1; |
4141 | conf->algorithm = mddev->layout; | 4405 | conf->algorithm = mddev->new_layout; |
4142 | conf->max_nr_stripes = NR_STRIPES; | 4406 | conf->max_nr_stripes = NR_STRIPES; |
4143 | conf->expand_progress = mddev->reshape_position; | 4407 | conf->reshape_progress = mddev->reshape_position; |
4144 | 4408 | if (conf->reshape_progress != MaxSector) { | |
4145 | /* device size must be a multiple of chunk size */ | 4409 | conf->prev_chunk = mddev->chunk_size; |
4146 | mddev->size &= ~(mddev->chunk_size/1024 -1); | 4410 | conf->prev_algo = mddev->layout; |
4147 | mddev->resync_max_sectors = mddev->size << 1; | 4411 | } |
4148 | 4412 | ||
4149 | if (conf->level == 6 && conf->raid_disks < 4) { | 4413 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + |
4150 | printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", | 4414 | conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
4151 | mdname(mddev), conf->raid_disks); | 4415 | if (grow_stripes(conf, conf->max_nr_stripes)) { |
4416 | printk(KERN_ERR | ||
4417 | "raid5: couldn't allocate %dkB for buffers\n", memory); | ||
4152 | goto abort; | 4418 | goto abort; |
4153 | } | 4419 | } else |
4154 | if (!conf->chunk_size || conf->chunk_size % 4) { | 4420 | printk(KERN_INFO "raid5: allocated %dkB for %s\n", |
4155 | printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", | 4421 | memory, mdname(mddev)); |
4156 | conf->chunk_size, mdname(mddev)); | 4422 | |
4423 | conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); | ||
4424 | if (!conf->thread) { | ||
4425 | printk(KERN_ERR | ||
4426 | "raid5: couldn't allocate thread for %s\n", | ||
4427 | mdname(mddev)); | ||
4157 | goto abort; | 4428 | goto abort; |
4158 | } | 4429 | } |
4159 | if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { | 4430 | |
4160 | printk(KERN_ERR | 4431 | return conf; |
4161 | "raid5: unsupported parity algorithm %d for %s\n", | 4432 | |
4162 | conf->algorithm, mdname(mddev)); | 4433 | abort: |
4163 | goto abort; | 4434 | if (conf) { |
4435 | shrink_stripes(conf); | ||
4436 | safe_put_page(conf->spare_page); | ||
4437 | kfree(conf->disks); | ||
4438 | kfree(conf->stripe_hashtbl); | ||
4439 | kfree(conf); | ||
4440 | return ERR_PTR(-EIO); | ||
4441 | } else | ||
4442 | return ERR_PTR(-ENOMEM); | ||
4443 | } | ||
4444 | |||
4445 | static int run(mddev_t *mddev) | ||
4446 | { | ||
4447 | raid5_conf_t *conf; | ||
4448 | int working_disks = 0; | ||
4449 | mdk_rdev_t *rdev; | ||
4450 | |||
4451 | if (mddev->reshape_position != MaxSector) { | ||
4452 | /* Check that we can continue the reshape. | ||
4453 | * Currently only disks can change, it must | ||
4454 | * increase, and we must be past the point where | ||
4455 | * a stripe over-writes itself | ||
4456 | */ | ||
4457 | sector_t here_new, here_old; | ||
4458 | int old_disks; | ||
4459 | int max_degraded = (mddev->level == 6 ? 2 : 1); | ||
4460 | |||
4461 | if (mddev->new_level != mddev->level) { | ||
4462 | printk(KERN_ERR "raid5: %s: unsupported reshape " | ||
4463 | "required - aborting.\n", | ||
4464 | mdname(mddev)); | ||
4465 | return -EINVAL; | ||
4466 | } | ||
4467 | old_disks = mddev->raid_disks - mddev->delta_disks; | ||
4468 | /* reshape_position must be on a new-stripe boundary, and one | ||
4469 | * further up in new geometry must map after here in old | ||
4470 | * geometry. | ||
4471 | */ | ||
4472 | here_new = mddev->reshape_position; | ||
4473 | if (sector_div(here_new, (mddev->new_chunk>>9)* | ||
4474 | (mddev->raid_disks - max_degraded))) { | ||
4475 | printk(KERN_ERR "raid5: reshape_position not " | ||
4476 | "on a stripe boundary\n"); | ||
4477 | return -EINVAL; | ||
4478 | } | ||
4479 | /* here_new is the stripe we will write to */ | ||
4480 | here_old = mddev->reshape_position; | ||
4481 | sector_div(here_old, (mddev->chunk_size>>9)* | ||
4482 | (old_disks-max_degraded)); | ||
4483 | /* here_old is the first stripe that we might need to read | ||
4484 | * from */ | ||
4485 | if (here_new >= here_old) { | ||
4486 | /* Reading from the same stripe as writing to - bad */ | ||
4487 | printk(KERN_ERR "raid5: reshape_position too early for " | ||
4488 | "auto-recovery - aborting.\n"); | ||
4489 | return -EINVAL; | ||
4490 | } | ||
4491 | printk(KERN_INFO "raid5: reshape will continue\n"); | ||
4492 | /* OK, we should be able to continue; */ | ||
4493 | } else { | ||
4494 | BUG_ON(mddev->level != mddev->new_level); | ||
4495 | BUG_ON(mddev->layout != mddev->new_layout); | ||
4496 | BUG_ON(mddev->chunk_size != mddev->new_chunk); | ||
4497 | BUG_ON(mddev->delta_disks != 0); | ||
4164 | } | 4498 | } |
4499 | |||
4500 | if (mddev->private == NULL) | ||
4501 | conf = setup_conf(mddev); | ||
4502 | else | ||
4503 | conf = mddev->private; | ||
4504 | |||
4505 | if (IS_ERR(conf)) | ||
4506 | return PTR_ERR(conf); | ||
4507 | |||
4508 | mddev->thread = conf->thread; | ||
4509 | conf->thread = NULL; | ||
4510 | mddev->private = conf; | ||
4511 | |||
4512 | /* | ||
4513 | * 0 for a fully functional array, 1 or 2 for a degraded array. | ||
4514 | */ | ||
4515 | list_for_each_entry(rdev, &mddev->disks, same_set) | ||
4516 | if (rdev->raid_disk >= 0 && | ||
4517 | test_bit(In_sync, &rdev->flags)) | ||
4518 | working_disks++; | ||
4519 | |||
4520 | mddev->degraded = conf->raid_disks - working_disks; | ||
4521 | |||
4165 | if (mddev->degraded > conf->max_degraded) { | 4522 | if (mddev->degraded > conf->max_degraded) { |
4166 | printk(KERN_ERR "raid5: not enough operational devices for %s" | 4523 | printk(KERN_ERR "raid5: not enough operational devices for %s" |
4167 | " (%d/%d failed)\n", | 4524 | " (%d/%d failed)\n", |
@@ -4169,6 +4526,10 @@ static int run(mddev_t *mddev) | |||
4169 | goto abort; | 4526 | goto abort; |
4170 | } | 4527 | } |
4171 | 4528 | ||
4529 | /* device size must be a multiple of chunk size */ | ||
4530 | mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); | ||
4531 | mddev->resync_max_sectors = mddev->dev_sectors; | ||
4532 | |||
4172 | if (mddev->degraded > 0 && | 4533 | if (mddev->degraded > 0 && |
4173 | mddev->recovery_cp != MaxSector) { | 4534 | mddev->recovery_cp != MaxSector) { |
4174 | if (mddev->ok_start_degraded) | 4535 | if (mddev->ok_start_degraded) |
@@ -4184,43 +4545,22 @@ static int run(mddev_t *mddev) | |||
4184 | } | 4545 | } |
4185 | } | 4546 | } |
4186 | 4547 | ||
4187 | { | ||
4188 | mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); | ||
4189 | if (!mddev->thread) { | ||
4190 | printk(KERN_ERR | ||
4191 | "raid5: couldn't allocate thread for %s\n", | ||
4192 | mdname(mddev)); | ||
4193 | goto abort; | ||
4194 | } | ||
4195 | } | ||
4196 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + | ||
4197 | conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; | ||
4198 | if (grow_stripes(conf, conf->max_nr_stripes)) { | ||
4199 | printk(KERN_ERR | ||
4200 | "raid5: couldn't allocate %dkB for buffers\n", memory); | ||
4201 | shrink_stripes(conf); | ||
4202 | md_unregister_thread(mddev->thread); | ||
4203 | goto abort; | ||
4204 | } else | ||
4205 | printk(KERN_INFO "raid5: allocated %dkB for %s\n", | ||
4206 | memory, mdname(mddev)); | ||
4207 | |||
4208 | if (mddev->degraded == 0) | 4548 | if (mddev->degraded == 0) |
4209 | printk("raid5: raid level %d set %s active with %d out of %d" | 4549 | printk("raid5: raid level %d set %s active with %d out of %d" |
4210 | " devices, algorithm %d\n", conf->level, mdname(mddev), | 4550 | " devices, algorithm %d\n", conf->level, mdname(mddev), |
4211 | mddev->raid_disks-mddev->degraded, mddev->raid_disks, | 4551 | mddev->raid_disks-mddev->degraded, mddev->raid_disks, |
4212 | conf->algorithm); | 4552 | mddev->new_layout); |
4213 | else | 4553 | else |
4214 | printk(KERN_ALERT "raid5: raid level %d set %s active with %d" | 4554 | printk(KERN_ALERT "raid5: raid level %d set %s active with %d" |
4215 | " out of %d devices, algorithm %d\n", conf->level, | 4555 | " out of %d devices, algorithm %d\n", conf->level, |
4216 | mdname(mddev), mddev->raid_disks - mddev->degraded, | 4556 | mdname(mddev), mddev->raid_disks - mddev->degraded, |
4217 | mddev->raid_disks, conf->algorithm); | 4557 | mddev->raid_disks, mddev->new_layout); |
4218 | 4558 | ||
4219 | print_raid5_conf(conf); | 4559 | print_raid5_conf(conf); |
4220 | 4560 | ||
4221 | if (conf->expand_progress != MaxSector) { | 4561 | if (conf->reshape_progress != MaxSector) { |
4222 | printk("...ok start reshape thread\n"); | 4562 | printk("...ok start reshape thread\n"); |
4223 | conf->expand_lo = conf->expand_progress; | 4563 | conf->reshape_safe = conf->reshape_progress; |
4224 | atomic_set(&conf->reshape_stripes, 0); | 4564 | atomic_set(&conf->reshape_stripes, 0); |
4225 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 4565 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
4226 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 4566 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
@@ -4247,18 +4587,22 @@ static int run(mddev_t *mddev) | |||
4247 | "raid5: failed to create sysfs attributes for %s\n", | 4587 | "raid5: failed to create sysfs attributes for %s\n", |
4248 | mdname(mddev)); | 4588 | mdname(mddev)); |
4249 | 4589 | ||
4590 | mddev->queue->queue_lock = &conf->device_lock; | ||
4591 | |||
4250 | mddev->queue->unplug_fn = raid5_unplug_device; | 4592 | mddev->queue->unplug_fn = raid5_unplug_device; |
4251 | mddev->queue->backing_dev_info.congested_data = mddev; | 4593 | mddev->queue->backing_dev_info.congested_data = mddev; |
4252 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 4594 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
4253 | 4595 | ||
4254 | mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks - | 4596 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
4255 | conf->max_degraded); | ||
4256 | 4597 | ||
4257 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); | 4598 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); |
4258 | 4599 | ||
4259 | return 0; | 4600 | return 0; |
4260 | abort: | 4601 | abort: |
4602 | md_unregister_thread(mddev->thread); | ||
4603 | mddev->thread = NULL; | ||
4261 | if (conf) { | 4604 | if (conf) { |
4605 | shrink_stripes(conf); | ||
4262 | print_raid5_conf(conf); | 4606 | print_raid5_conf(conf); |
4263 | safe_put_page(conf->spare_page); | 4607 | safe_put_page(conf->spare_page); |
4264 | kfree(conf->disks); | 4608 | kfree(conf->disks); |
@@ -4396,6 +4740,10 @@ static int raid5_remove_disk(mddev_t *mddev, int number) | |||
4396 | print_raid5_conf(conf); | 4740 | print_raid5_conf(conf); |
4397 | rdev = p->rdev; | 4741 | rdev = p->rdev; |
4398 | if (rdev) { | 4742 | if (rdev) { |
4743 | if (number >= conf->raid_disks && | ||
4744 | conf->reshape_progress == MaxSector) | ||
4745 | clear_bit(In_sync, &rdev->flags); | ||
4746 | |||
4399 | if (test_bit(In_sync, &rdev->flags) || | 4747 | if (test_bit(In_sync, &rdev->flags) || |
4400 | atomic_read(&rdev->nr_pending)) { | 4748 | atomic_read(&rdev->nr_pending)) { |
4401 | err = -EBUSY; | 4749 | err = -EBUSY; |
@@ -4405,7 +4753,8 @@ static int raid5_remove_disk(mddev_t *mddev, int number) | |||
4405 | * isn't possible. | 4753 | * isn't possible. |
4406 | */ | 4754 | */ |
4407 | if (!test_bit(Faulty, &rdev->flags) && | 4755 | if (!test_bit(Faulty, &rdev->flags) && |
4408 | mddev->degraded <= conf->max_degraded) { | 4756 | mddev->degraded <= conf->max_degraded && |
4757 | number < conf->raid_disks) { | ||
4409 | err = -EBUSY; | 4758 | err = -EBUSY; |
4410 | goto abort; | 4759 | goto abort; |
4411 | } | 4760 | } |
@@ -4472,36 +4821,48 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) | |||
4472 | * any io in the removed space completes, but it hardly seems | 4821 | * any io in the removed space completes, but it hardly seems |
4473 | * worth it. | 4822 | * worth it. |
4474 | */ | 4823 | */ |
4475 | raid5_conf_t *conf = mddev_to_conf(mddev); | ||
4476 | |||
4477 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); | 4824 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); |
4478 | mddev->array_sectors = sectors * (mddev->raid_disks | 4825 | md_set_array_sectors(mddev, raid5_size(mddev, sectors, |
4479 | - conf->max_degraded); | 4826 | mddev->raid_disks)); |
4827 | if (mddev->array_sectors > | ||
4828 | raid5_size(mddev, sectors, mddev->raid_disks)) | ||
4829 | return -EINVAL; | ||
4480 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4830 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4481 | mddev->changed = 1; | 4831 | mddev->changed = 1; |
4482 | if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { | 4832 | if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { |
4483 | mddev->recovery_cp = mddev->size << 1; | 4833 | mddev->recovery_cp = mddev->dev_sectors; |
4484 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4834 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
4485 | } | 4835 | } |
4486 | mddev->size = sectors /2; | 4836 | mddev->dev_sectors = sectors; |
4487 | mddev->resync_max_sectors = sectors; | 4837 | mddev->resync_max_sectors = sectors; |
4488 | return 0; | 4838 | return 0; |
4489 | } | 4839 | } |
4490 | 4840 | ||
4491 | #ifdef CONFIG_MD_RAID5_RESHAPE | ||
4492 | static int raid5_check_reshape(mddev_t *mddev) | 4841 | static int raid5_check_reshape(mddev_t *mddev) |
4493 | { | 4842 | { |
4494 | raid5_conf_t *conf = mddev_to_conf(mddev); | 4843 | raid5_conf_t *conf = mddev_to_conf(mddev); |
4495 | int err; | ||
4496 | 4844 | ||
4497 | if (mddev->delta_disks < 0 || | 4845 | if (mddev->delta_disks == 0 && |
4498 | mddev->new_level != mddev->level) | 4846 | mddev->new_layout == mddev->layout && |
4499 | return -EINVAL; /* Cannot shrink array or change level yet */ | 4847 | mddev->new_chunk == mddev->chunk_size) |
4500 | if (mddev->delta_disks == 0) | 4848 | return -EINVAL; /* nothing to do */ |
4501 | return 0; /* nothing to do */ | ||
4502 | if (mddev->bitmap) | 4849 | if (mddev->bitmap) |
4503 | /* Cannot grow a bitmap yet */ | 4850 | /* Cannot grow a bitmap yet */ |
4504 | return -EBUSY; | 4851 | return -EBUSY; |
4852 | if (mddev->degraded > conf->max_degraded) | ||
4853 | return -EINVAL; | ||
4854 | if (mddev->delta_disks < 0) { | ||
4855 | /* We might be able to shrink, but the devices must | ||
4856 | * be made bigger first. | ||
4857 | * For raid6, 4 devices is the minimum. | ||
4858 | * Otherwise 2 devices is the minimum. | ||
4859 | */ | ||
4860 | int min = 2; | ||
4861 | if (mddev->level == 6) | ||
4862 | min = 4; | ||
4863 | if (mddev->raid_disks + mddev->delta_disks < min) | ||
4864 | return -EINVAL; | ||
4865 | } | ||
4505 | 4866 | ||
4506 | /* Can only proceed if there are plenty of stripe_heads. | 4867 | /* Can only proceed if there are plenty of stripe_heads. |
4507 | * We need a minimum of one full stripe, and for sensible progress | 4868 | * We need a minimum of one full stripe, and for sensible progress |
@@ -4514,18 +4875,12 @@ static int raid5_check_reshape(mddev_t *mddev) | |||
4514 | if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || | 4875 | if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || |
4515 | (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { | 4876 | (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { |
4516 | printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", | 4877 | printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", |
4517 | (mddev->chunk_size / STRIPE_SIZE)*4); | 4878 | (max(mddev->chunk_size, mddev->new_chunk) |
4879 | / STRIPE_SIZE)*4); | ||
4518 | return -ENOSPC; | 4880 | return -ENOSPC; |
4519 | } | 4881 | } |
4520 | 4882 | ||
4521 | err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); | 4883 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); |
4522 | if (err) | ||
4523 | return err; | ||
4524 | |||
4525 | if (mddev->degraded > conf->max_degraded) | ||
4526 | return -EINVAL; | ||
4527 | /* looks like we might be able to manage this */ | ||
4528 | return 0; | ||
4529 | } | 4884 | } |
4530 | 4885 | ||
4531 | static int raid5_start_reshape(mddev_t *mddev) | 4886 | static int raid5_start_reshape(mddev_t *mddev) |
@@ -4550,12 +4905,31 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4550 | */ | 4905 | */ |
4551 | return -EINVAL; | 4906 | return -EINVAL; |
4552 | 4907 | ||
4908 | /* Refuse to reduce size of the array. Any reductions in | ||
4909 | * array size must be made through explicit setting of the | ||
4910 | * array_size attribute. | ||
4911 | */ | ||
4912 | if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) | ||
4913 | < mddev->array_sectors) { | ||
4914 | printk(KERN_ERR "md: %s: array size must be reduced " | ||
4915 | "before number of disks\n", mdname(mddev)); | ||
4916 | return -EINVAL; | ||
4917 | } | ||
4918 | |||
4553 | atomic_set(&conf->reshape_stripes, 0); | 4919 | atomic_set(&conf->reshape_stripes, 0); |
4554 | spin_lock_irq(&conf->device_lock); | 4920 | spin_lock_irq(&conf->device_lock); |
4555 | conf->previous_raid_disks = conf->raid_disks; | 4921 | conf->previous_raid_disks = conf->raid_disks; |
4556 | conf->raid_disks += mddev->delta_disks; | 4922 | conf->raid_disks += mddev->delta_disks; |
4557 | conf->expand_progress = 0; | 4923 | conf->prev_chunk = conf->chunk_size; |
4558 | conf->expand_lo = 0; | 4924 | conf->chunk_size = mddev->new_chunk; |
4925 | conf->prev_algo = conf->algorithm; | ||
4926 | conf->algorithm = mddev->new_layout; | ||
4927 | if (mddev->delta_disks < 0) | ||
4928 | conf->reshape_progress = raid5_size(mddev, 0, 0); | ||
4929 | else | ||
4930 | conf->reshape_progress = 0; | ||
4931 | conf->reshape_safe = conf->reshape_progress; | ||
4932 | conf->generation++; | ||
4559 | spin_unlock_irq(&conf->device_lock); | 4933 | spin_unlock_irq(&conf->device_lock); |
4560 | 4934 | ||
4561 | /* Add some new drives, as many as will fit. | 4935 | /* Add some new drives, as many as will fit. |
@@ -4580,9 +4954,12 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4580 | break; | 4954 | break; |
4581 | } | 4955 | } |
4582 | 4956 | ||
4583 | spin_lock_irqsave(&conf->device_lock, flags); | 4957 | if (mddev->delta_disks > 0) { |
4584 | mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; | 4958 | spin_lock_irqsave(&conf->device_lock, flags); |
4585 | spin_unlock_irqrestore(&conf->device_lock, flags); | 4959 | mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) |
4960 | - added_devices; | ||
4961 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
4962 | } | ||
4586 | mddev->raid_disks = conf->raid_disks; | 4963 | mddev->raid_disks = conf->raid_disks; |
4587 | mddev->reshape_position = 0; | 4964 | mddev->reshape_position = 0; |
4588 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 4965 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
@@ -4597,52 +4974,86 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
4597 | mddev->recovery = 0; | 4974 | mddev->recovery = 0; |
4598 | spin_lock_irq(&conf->device_lock); | 4975 | spin_lock_irq(&conf->device_lock); |
4599 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; | 4976 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; |
4600 | conf->expand_progress = MaxSector; | 4977 | conf->reshape_progress = MaxSector; |
4601 | spin_unlock_irq(&conf->device_lock); | 4978 | spin_unlock_irq(&conf->device_lock); |
4602 | return -EAGAIN; | 4979 | return -EAGAIN; |
4603 | } | 4980 | } |
4981 | conf->reshape_checkpoint = jiffies; | ||
4604 | md_wakeup_thread(mddev->sync_thread); | 4982 | md_wakeup_thread(mddev->sync_thread); |
4605 | md_new_event(mddev); | 4983 | md_new_event(mddev); |
4606 | return 0; | 4984 | return 0; |
4607 | } | 4985 | } |
4608 | #endif | ||
4609 | 4986 | ||
4987 | /* This is called from the reshape thread and should make any | ||
4988 | * changes needed in 'conf' | ||
4989 | */ | ||
4610 | static void end_reshape(raid5_conf_t *conf) | 4990 | static void end_reshape(raid5_conf_t *conf) |
4611 | { | 4991 | { |
4612 | struct block_device *bdev; | ||
4613 | 4992 | ||
4614 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { | 4993 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { |
4615 | conf->mddev->array_sectors = 2 * conf->mddev->size * | 4994 | |
4616 | (conf->raid_disks - conf->max_degraded); | ||
4617 | set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors); | ||
4618 | conf->mddev->changed = 1; | ||
4619 | |||
4620 | bdev = bdget_disk(conf->mddev->gendisk, 0); | ||
4621 | if (bdev) { | ||
4622 | mutex_lock(&bdev->bd_inode->i_mutex); | ||
4623 | i_size_write(bdev->bd_inode, | ||
4624 | (loff_t)conf->mddev->array_sectors << 9); | ||
4625 | mutex_unlock(&bdev->bd_inode->i_mutex); | ||
4626 | bdput(bdev); | ||
4627 | } | ||
4628 | spin_lock_irq(&conf->device_lock); | 4995 | spin_lock_irq(&conf->device_lock); |
4629 | conf->expand_progress = MaxSector; | 4996 | conf->previous_raid_disks = conf->raid_disks; |
4997 | conf->reshape_progress = MaxSector; | ||
4630 | spin_unlock_irq(&conf->device_lock); | 4998 | spin_unlock_irq(&conf->device_lock); |
4631 | conf->mddev->reshape_position = MaxSector; | 4999 | wake_up(&conf->wait_for_overlap); |
4632 | 5000 | ||
4633 | /* read-ahead size must cover two whole stripes, which is | 5001 | /* read-ahead size must cover two whole stripes, which is |
4634 | * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices | 5002 | * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices |
4635 | */ | 5003 | */ |
4636 | { | 5004 | { |
4637 | int data_disks = conf->previous_raid_disks - conf->max_degraded; | 5005 | int data_disks = conf->raid_disks - conf->max_degraded; |
4638 | int stripe = data_disks * | 5006 | int stripe = data_disks * (conf->chunk_size |
4639 | (conf->mddev->chunk_size / PAGE_SIZE); | 5007 | / PAGE_SIZE); |
4640 | if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) | 5008 | if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
4641 | conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; | 5009 | conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
4642 | } | 5010 | } |
4643 | } | 5011 | } |
4644 | } | 5012 | } |
4645 | 5013 | ||
5014 | /* This is called from the raid5d thread with mddev_lock held. | ||
5015 | * It makes config changes to the device. | ||
5016 | */ | ||
5017 | static void raid5_finish_reshape(mddev_t *mddev) | ||
5018 | { | ||
5019 | struct block_device *bdev; | ||
5020 | raid5_conf_t *conf = mddev_to_conf(mddev); | ||
5021 | |||
5022 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | ||
5023 | |||
5024 | if (mddev->delta_disks > 0) { | ||
5025 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | ||
5026 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
5027 | mddev->changed = 1; | ||
5028 | |||
5029 | bdev = bdget_disk(mddev->gendisk, 0); | ||
5030 | if (bdev) { | ||
5031 | mutex_lock(&bdev->bd_inode->i_mutex); | ||
5032 | i_size_write(bdev->bd_inode, | ||
5033 | (loff_t)mddev->array_sectors << 9); | ||
5034 | mutex_unlock(&bdev->bd_inode->i_mutex); | ||
5035 | bdput(bdev); | ||
5036 | } | ||
5037 | } else { | ||
5038 | int d; | ||
5039 | mddev->degraded = conf->raid_disks; | ||
5040 | for (d = 0; d < conf->raid_disks ; d++) | ||
5041 | if (conf->disks[d].rdev && | ||
5042 | test_bit(In_sync, | ||
5043 | &conf->disks[d].rdev->flags)) | ||
5044 | mddev->degraded--; | ||
5045 | for (d = conf->raid_disks ; | ||
5046 | d < conf->raid_disks - mddev->delta_disks; | ||
5047 | d++) | ||
5048 | raid5_remove_disk(mddev, d); | ||
5049 | } | ||
5050 | mddev->layout = conf->algorithm; | ||
5051 | mddev->chunk_size = conf->chunk_size; | ||
5052 | mddev->reshape_position = MaxSector; | ||
5053 | mddev->delta_disks = 0; | ||
5054 | } | ||
5055 | } | ||
5056 | |||
4646 | static void raid5_quiesce(mddev_t *mddev, int state) | 5057 | static void raid5_quiesce(mddev_t *mddev, int state) |
4647 | { | 5058 | { |
4648 | raid5_conf_t *conf = mddev_to_conf(mddev); | 5059 | raid5_conf_t *conf = mddev_to_conf(mddev); |
@@ -4672,6 +5083,212 @@ static void raid5_quiesce(mddev_t *mddev, int state) | |||
4672 | } | 5083 | } |
4673 | } | 5084 | } |
4674 | 5085 | ||
5086 | |||
5087 | static void *raid5_takeover_raid1(mddev_t *mddev) | ||
5088 | { | ||
5089 | int chunksect; | ||
5090 | |||
5091 | if (mddev->raid_disks != 2 || | ||
5092 | mddev->degraded > 1) | ||
5093 | return ERR_PTR(-EINVAL); | ||
5094 | |||
5095 | /* Should check if there are write-behind devices? */ | ||
5096 | |||
5097 | chunksect = 64*2; /* 64K by default */ | ||
5098 | |||
5099 | /* The array must be an exact multiple of chunksize */ | ||
5100 | while (chunksect && (mddev->array_sectors & (chunksect-1))) | ||
5101 | chunksect >>= 1; | ||
5102 | |||
5103 | if ((chunksect<<9) < STRIPE_SIZE) | ||
5104 | /* array size does not allow a suitable chunk size */ | ||
5105 | return ERR_PTR(-EINVAL); | ||
5106 | |||
5107 | mddev->new_level = 5; | ||
5108 | mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; | ||
5109 | mddev->new_chunk = chunksect << 9; | ||
5110 | |||
5111 | return setup_conf(mddev); | ||
5112 | } | ||
5113 | |||
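[Editor's note: the chunk-selection loop in raid5_takeover_raid1() above, in isolation. Start at 64KiB (128 sectors) and halve until the chunk size divides the array size exactly; fail if that drives it below one stripe, here assumed to be a 4KiB page, the common STRIPE_SIZE. The function name is an editorial stand-in:]

    typedef unsigned long long sector_t;

    static int pick_chunk_sectors(sector_t array_sectors)
    {
            int chunksect = 64 * 2;            /* 64KiB in 512-byte sectors */

            while (chunksect && (array_sectors & (chunksect - 1)))
                    chunksect >>= 1;

            if ((sector_t)chunksect * 512 < 4096)
                    return 0;                  /* no usable chunk size */
            return chunksect;
    }

    /* e.g. pick_chunk_sectors(1000) == 8: 128/64/32/16 do not divide
     * 1000, 8 does, and 8*512 = 4096 bytes is still one full stripe.
     */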
5114 | static void *raid5_takeover_raid6(mddev_t *mddev) | ||
5115 | { | ||
5116 | int new_layout; | ||
5117 | |||
5118 | switch (mddev->layout) { | ||
5119 | case ALGORITHM_LEFT_ASYMMETRIC_6: | ||
5120 | new_layout = ALGORITHM_LEFT_ASYMMETRIC; | ||
5121 | break; | ||
5122 | case ALGORITHM_RIGHT_ASYMMETRIC_6: | ||
5123 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC; | ||
5124 | break; | ||
5125 | case ALGORITHM_LEFT_SYMMETRIC_6: | ||
5126 | new_layout = ALGORITHM_LEFT_SYMMETRIC; | ||
5127 | break; | ||
5128 | case ALGORITHM_RIGHT_SYMMETRIC_6: | ||
5129 | new_layout = ALGORITHM_RIGHT_SYMMETRIC; | ||
5130 | break; | ||
5131 | case ALGORITHM_PARITY_0_6: | ||
5132 | new_layout = ALGORITHM_PARITY_0; | ||
5133 | break; | ||
5134 | case ALGORITHM_PARITY_N: | ||
5135 | new_layout = ALGORITHM_PARITY_N; | ||
5136 | break; | ||
5137 | default: | ||
5138 | return ERR_PTR(-EINVAL); | ||
5139 | } | ||
5140 | mddev->new_level = 5; | ||
5141 | mddev->new_layout = new_layout; | ||
5142 | mddev->delta_disks = -1; | ||
5143 | mddev->raid_disks -= 1; | ||
5144 | return setup_conf(mddev); | ||
5145 | } | ||
5146 | |||
5147 | |||
5148 | static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) | ||
5149 | { | ||
5150 | /* For a 2-drive array, the layout and chunk size can be changed | ||
5151 | * immediately, as no restriping is needed. | ||
5152 | * For larger arrays we record the new value - after validation | ||
5153 | * to be used by a reshape pass. | ||
5154 | */ | ||
5155 | raid5_conf_t *conf = mddev_to_conf(mddev); | ||
5156 | |||
5157 | if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) | ||
5158 | return -EINVAL; | ||
5159 | if (new_chunk > 0) { | ||
5160 | if (new_chunk & (new_chunk-1)) | ||
5161 | /* not a power of 2 */ | ||
5162 | return -EINVAL; | ||
5163 | if (new_chunk < PAGE_SIZE) | ||
5164 | return -EINVAL; | ||
5165 | if (mddev->array_sectors & ((new_chunk>>9)-1)) | ||
5166 | /* not factor of array size */ | ||
5167 | return -EINVAL; | ||
5168 | } | ||
5169 | |||
5170 | /* They look valid */ | ||
5171 | |||
5172 | if (mddev->raid_disks == 2) { | ||
5173 | |||
5174 | if (new_layout >= 0) { | ||
5175 | conf->algorithm = new_layout; | ||
5176 | mddev->layout = mddev->new_layout = new_layout; | ||
5177 | } | ||
5178 | if (new_chunk > 0) { | ||
5179 | conf->chunk_size = new_chunk; | ||
5180 | mddev->chunk_size = mddev->new_chunk = new_chunk; | ||
5181 | } | ||
5182 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
5183 | md_wakeup_thread(mddev->thread); | ||
5184 | } else { | ||
5185 | if (new_layout >= 0) | ||
5186 | mddev->new_layout = new_layout; | ||
5187 | if (new_chunk > 0) | ||
5188 | mddev->new_chunk = new_chunk; | ||
5189 | } | ||
5190 | return 0; | ||
5191 | } | ||
5192 | |||
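[Editor's note: the chunk-size validation used by both reconfig paths above, in isolation: a positive chunk size must be a power of two ((x & (x-1)) == 0), at least one page, and its sector count must divide the array size exactly. A plain-C sketch with editorial names:]

    #include <stdbool.h>

    typedef unsigned long long sector_t;

    static bool new_chunk_ok(int new_chunk, sector_t array_sectors,
                             long page_size)
    {
            if (new_chunk & (new_chunk - 1))
                    return false;       /* not a power of 2 */
            if (new_chunk < page_size)
                    return false;
            if (array_sectors & ((sector_t)(new_chunk >> 9) - 1))
                    return false;       /* not a factor of the array size */
            return true;
    }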
5193 | static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) | ||
5194 | { | ||
5195 | if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) | ||
5196 | return -EINVAL; | ||
5197 | if (new_chunk > 0) { | ||
5198 | if (new_chunk & (new_chunk-1)) | ||
5199 | /* not a power of 2 */ | ||
5200 | return -EINVAL; | ||
5201 | if (new_chunk < PAGE_SIZE) | ||
5202 | return -EINVAL; | ||
5203 | if (mddev->array_sectors & ((new_chunk>>9)-1)) | ||
5204 | /* not factor of array size */ | ||
5205 | return -EINVAL; | ||
5206 | } | ||
5207 | |||
5208 | /* They look valid */ | ||
5209 | |||
5210 | if (new_layout >= 0) | ||
5211 | mddev->new_layout = new_layout; | ||
5212 | if (new_chunk > 0) | ||
5213 | mddev->new_chunk = new_chunk; | ||
5214 | |||
5215 | return 0; | ||
5216 | } | ||
5217 | |||
5218 | static void *raid5_takeover(mddev_t *mddev) | ||
5219 | { | ||
5220 | /* raid5 can take over: | ||
5221 | * raid0 - if all devices are the same - make it a raid4 layout | ||
5222 | * raid1 - if there are two drives. We need to know the chunk size | ||
5223 | * raid4 - trivial - just use a raid4 layout. | ||
5224 | * raid6 - Providing it is a *_6 layout | ||
5225 | * | ||
5226 | * For now, just do raid1 | ||
5227 | */ | ||
5228 | |||
5229 | if (mddev->level == 1) | ||
5230 | return raid5_takeover_raid1(mddev); | ||
5231 | if (mddev->level == 4) { | ||
5232 | mddev->new_layout = ALGORITHM_PARITY_N; | ||
5233 | mddev->new_level = 5; | ||
5234 | return setup_conf(mddev); | ||
5235 | } | ||
5236 | if (mddev->level == 6) | ||
5237 | return raid5_takeover_raid6(mddev); | ||
5238 | |||
5239 | return ERR_PTR(-EINVAL); | ||
5240 | } | ||
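These ->takeover methods are called by the md core when a level change is requested for a running array; each one validates the source geometry and, if it is compatible, returns a fresh conf built by setup_conf() for the core to install, otherwise an ERR_PTR value is propagated back to the caller.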
5241 | |||
5242 | |||
5243 | static struct mdk_personality raid5_personality; | ||
5244 | |||
5245 | static void *raid6_takeover(mddev_t *mddev) | ||
5246 | { | ||
5247 | /* Currently raid6 can only take over a raid5.  We map the | ||
5248 | * raid5 layout to the equivalent raid6 *_6 layout, | ||
5249 | * which puts the Q block at the end. | ||
5250 | */ | ||
5251 | int new_layout; | ||
5252 | |||
5253 | if (mddev->pers != &raid5_personality) | ||
5254 | return ERR_PTR(-EINVAL); | ||
5255 | if (mddev->degraded > 1) | ||
5256 | return ERR_PTR(-EINVAL); | ||
5257 | if (mddev->raid_disks > 253) | ||
5258 | return ERR_PTR(-EINVAL); | ||
5259 | if (mddev->raid_disks < 3) | ||
5260 | return ERR_PTR(-EINVAL); | ||
5261 | |||
5262 | switch (mddev->layout) { | ||
5263 | case ALGORITHM_LEFT_ASYMMETRIC: | ||
5264 | new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; | ||
5265 | break; | ||
5266 | case ALGORITHM_RIGHT_ASYMMETRIC: | ||
5267 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; | ||
5268 | break; | ||
5269 | case ALGORITHM_LEFT_SYMMETRIC: | ||
5270 | new_layout = ALGORITHM_LEFT_SYMMETRIC_6; | ||
5271 | break; | ||
5272 | case ALGORITHM_RIGHT_SYMMETRIC: | ||
5273 | new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; | ||
5274 | break; | ||
5275 | case ALGORITHM_PARITY_0: | ||
5276 | new_layout = ALGORITHM_PARITY_0_6; | ||
5277 | break; | ||
5278 | case ALGORITHM_PARITY_N: | ||
5279 | new_layout = ALGORITHM_PARITY_N; | ||
5280 | break; | ||
5281 | default: | ||
5282 | return ERR_PTR(-EINVAL); | ||
5283 | } | ||
5284 | mddev->new_level = 6; | ||
5285 | mddev->new_layout = new_layout; | ||
5286 | mddev->delta_disks = 1; | ||
5287 | mddev->raid_disks += 1; | ||
5288 | return setup_conf(mddev); | ||
5289 | } | ||
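The switch above mirrors, in the opposite direction, the one at the top of raid5_takeover_raid6(): each raid5 layout pairs with the *_6 layout that leaves data and P placement untouched and appends Q on a new last device. A purely illustrative rendering of that pairing (this array does not exist in the driver):

/* Illustrative raid5 <-> raid6 takeover pairing; the two switch
 * statements in this file encode exactly this mapping.
 */
static const struct {
        int raid5_layout;
        int raid6_layout;
} takeover_layouts[] = {
        { ALGORITHM_LEFT_ASYMMETRIC,  ALGORITHM_LEFT_ASYMMETRIC_6  },
        { ALGORITHM_RIGHT_ASYMMETRIC, ALGORITHM_RIGHT_ASYMMETRIC_6 },
        { ALGORITHM_LEFT_SYMMETRIC,   ALGORITHM_LEFT_SYMMETRIC_6   },
        { ALGORITHM_RIGHT_SYMMETRIC,  ALGORITHM_RIGHT_SYMMETRIC_6  },
        { ALGORITHM_PARITY_0,         ALGORITHM_PARITY_0_6         },
        { ALGORITHM_PARITY_N,         ALGORITHM_PARITY_N           }, /* P-then-Q at the end; no separate _6 value */
};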
5290 | |||
5291 | |||
4675 | static struct mdk_personality raid6_personality = | 5292 | static struct mdk_personality raid6_personality = |
4676 | { | 5293 | { |
4677 | .name = "raid6", | 5294 | .name = "raid6", |
@@ -4687,11 +5304,13 @@ static struct mdk_personality raid6_personality = | |||
4687 | .spare_active = raid5_spare_active, | 5304 | .spare_active = raid5_spare_active, |
4688 | .sync_request = sync_request, | 5305 | .sync_request = sync_request, |
4689 | .resize = raid5_resize, | 5306 | .resize = raid5_resize, |
4690 | #ifdef CONFIG_MD_RAID5_RESHAPE | 5307 | .size = raid5_size, |
4691 | .check_reshape = raid5_check_reshape, | 5308 | .check_reshape = raid5_check_reshape, |
4692 | .start_reshape = raid5_start_reshape, | 5309 | .start_reshape = raid5_start_reshape, |
4693 | #endif | 5310 | .finish_reshape = raid5_finish_reshape, |
4694 | .quiesce = raid5_quiesce, | 5311 | .quiesce = raid5_quiesce, |
5312 | .takeover = raid6_takeover, | ||
5313 | .reconfig = raid6_reconfig, | ||
4695 | }; | 5314 | }; |
4696 | static struct mdk_personality raid5_personality = | 5315 | static struct mdk_personality raid5_personality = |
4697 | { | 5316 | { |
@@ -4708,11 +5327,13 @@ static struct mdk_personality raid5_personality = | |||
4708 | .spare_active = raid5_spare_active, | 5327 | .spare_active = raid5_spare_active, |
4709 | .sync_request = sync_request, | 5328 | .sync_request = sync_request, |
4710 | .resize = raid5_resize, | 5329 | .resize = raid5_resize, |
4711 | #ifdef CONFIG_MD_RAID5_RESHAPE | 5330 | .size = raid5_size, |
4712 | .check_reshape = raid5_check_reshape, | 5331 | .check_reshape = raid5_check_reshape, |
4713 | .start_reshape = raid5_start_reshape, | 5332 | .start_reshape = raid5_start_reshape, |
4714 | #endif | 5333 | .finish_reshape = raid5_finish_reshape, |
4715 | .quiesce = raid5_quiesce, | 5334 | .quiesce = raid5_quiesce, |
5335 | .takeover = raid5_takeover, | ||
5336 | .reconfig = raid5_reconfig, | ||
4716 | }; | 5337 | }; |
4717 | 5338 | ||
4718 | static struct mdk_personality raid4_personality = | 5339 | static struct mdk_personality raid4_personality = |
@@ -4730,20 +5351,15 @@ static struct mdk_personality raid4_personality = | |||
4730 | .spare_active = raid5_spare_active, | 5351 | .spare_active = raid5_spare_active, |
4731 | .sync_request = sync_request, | 5352 | .sync_request = sync_request, |
4732 | .resize = raid5_resize, | 5353 | .resize = raid5_resize, |
4733 | #ifdef CONFIG_MD_RAID5_RESHAPE | 5354 | .size = raid5_size, |
4734 | .check_reshape = raid5_check_reshape, | 5355 | .check_reshape = raid5_check_reshape, |
4735 | .start_reshape = raid5_start_reshape, | 5356 | .start_reshape = raid5_start_reshape, |
4736 | #endif | 5357 | .finish_reshape = raid5_finish_reshape, |
4737 | .quiesce = raid5_quiesce, | 5358 | .quiesce = raid5_quiesce, |
4738 | }; | 5359 | }; |
4739 | 5360 | ||
4740 | static int __init raid5_init(void) | 5361 | static int __init raid5_init(void) |
4741 | { | 5362 | { |
4742 | int e; | ||
4743 | |||
4744 | e = raid6_select_algo(); | ||
4745 | if ( e ) | ||
4746 | return e; | ||
4747 | register_md_personality(&raid6_personality); | 5363 | register_md_personality(&raid6_personality); |
4748 | register_md_personality(&raid5_personality); | 5364 | register_md_personality(&raid5_personality); |
4749 | register_md_personality(&raid4_personality); | 5365 | register_md_personality(&raid4_personality); |