author		Singh, Vimal <vimalsingh@ti.com>	2008-08-23 12:18:34 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-08-25 07:23:36 -0400
commit		d68156cfad0fe09201dd049fff167a8a881427ad (patch)
tree		25014d3eaf89f9c93a33bf88f2d621a59e0d5f58 /drivers/mtd
parent		dffc8d66544563fe00f176f230d5d8a5b45847bb (diff)
[MTD] [NAND] nand_ecc.c: adding support for 512 byte ecc
Support 512 byte ECC calculation
[FM: updated two comments]
Signed-off-by: Vimal Singh <vimalsingh@ti.com>
Signed-off-by: Frans Meulenbroeks <fransmeulenbroeks@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/nand/nand_ecc.c	86
1 file changed, 62 insertions(+), 24 deletions(-)
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index fd19787c9ce7..868147acce2c 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -42,6 +42,8 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <asm/byteorder.h>
 #else
@@ -148,8 +150,9 @@ static const char addressbits[256] = {
 };
 
 /**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
- * @mtd: MTD block structure (unused)
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ *                      block
+ * @mtd: MTD block structure
  * @buf: input buffer with raw data
  * @code: output buffer with ECC
  */
@@ -158,13 +161,18 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
 {
         int i;
         const uint32_t *bp = (uint32_t *)buf;
+        /* 256 or 512 bytes/ecc */
+        const uint32_t eccsize_mult =
+                        (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
         uint32_t cur;           /* current value in buffer */
-        /* rp0..rp15 are the various accumulated parities (per byte) */
+        /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
         uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
-        uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+        uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+        uint32_t uninitialized_var(rp17);       /* to make compiler happy */
         uint32_t par;           /* the cumulative parity for all data */
         uint32_t tmppar;        /* the cumulative parity for this iteration;
-                                   for rp12 and rp14 at the end of the loop */
+                                   for rp12, rp14 and rp16 at the end of the
+                                   loop */
 
         par = 0;
         rp4 = 0;
@@ -173,6 +181,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
         rp10 = 0;
         rp12 = 0;
         rp14 = 0;
+        rp16 = 0;
 
         /*
          * The loop is unrolled a number of times;
@@ -181,10 +190,10 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
          * Note: passing unaligned data might give a performance penalty.
          * It is assumed that the buffers are aligned.
          * tmppar is the cumulative sum of this iteration.
-         * needed for calculating rp12, rp14 and par
+         * needed for calculating rp12, rp14, rp16 and par
          * also used as a performance improvement for rp6, rp8 and rp10
          */
-        for (i = 0; i < 4; i++) {
+        for (i = 0; i < eccsize_mult << 2; i++) {
                 cur = *bp++;
                 tmppar = cur;
                 rp4 ^= cur;
@@ -247,12 +256,14 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
                 rp12 ^= tmppar;
                 if ((i & 0x2) == 0)
                         rp14 ^= tmppar;
+                if (eccsize_mult == 2 && (i & 0x4) == 0)
+                        rp16 ^= tmppar;
         }
 
         /*
          * handle the fact that we use longword operations
-         * we'll bring rp4..rp14 back to single byte entities by shifting and
-         * xoring first fold the upper and lower 16 bits,
+         * we'll bring rp4..rp14..rp16 back to single byte entities by
+         * shifting and xoring first fold the upper and lower 16 bits,
          * then the upper and lower 8 bits.
          */
         rp4 ^= (rp4 >> 16);
@@ -273,6 +284,11 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
         rp14 ^= (rp14 >> 16);
         rp14 ^= (rp14 >> 8);
         rp14 &= 0xff;
+        if (eccsize_mult == 2) {
+                rp16 ^= (rp16 >> 16);
+                rp16 ^= (rp16 >> 8);
+                rp16 &= 0xff;
+        }
 
         /*
          * we also need to calculate the row parity for rp0..rp3
@@ -315,7 +331,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
         par &= 0xff;
 
         /*
-         * and calculate rp5..rp15
+         * and calculate rp5..rp15..rp17
          * note that par = rp4 ^ rp5 and due to the commutative property
          * of the ^ operator we can say:
          * rp5 = (par ^ rp4);
@@ -329,6 +345,8 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
         rp11 = (par ^ rp10) & 0xff;
         rp13 = (par ^ rp12) & 0xff;
         rp15 = (par ^ rp14) & 0xff;
+        if (eccsize_mult == 2)
+                rp17 = (par ^ rp16) & 0xff;
 
         /*
          * Finally calculate the ecc bits.
@@ -375,32 +393,46 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
                 (invparity[rp9] << 1) |
                 (invparity[rp8]);
 #endif
-        code[2] =
-            (invparity[par & 0xf0] << 7) |
-            (invparity[par & 0x0f] << 6) |
-            (invparity[par & 0xcc] << 5) |
-            (invparity[par & 0x33] << 4) |
-            (invparity[par & 0xaa] << 3) |
-            (invparity[par & 0x55] << 2) |
-            3;
+        if (eccsize_mult == 1)
+                code[2] =
+                    (invparity[par & 0xf0] << 7) |
+                    (invparity[par & 0x0f] << 6) |
+                    (invparity[par & 0xcc] << 5) |
+                    (invparity[par & 0x33] << 4) |
+                    (invparity[par & 0xaa] << 3) |
+                    (invparity[par & 0x55] << 2) |
+                    3;
+        else
+                code[2] =
+                    (invparity[par & 0xf0] << 7) |
+                    (invparity[par & 0x0f] << 6) |
+                    (invparity[par & 0xcc] << 5) |
+                    (invparity[par & 0x33] << 4) |
+                    (invparity[par & 0xaa] << 3) |
+                    (invparity[par & 0x55] << 2) |
+                    (invparity[rp17] << 1) |
+                    (invparity[rp16] << 0);
         return 0;
 }
 EXPORT_SYMBOL(nand_calculate_ecc);
 
 /**
  * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure (unused)
+ * @mtd: MTD block structure
  * @buf: raw data read from the chip
  * @read_ecc: ECC from the chip
  * @calc_ecc: the ECC calculated from raw data
  *
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
  */
 int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
                       unsigned char *read_ecc, unsigned char *calc_ecc)
 {
         unsigned char b0, b1, b2;
         unsigned char byte_addr, bit_addr;
+        /* 256 or 512 bytes/ecc */
+        const uint32_t eccsize_mult =
+                        (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
 
         /*
          * b0 to b2 indicate which bit is faulty (if any)
@@ -426,10 +458,12 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
 
         if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
             (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
-            (((b2 ^ (b2 >> 1)) & 0x54) == 0x54)) { /* single bit error */
+            ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+             (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+                /* single bit error */
                 /*
-                 * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte
-                 * cp 5/3/1 indicate the faulty bit.
+                 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+                 * byte, cp 5/3/1 indicate the faulty bit.
                  * A lookup table (called addressbits) is used to filter
                  * the bits from the byte they are in.
                  * A marginal optimisation is possible by having three
@@ -443,7 +477,11 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
                  * We could also do addressbits[b2] >> 1 but for the
                  * performace it does not make any difference
                  */
-                byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+                if (eccsize_mult == 1)
+                        byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+                else
+                        byte_addr = (addressbits[b2 & 0x3] << 8) +
+                                    (addressbits[b1] << 4) + addressbits[b0];
                 bit_addr = addressbits[b2 >> 2];
                 /* flip the bit */
                 buf[byte_addr] ^= (1 << bit_addr);
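
A note on the sizing arithmetic used throughout the patch: ecc.size is either 256 or 512 here, so shifting it right by 8 gives an eccsize_mult of 1 or 2; the unrolled main loop then runs eccsize_mult << 2 iterations of 16 longwords (64 bytes) each, and the extra rp16/rp17 parity pair and the changed b2 mask only come into play when eccsize_mult == 2. The standalone C sketch below (hypothetical user-space code, not part of the patch) just demonstrates that arithmetic:

/* Hypothetical user-space illustration of the patch's eccsize_mult
 * arithmetic; not kernel code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t ecc_sizes[] = { 256, 512 };   /* ECC step sizes the patch targets */
        unsigned n;

        for (n = 0; n < 2; n++) {
                uint32_t ecc_size = ecc_sizes[n];
                /* 256 >> 8 == 1, 512 >> 8 == 2, as in nand_calculate_ecc() */
                uint32_t eccsize_mult = ecc_size >> 8;
                /* the unrolled loop runs eccsize_mult << 2 times and
                 * consumes 16 longwords (64 bytes) per iteration */
                uint32_t iterations = eccsize_mult << 2;

                printf("%u-byte step: eccsize_mult=%u, iterations=%u, bytes covered=%u\n",
                       ecc_size, eccsize_mult, iterations, iterations * 64);
        }
        return 0;
}

Note that the shift only makes sense for the 256 and 512 byte steps the patch targets; other step sizes would need a different scheme.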