aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Petazzoni <thomas.petazzoni@free-electrons.com>2016-02-10 08:54:21 -0500
committerBrian Norris <computersforpeace@gmail.com>2016-02-12 14:13:34 -0500
commitc2cdace755b583bae540a9979bff1aa428181b8c (patch)
tree5fcbffb2edb4e3cf83b4f6ec491c73c57036e2c7
parent95a001f22b1c5717eafd500a43832249ddd93662 (diff)
mtd: nand: pxa3xx_nand: add support for partial chunks
This commit is needed to properly support the 8-bits ECC configuration with 4KB pages. When pages larger than 2 KB are used on platforms using the PXA3xx NAND controller, the reading/programming operations need to be split in chunks of 2 KBs or less because the controller FIFO is limited to about 2 KB (i.e. a bit more than 2 KB to accommodate OOB data). Due to this requirement, the data layout on NAND is a bit strange, with ECC interleaved with data, at the end of each chunk. When a 4-bits ECC configuration is used with 4 KB pages, the physical data layout on the NAND looks like this: | 2048 data | 32 spare | 30 ECC | 2048 data | 32 spare | 30 ECC | So the data chunks have an equal size, 2080 bytes for each chunk, which the driver supports properly. When a 8-bits ECC configuration is used with 4KB pages, the physical data layout on the NAND looks like this: | 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 64 spare | 30 ECC | So, the spare area is stored in its own chunk, which has a different size than the other chunks. Since OOB is not used by UBIFS, the initial implementation of the driver has chosen to not support reading this additional "spare" chunk of data. Unfortunately, Marvell has chosen to store the BBT signature in the OOB area. Therefore, if the driver doesn't read this spare area, Linux has no way of finding the BBT. It thinks there is no BBT, and rewrites one, which U-Boot does not recognize, causing compatibility problems between the bootloader and the kernel in terms of NAND usage. To fix this, this commit implements the support for reading a partial last chunk. This support is currently only useful for the case of 8 bits ECC with 4 KB pages, but it will be useful in the future to enable other configurations such as 12 bits and 16 bits ECC with 4 KB pages, or 8 bits ECC with 8 KB pages, etc. All those configurations have a "last" chunk that doesn't have the same size as the other chunks. 
In order to implement reading of the last chunk, this commit: - Adds a number of new fields to the pxa3xx_nand_info to describe how many full chunks and how many chunks we have, the size of full chunks and partial chunks, both in terms of data area and spare area. - Fills in the step_chunk_size and step_spare_size variables to describe how much data and spare should be read/written for the current read/program step. - Reworks the state machine to accommodate doing the additional read or program step when a last partial chunk is used. This commit has been tested on a Marvell Armada 398 DB board, with a 4KB page NAND, tested in both 4 bits ECC and 8 bits ECC configurations. Robert Jarzmik has tested on some PXA platforms. Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> Tested-by: Robert Jarzmik <robert.jarzmik@free.fr> Acked-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> Signed-off-by: Brian Norris <computersforpeace@gmail.com>
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c157
1 files changed, 101 insertions, 56 deletions
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e42496adda8d..56e8954dd02d 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -234,15 +234,44 @@ struct pxa3xx_nand_info {
234 int use_spare; /* use spare ? */ 234 int use_spare; /* use spare ? */
235 int need_wait; 235 int need_wait;
236 236
237 unsigned int data_size; /* data to be read from FIFO */ 237 /* Amount of real data per full chunk */
238 unsigned int chunk_size; /* split commands chunk size */ 238 unsigned int chunk_size;
239 unsigned int oob_size; 239
240 /* Amount of spare data per full chunk */
240 unsigned int spare_size; 241 unsigned int spare_size;
242
243 /* Number of full chunks (i.e chunk_size + spare_size) */
244 unsigned int nfullchunks;
245
246 /*
247 * Total number of chunks. If equal to nfullchunks, then there
248 * are only full chunks. Otherwise, there is one last chunk of
249 * size (last_chunk_size + last_spare_size)
250 */
251 unsigned int ntotalchunks;
252
253 /* Amount of real data in the last chunk */
254 unsigned int last_chunk_size;
255
256 /* Amount of spare data in the last chunk */
257 unsigned int last_spare_size;
258
241 unsigned int ecc_size; 259 unsigned int ecc_size;
242 unsigned int ecc_err_cnt; 260 unsigned int ecc_err_cnt;
243 unsigned int max_bitflips; 261 unsigned int max_bitflips;
244 int retcode; 262 int retcode;
245 263
264 /*
265 * Variables only valid during command
266 * execution. step_chunk_size and step_spare_size is the
267 * amount of real data and spare data in the current
268 * chunk. cur_chunk is the current chunk being
269 * read/programmed.
270 */
271 unsigned int step_chunk_size;
272 unsigned int step_spare_size;
273 unsigned int cur_chunk;
274
246 /* cached register value */ 275 /* cached register value */
247 uint32_t reg_ndcr; 276 uint32_t reg_ndcr;
248 uint32_t ndtr0cs0; 277 uint32_t ndtr0cs0;
@@ -538,25 +567,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
538 return 0; 567 return 0;
539} 568}
540 569
541/*
542 * Set the data and OOB size, depending on the selected
543 * spare and ECC configuration.
544 * Only applicable to READ0, READOOB and PAGEPROG commands.
545 */
546static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
547 struct mtd_info *mtd)
548{
549 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
550
551 info->data_size = mtd->writesize;
552 if (!oob_enable)
553 return;
554
555 info->oob_size = info->spare_size;
556 if (!info->use_ecc)
557 info->oob_size += info->ecc_size;
558}
559
560/** 570/**
561 * NOTE: it is a must to set ND_RUN firstly, then write 571 * NOTE: it is a must to set ND_RUN firstly, then write
562 * command buffer, otherwise, it does not work. 572 * command buffer, otherwise, it does not work.
@@ -672,28 +682,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
672 682
673static void handle_data_pio(struct pxa3xx_nand_info *info) 683static void handle_data_pio(struct pxa3xx_nand_info *info)
674{ 684{
675 unsigned int do_bytes = min(info->data_size, info->chunk_size);
676
677 switch (info->state) { 685 switch (info->state) {
678 case STATE_PIO_WRITING: 686 case STATE_PIO_WRITING:
679 writesl(info->mmio_base + NDDB, 687 if (info->step_chunk_size)
680 info->data_buff + info->data_buff_pos, 688 writesl(info->mmio_base + NDDB,
681 DIV_ROUND_UP(do_bytes, 4)); 689 info->data_buff + info->data_buff_pos,
690 DIV_ROUND_UP(info->step_chunk_size, 4));
682 691
683 if (info->oob_size > 0) 692 if (info->step_spare_size)
684 writesl(info->mmio_base + NDDB, 693 writesl(info->mmio_base + NDDB,
685 info->oob_buff + info->oob_buff_pos, 694 info->oob_buff + info->oob_buff_pos,
686 DIV_ROUND_UP(info->oob_size, 4)); 695 DIV_ROUND_UP(info->step_spare_size, 4));
687 break; 696 break;
688 case STATE_PIO_READING: 697 case STATE_PIO_READING:
689 drain_fifo(info, 698 if (info->step_chunk_size)
690 info->data_buff + info->data_buff_pos, 699 drain_fifo(info,
691 DIV_ROUND_UP(do_bytes, 4)); 700 info->data_buff + info->data_buff_pos,
701 DIV_ROUND_UP(info->step_chunk_size, 4));
692 702
693 if (info->oob_size > 0) 703 if (info->step_spare_size)
694 drain_fifo(info, 704 drain_fifo(info,
695 info->oob_buff + info->oob_buff_pos, 705 info->oob_buff + info->oob_buff_pos,
696 DIV_ROUND_UP(info->oob_size, 4)); 706 DIV_ROUND_UP(info->step_spare_size, 4));
697 break; 707 break;
698 default: 708 default:
699 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 709 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -702,9 +712,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
702 } 712 }
703 713
704 /* Update buffer pointers for multi-page read/write */ 714 /* Update buffer pointers for multi-page read/write */
705 info->data_buff_pos += do_bytes; 715 info->data_buff_pos += info->step_chunk_size;
706 info->oob_buff_pos += info->oob_size; 716 info->oob_buff_pos += info->step_spare_size;
707 info->data_size -= do_bytes;
708} 717}
709 718
710static void pxa3xx_nand_data_dma_irq(void *data) 719static void pxa3xx_nand_data_dma_irq(void *data)
@@ -745,8 +754,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
745 info->state); 754 info->state);
746 BUG(); 755 BUG();
747 } 756 }
748 info->sg.length = info->data_size + 757 info->sg.length = info->chunk_size;
749 (info->oob_size ? info->spare_size + info->ecc_size : 0); 758 if (info->use_spare)
759 info->sg.length += info->spare_size + info->ecc_size;
750 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir); 760 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
751 761
752 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction, 762 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
@@ -907,9 +917,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
907 /* reset data and oob column point to handle data */ 917 /* reset data and oob column point to handle data */
908 info->buf_start = 0; 918 info->buf_start = 0;
909 info->buf_count = 0; 919 info->buf_count = 0;
910 info->oob_size = 0;
911 info->data_buff_pos = 0; 920 info->data_buff_pos = 0;
912 info->oob_buff_pos = 0; 921 info->oob_buff_pos = 0;
922 info->step_chunk_size = 0;
923 info->step_spare_size = 0;
924 info->cur_chunk = 0;
913 info->use_ecc = 0; 925 info->use_ecc = 0;
914 info->use_spare = 1; 926 info->use_spare = 1;
915 info->retcode = ERR_NONE; 927 info->retcode = ERR_NONE;
@@ -921,8 +933,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
921 case NAND_CMD_READ0: 933 case NAND_CMD_READ0:
922 case NAND_CMD_PAGEPROG: 934 case NAND_CMD_PAGEPROG:
923 info->use_ecc = 1; 935 info->use_ecc = 1;
924 case NAND_CMD_READOOB:
925 pxa3xx_set_datasize(info, mtd);
926 break; 936 break;
927 case NAND_CMD_PARAM: 937 case NAND_CMD_PARAM:
928 info->use_spare = 0; 938 info->use_spare = 0;
@@ -981,6 +991,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
981 if (command == NAND_CMD_READOOB) 991 if (command == NAND_CMD_READOOB)
982 info->buf_start += mtd->writesize; 992 info->buf_start += mtd->writesize;
983 993
994 if (info->cur_chunk < info->nfullchunks) {
995 info->step_chunk_size = info->chunk_size;
996 info->step_spare_size = info->spare_size;
997 } else {
998 info->step_chunk_size = info->last_chunk_size;
999 info->step_spare_size = info->last_spare_size;
1000 }
1001
984 /* 1002 /*
985 * Multiple page read needs an 'extended command type' field, 1003 * Multiple page read needs an 'extended command type' field,
986 * which is either naked-read or last-read according to the 1004 * which is either naked-read or last-read according to the
@@ -992,8 +1010,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
992 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) 1010 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
993 | NDCB0_LEN_OVRD 1011 | NDCB0_LEN_OVRD
994 | NDCB0_EXT_CMD_TYPE(ext_cmd_type); 1012 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
995 info->ndcb3 = info->chunk_size + 1013 info->ndcb3 = info->step_chunk_size +
996 info->oob_size; 1014 info->step_spare_size;
997 } 1015 }
998 1016
999 set_command_address(info, mtd->writesize, column, page_addr); 1017 set_command_address(info, mtd->writesize, column, page_addr);
@@ -1013,8 +1031,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1013 | NDCB0_EXT_CMD_TYPE(ext_cmd_type) 1031 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1014 | addr_cycle 1032 | addr_cycle
1015 | command; 1033 | command;
1016 /* No data transfer in this case */
1017 info->data_size = 0;
1018 exec_cmd = 1; 1034 exec_cmd = 1;
1019 } 1035 }
1020 break; 1036 break;
@@ -1026,6 +1042,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1026 break; 1042 break;
1027 } 1043 }
1028 1044
1045 if (info->cur_chunk < info->nfullchunks) {
1046 info->step_chunk_size = info->chunk_size;
1047 info->step_spare_size = info->spare_size;
1048 } else {
1049 info->step_chunk_size = info->last_chunk_size;
1050 info->step_spare_size = info->last_spare_size;
1051 }
1052
1029 /* Second command setting for large pages */ 1053 /* Second command setting for large pages */
1030 if (mtd->writesize > PAGE_CHUNK_SIZE) { 1054 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1031 /* 1055 /*
@@ -1036,14 +1060,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1036 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 1060 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1037 | NDCB0_LEN_OVRD 1061 | NDCB0_LEN_OVRD
1038 | NDCB0_EXT_CMD_TYPE(ext_cmd_type); 1062 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1039 info->ndcb3 = info->chunk_size + 1063 info->ndcb3 = info->step_chunk_size +
1040 info->oob_size; 1064 info->step_spare_size;
1041 1065
1042 /* 1066 /*
1043 * This is the command dispatch that completes a chunked 1067 * This is the command dispatch that completes a chunked
1044 * page program operation. 1068 * page program operation.
1045 */ 1069 */
1046 if (info->data_size == 0) { 1070 if (info->cur_chunk == info->ntotalchunks) {
1047 info->ndcb0 = NDCB0_CMD_TYPE(0x1) 1071 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1048 | NDCB0_EXT_CMD_TYPE(ext_cmd_type) 1072 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1049 | command; 1073 | command;
@@ -1070,7 +1094,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1070 | command; 1094 | command;
1071 info->ndcb1 = (column & 0xFF); 1095 info->ndcb1 = (column & 0xFF);
1072 info->ndcb3 = INIT_BUFFER_SIZE; 1096 info->ndcb3 = INIT_BUFFER_SIZE;
1073 info->data_size = INIT_BUFFER_SIZE; 1097 info->step_chunk_size = INIT_BUFFER_SIZE;
1074 break; 1098 break;
1075 1099
1076 case NAND_CMD_READID: 1100 case NAND_CMD_READID:
@@ -1080,7 +1104,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1080 | command; 1104 | command;
1081 info->ndcb1 = (column & 0xFF); 1105 info->ndcb1 = (column & 0xFF);
1082 1106
1083 info->data_size = 8; 1107 info->step_chunk_size = 8;
1084 break; 1108 break;
1085 case NAND_CMD_STATUS: 1109 case NAND_CMD_STATUS:
1086 info->buf_count = 1; 1110 info->buf_count = 1;
@@ -1088,7 +1112,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
1088 | NDCB0_ADDR_CYC(1) 1112 | NDCB0_ADDR_CYC(1)
1089 | command; 1113 | command;
1090 1114
1091 info->data_size = 8; 1115 info->step_chunk_size = 8;
1092 break; 1116 break;
1093 1117
1094 case NAND_CMD_ERASE1: 1118 case NAND_CMD_ERASE1:
@@ -1229,6 +1253,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1229 init_completion(&info->dev_ready); 1253 init_completion(&info->dev_ready);
1230 do { 1254 do {
1231 info->state = STATE_PREPARED; 1255 info->state = STATE_PREPARED;
1256
1232 exec_cmd = prepare_set_command(info, command, ext_cmd_type, 1257 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1233 column, page_addr); 1258 column, page_addr);
1234 if (!exec_cmd) { 1259 if (!exec_cmd) {
@@ -1248,22 +1273,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1248 break; 1273 break;
1249 } 1274 }
1250 1275
1276 /* Only a few commands need several steps */
1277 if (command != NAND_CMD_PAGEPROG &&
1278 command != NAND_CMD_READ0 &&
1279 command != NAND_CMD_READOOB)
1280 break;
1281
1282 info->cur_chunk++;
1283
1251 /* Check if the sequence is complete */ 1284 /* Check if the sequence is complete */
1252 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG) 1285 if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
1253 break; 1286 break;
1254 1287
1255 /* 1288 /*
1256 * After a splitted program command sequence has issued 1289 * After a splitted program command sequence has issued
1257 * the command dispatch, the command sequence is complete. 1290 * the command dispatch, the command sequence is complete.
1258 */ 1291 */
1259 if (info->data_size == 0 && 1292 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1260 command == NAND_CMD_PAGEPROG && 1293 command == NAND_CMD_PAGEPROG &&
1261 ext_cmd_type == EXT_CMD_TYPE_DISPATCH) 1294 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1262 break; 1295 break;
1263 1296
1264 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) { 1297 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1265 /* Last read: issue a 'last naked read' */ 1298 /* Last read: issue a 'last naked read' */
1266 if (info->data_size == info->chunk_size) 1299 if (info->cur_chunk == info->ntotalchunks - 1)
1267 ext_cmd_type = EXT_CMD_TYPE_LAST_RW; 1300 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1268 else 1301 else
1269 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW; 1302 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1273,7 +1306,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
1273 * the command dispatch must be issued to complete. 1306 * the command dispatch must be issued to complete.
1274 */ 1307 */
1275 } else if (command == NAND_CMD_PAGEPROG && 1308 } else if (command == NAND_CMD_PAGEPROG &&
1276 info->data_size == 0) { 1309 info->cur_chunk == info->ntotalchunks) {
1277 ext_cmd_type = EXT_CMD_TYPE_DISPATCH; 1310 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1278 } 1311 }
1279 } while (1); 1312 } while (1);
@@ -1518,6 +1551,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1518 int strength, int ecc_stepsize, int page_size) 1551 int strength, int ecc_stepsize, int page_size)
1519{ 1552{
1520 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { 1553 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1554 info->nfullchunks = 1;
1555 info->ntotalchunks = 1;
1521 info->chunk_size = 2048; 1556 info->chunk_size = 2048;
1522 info->spare_size = 40; 1557 info->spare_size = 40;
1523 info->ecc_size = 24; 1558 info->ecc_size = 24;
@@ -1526,6 +1561,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1526 ecc->strength = 1; 1561 ecc->strength = 1;
1527 1562
1528 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { 1563 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1564 info->nfullchunks = 1;
1565 info->ntotalchunks = 1;
1529 info->chunk_size = 512; 1566 info->chunk_size = 512;
1530 info->spare_size = 8; 1567 info->spare_size = 8;
1531 info->ecc_size = 8; 1568 info->ecc_size = 8;
@@ -1539,6 +1576,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1539 */ 1576 */
1540 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) { 1577 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1541 info->ecc_bch = 1; 1578 info->ecc_bch = 1;
1579 info->nfullchunks = 1;
1580 info->ntotalchunks = 1;
1542 info->chunk_size = 2048; 1581 info->chunk_size = 2048;
1543 info->spare_size = 32; 1582 info->spare_size = 32;
1544 info->ecc_size = 32; 1583 info->ecc_size = 32;
@@ -1549,6 +1588,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1549 1588
1550 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { 1589 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1551 info->ecc_bch = 1; 1590 info->ecc_bch = 1;
1591 info->nfullchunks = 2;
1592 info->ntotalchunks = 2;
1552 info->chunk_size = 2048; 1593 info->chunk_size = 2048;
1553 info->spare_size = 32; 1594 info->spare_size = 32;
1554 info->ecc_size = 32; 1595 info->ecc_size = 32;
@@ -1563,8 +1604,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1563 */ 1604 */
1564 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) { 1605 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1565 info->ecc_bch = 1; 1606 info->ecc_bch = 1;
1607 info->nfullchunks = 4;
1608 info->ntotalchunks = 5;
1566 info->chunk_size = 1024; 1609 info->chunk_size = 1024;
1567 info->spare_size = 0; 1610 info->spare_size = 0;
1611 info->last_chunk_size = 0;
1612 info->last_spare_size = 64;
1568 info->ecc_size = 32; 1613 info->ecc_size = 32;
1569 ecc->mode = NAND_ECC_HW; 1614 ecc->mode = NAND_ECC_HW;
1570 ecc->size = info->chunk_size; 1615 ecc->size = info->chunk_size;