path: root/drivers
author	Chuanxiao Dong <chuanxiao.dong@intel.com>	2010-07-26 22:41:53 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2010-08-05 11:31:53 -0400
commit	345b1d3b46fa1b7b2bf5d27bef6ea4c4dbf08731 (patch)
tree	a55eb41498a4917544fad5c5eae4c2c849460b65 /drivers
parent	5bac3acfb82aa22c5e917063973db5482f7ff6ab (diff)
mtd: denali.c: fixed all open brace { check-patch errors
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
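The patch is a pure coding-style cleanup; no logic changes. As a minimal, hypothetical sketch (the function and messages below are illustrative, not taken from denali.c), the brace placement checkpatch asks for looks like this:

/*
 * Hypothetical example of the style being enforced: opening braces on
 * the same line as the statement, "} else {" cuddled, and no braces
 * around single-statement branches.
 */
#include <stdio.h>

static void report_bank(int bank, int status)
{
	if (status != 0) {
		printf("bank %d failed\n", bank);
		printf("status = %d\n", status);
	} else {
		printf("bank %d ok\n", bank);
	}

	if (bank == 0)
		printf("primary bank\n");	/* single statement: no braces */
}

int main(void)
{
	report_bank(0, 0);
	report_bank(1, -1);
	return 0;
}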
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/nand/denali.c	164
1 file changed, 48 insertions(+), 116 deletions(-)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index ca02838a420..f850a6516bc 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -211,9 +211,7 @@ static void reset_bank(struct denali_nand_info *denali)
 	irq_status = wait_for_irq(denali, irq_mask);
 
 	if (irq_status & operation_timeout[denali->flash_bank])
-	{
 		printk(KERN_ERR "reset bank failed.\n");
-	}
 }
 
 /* Reset the flash controller */
@@ -637,14 +635,12 @@ static void find_valid_banks(struct denali_nand_info *denali)
 		}
 	}
 
-	if (denali->platform == INTEL_CE4100)
-	{
+	if (denali->platform == INTEL_CE4100) {
 		/* Platform limitations of the CE4100 device limit
 		 * users to a single chip solution for NAND.
 		 * Multichip support is not enabled.
 		 */
-		if (denali->total_used_banks != 1)
-		{
+		if (denali->total_used_banks != 1) {
 			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
 					"a single NAND device.\n");
 			BUG();
@@ -888,9 +884,7 @@ static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
 	 * with a specific ONFI mode, we apply those changes here.
 	 */
 	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
-	{
 		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
-	}
 
 	return status;
 }
@@ -997,9 +991,7 @@ static void print_irq_log(struct denali_nand_info *denali)
 
 	printk("ISR debug log index = %X\n", denali->idx);
 	for (i = 0; i < 32; i++)
-	{
 		printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
-	}
 }
 #endif
 
@@ -1018,12 +1010,10 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
 	/* check to see if a valid NAND chip has
 	 * been selected.
 	 */
-	if (is_flash_bank_valid(denali->flash_bank))
-	{
+	if (is_flash_bank_valid(denali->flash_bank)) {
 		/* check to see if controller generated
 		 * the interrupt, since this is a shared interrupt */
-		if ((irq_status = denali_irq_detected(denali)) != 0)
-		{
+		if ((irq_status = denali_irq_detected(denali)) != 0) {
 #if DEBUG_DENALI
 			denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
 			denali->idx %= 32;
@@ -1054,8 +1044,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 	bool retry = false;
 	unsigned long timeout = msecs_to_jiffies(1000);
 
-	do
-	{
+	do {
 #if DEBUG_DENALI
 		printk("waiting for 0x%x\n", irq_mask);
 #endif
@@ -1068,8 +1057,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 		denali->idx %= 32;
 #endif
 
-		if (intr_status & irq_mask)
-		{
+		if (intr_status & irq_mask) {
 			denali->irq_status &= ~irq_mask;
 			spin_unlock_irq(&denali->irq_lock);
 #if DEBUG_DENALI
@@ -1077,9 +1065,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 #endif
 			/* our interrupt was detected */
 			break;
-		}
-		else
-		{
+		} else {
 			/* these are not the interrupts you are looking for -
 			 * need to wait again */
 			spin_unlock_irq(&denali->irq_lock);
@@ -1092,8 +1078,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 		}
 	} while (comp_res != 0);
 
-	if (comp_res == 0)
-	{
+	if (comp_res == 0) {
 		/* timeout */
 		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
 				intr_status, irq_mask);
@@ -1149,22 +1134,17 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 
 	addr = BANK(denali->flash_bank) | denali->page;
 
-	if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
-	{
+	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
 		cmd = MODE_01 | addr;
 		denali_write32(cmd, denali->flash_mem);
-	}
-	else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
-	{
+	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
 		/* read spare area */
 		cmd = MODE_10 | addr;
 		index_addr(denali, (uint32_t)cmd, access_type);
 
 		cmd = MODE_01 | addr;
 		denali_write32(cmd, denali->flash_mem);
-	}
-	else if (op == DENALI_READ)
-	{
+	} else if (op == DENALI_READ) {
 		/* setup page read request for access type */
 		cmd = MODE_10 | addr;
 		index_addr(denali, (uint32_t)cmd, access_type);
@@ -1173,13 +1153,10 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 		   use the pipeline commands in Spare area only mode. So we
 		   don't.
 		 */
-		if (access_type == SPARE_ACCESS)
-		{
+		if (access_type == SPARE_ACCESS) {
 			cmd = MODE_01 | addr;
 			denali_write32(cmd, denali->flash_mem);
-		}
-		else
-		{
+		} else {
 			index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
 
 			/* wait for command to be accepted
@@ -1187,14 +1164,11 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 			 * bank. */
 			irq_status = wait_for_irq(denali, irq_mask);
 
-			if (irq_status == 0)
-			{
+			if (irq_status == 0) {
 				printk(KERN_ERR "cmd, page, addr on timeout "
 					"(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
 				status = FAIL;
-			}
-			else
-			{
+			} else {
 				cmd = MODE_01 | addr;
 				denali_write32(cmd, denali->flash_mem);
 			}
@@ -1216,9 +1190,7 @@ static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_
 	/* write the data to the flash memory */
 	buf32 = (uint32_t *)buf;
 	for (i = 0; i < len / 4; i++)
-	{
 		denali_write32(*buf32++, denali->flash_mem + 0x10);
-	}
 	return i*4; /* intent is to return the number of bytes read */
 }
 
@@ -1241,9 +1213,7 @@ static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *bu
 	/* transfer the data from the flash */
 	buf32 = (uint32_t *)buf;
 	for (i = 0; i < len / 4; i++)
-	{
 		*buf32++ = ioread32(denali->flash_mem + 0x10);
-	}
 	return i*4; /* intent is to return the number of bytes read */
 }
 
@@ -1259,8 +1229,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 	denali->page = page;
 
 	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
-						DENALI_WRITE) == PASS)
-	{
+						DENALI_WRITE) == PASS) {
 		write_data_to_flash_mem(denali, buf, mtd->oobsize);
 
 #if DEBUG_DENALI
@@ -1274,14 +1243,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 		/* wait for operation to complete */
 		irq_status = wait_for_irq(denali, irq_mask);
 
-		if (irq_status == 0)
-		{
+		if (irq_status == 0) {
 			printk(KERN_ERR "OOB write failed\n");
 			status = -EIO;
 		}
-	}
-	else
-	{
+	} else {
 		printk(KERN_ERR "unable to send pipeline command\n");
 		status = -EIO;
 	}
@@ -1300,8 +1266,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 	printk("read_oob %d\n", page);
 #endif
 	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
-						DENALI_READ) == PASS)
-	{
+						DENALI_READ) == PASS) {
 		read_data_from_flash_mem(denali, buf, mtd->oobsize);
 
 		/* wait for command to be accepted
@@ -1310,9 +1275,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 		irq_status = wait_for_irq(denali, irq_mask);
 
 		if (irq_status == 0)
-		{
 			printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
-		}
 
 		/* We set the device back to MAIN_ACCESS here as I observed
 		 * instability with the controller if you do a block erase
@@ -1340,12 +1303,8 @@ bool is_erased(uint8_t *buf, int len)
 {
 	int i = 0;
 	for (i = 0; i < len; i++)
-	{
 		if (buf[i] != 0xFF)
-		{
 			return false;
-		}
-	}
 	return true;
 }
 #define ECC_SECTOR_SIZE 512
@@ -1362,15 +1321,13 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 {
 	bool check_erased_page = false;
 
-	if (irq_status & INTR_STATUS0__ECC_ERR)
-	{
+	if (irq_status & INTR_STATUS0__ECC_ERR) {
 		/* read the ECC errors. we'll ignore them for now */
 		uint32_t err_address = 0, err_correction_info = 0;
 		uint32_t err_byte = 0, err_sector = 0, err_device = 0;
 		uint32_t err_correction_value = 0;
 
-		do
-		{
+		do {
 			err_address = ioread32(denali->flash_reg +
 						ECC_ERROR_ADDRESS);
 			err_sector = ECC_SECTOR(err_address);
@@ -1383,29 +1340,23 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 					ECC_CORRECTION_VALUE(err_correction_info);
 			err_device = ECC_ERR_DEVICE(err_correction_info);
 
-			if (ECC_ERROR_CORRECTABLE(err_correction_info))
-			{
+			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
 				/* offset in our buffer is computed as:
 				   sector number * sector size + offset in
 				   sector
 				 */
 				int offset = err_sector * ECC_SECTOR_SIZE +
 							err_byte;
-				if (offset < denali->mtd.writesize)
-				{
+				if (offset < denali->mtd.writesize) {
 					/* correct the ECC error */
 					buf[offset] ^= err_correction_value;
 					denali->mtd.ecc_stats.corrected++;
-				}
-				else
-				{
+				} else {
 					/* bummer, couldn't correct the error */
 					printk(KERN_ERR "ECC offset invalid\n");
 					denali->mtd.ecc_stats.failed++;
 				}
-			}
-			else
-			{
+			} else {
 				/* if the error is not correctable, need to
 				 * look at the page to see if it is an erased page.
 				 * if so, then it's not a real ECC error */
@@ -1482,8 +1433,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	/* copy buffer into DMA buffer */
 	memcpy(denali->buf.buf, buf, mtd->writesize);
 
-	if (raw_xfer)
-	{
+	if (raw_xfer) {
 		/* transfer the data to the spare area */
 		memcpy(denali->buf.buf + mtd->writesize,
 			chip->oob_poi,
@@ -1500,8 +1450,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	/* wait for operation to complete */
 	irq_status = wait_for_irq(denali, irq_mask);
 
-	if (irq_status == 0)
-	{
+	if (irq_status == 0) {
 		printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
 		denali->status =
 			(irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
@@ -1584,21 +1533,15 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 	check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
 	denali_enable_dma(denali, false);
 
-	if (check_erased_page)
-	{
+	if (check_erased_page) {
 		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
 
 		/* check ECC failures that may have occurred on erased pages */
-		if (check_erased_page)
-		{
+		if (check_erased_page) {
 			if (!is_erased(buf, denali->mtd.writesize))
-			{
 				denali->mtd.ecc_stats.failed++;
-			}
 			if (!is_erased(buf, denali->mtd.oobsize))
-			{
 				denali->mtd.ecc_stats.failed++;
-			}
 		}
 	}
 	return 0;
@@ -1643,9 +1586,7 @@ static uint8_t denali_read_byte(struct mtd_info *mtd)
 	uint8_t result = 0xff;
 
 	if (denali->buf.head < denali->buf.tail)
-	{
 		result = denali->buf.buf[denali->buf.head++];
-	}
 
 #if DEBUG_DENALI
 	printk("read byte -> 0x%02x\n", result);
@@ -1708,8 +1649,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 #if DEBUG_DENALI
 	printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
 #endif
-	switch (cmd)
-	{
+	switch (cmd) {
 	case NAND_CMD_PAGEPROG:
 		break;
 	case NAND_CMD_STATUS:
@@ -1717,8 +1657,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 		break;
 	case NAND_CMD_READID:
 		reset_buf(denali);
-		if (denali->flash_bank < denali->total_used_banks)
-		{
+		if (denali->flash_bank < denali->total_used_banks) {
 			/* write manufacturer information into nand
 			   buffer for NAND subsystem to fetch.
 			 */
@@ -1727,9 +1666,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
-		}
-		else
-		{
+		} else {
 			int i;
 			for (i = 0; i < 5; i++)
 				write_byte_to_buf(denali, 0xff);
@@ -1796,20 +1733,24 @@ static void denali_hw_init(struct denali_nand_info *denali)
 static struct nand_ecclayout nand_oob_slc = {
 	.eccbytes = 4,
 	.eccpos = { 0, 1, 2, 3 }, /* not used */
-	.oobfree = {{
+	.oobfree = {
+		{
 		.offset = ECC_BYTES_SLC,
 		.length = 64 - ECC_BYTES_SLC
-		}}
+		}
+	}
 };
 
 #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
 static struct nand_ecclayout nand_oob_mlc_14bit = {
 	.eccbytes = 14,
 	.eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
-	.oobfree = {{
+	.oobfree = {
+		{
 		.offset = ECC_BYTES_MLC,
 		.length = 64 - ECC_BYTES_MLC
-		}}
+		}
+	}
 };
 
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -1882,8 +1823,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		/* Due to a silicon limitation, we can only support
 		 * ONFI timing mode 1 and below.
 		 */
-		if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
-		{
+		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
 			printk("Intel CE4100 only supports ONFI timing mode 1 "
 				"or below\n");
 			ret = -EINVAL;
@@ -1912,16 +1852,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Is 32-bit DMA supported? */
 	ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
 
-	if (ret)
-	{
+	if (ret) {
 		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
 		goto failed_enable;
 	}
 	denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
 					PCI_DMA_BIDIRECTIONAL);
 
-	if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
-	{
+	if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
 		printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
 		goto failed_enable;
 	}
@@ -1978,8 +1916,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* MTD supported page sizes vary by kernel. We validate our
 	 * kernel supports the device here.
 	 */
-	if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-	{
+	if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
 		ret = -ENODEV;
 		printk(KERN_ERR "Spectra: device size not supported by this "
 			"version of MTD.");
@@ -2011,8 +1948,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* scan for NAND devices attached to the controller
 	 * this is the first stage in a two step process to register
 	 * with the nand subsystem */
-	if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
-	{
+	if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
 		ret = -ENXIO;
 		goto failed_nand;
 	}
@@ -2029,13 +1965,10 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
 	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
 
-	if (denali->dev_info.MLCDevice)
-	{
+	if (denali->dev_info.MLCDevice) {
 		denali->nand.ecc.layout = &nand_oob_mlc_14bit;
 		denali->nand.ecc.bytes = ECC_BYTES_MLC;
-	}
-	else /* SLC */
-	{
+	} else {/* SLC */
 		denali->nand.ecc.layout = &nand_oob_slc;
 		denali->nand.ecc.bytes = ECC_BYTES_SLC;
 	}
@@ -2057,8 +1990,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	denali->nand.ecc.write_oob = denali_write_oob;
 	denali->nand.erase_cmd = denali_erase;
 
-	if (nand_scan_tail(&denali->mtd))
-	{
+	if (nand_scan_tail(&denali->mtd)) {
 		ret = -ENXIO;
 		goto failed_nand;
 	}