Diffstat (limited to 'drivers/mtd/nand/denali.c')
-rw-r--r--  drivers/mtd/nand/denali.c  |  497
1 file changed, 194 insertions(+), 303 deletions(-)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 618fb42b86b0..532fe07cf886 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
56 INTR_STATUS0__ERASE_COMP) 56 INTR_STATUS0__ERASE_COMP)
57 57
58/* indicates whether or not the internal value for the flash bank is 58/* indicates whether or not the internal value for the flash bank is
59 valid or not */ 59 * valid or not */
60#define CHIP_SELECT_INVALID -1 60#define CHIP_SELECT_INVALID -1
61 61
62#define SUPPORT_8BITECC 1 62#define SUPPORT_8BITECC 1
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
71#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd) 71#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
72 72
73/* These constants are defined by the driver to enable common driver 73/* These constants are defined by the driver to enable common driver
74 configuration options. */ 74 * configuration options. */
75#define SPARE_ACCESS 0x41 75#define SPARE_ACCESS 0x41
76#define MAIN_ACCESS 0x42 76#define MAIN_ACCESS 0x42
77#define MAIN_SPARE_ACCESS 0x43 77#define MAIN_SPARE_ACCESS 0x43
@@ -97,7 +97,7 @@ static const struct pci_device_id denali_pci_ids[] = {
97 97
98 98
99/* these are static lookup tables that give us easy access to 99/* these are static lookup tables that give us easy access to
100 registers in the NAND controller. 100 * registers in the NAND controller.
101 */ 101 */
102static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, 102static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
103 INTR_STATUS1, 103 INTR_STATUS1,
@@ -119,9 +119,6 @@ static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
119 INTR_STATUS2__RST_COMP, 119 INTR_STATUS2__RST_COMP,
120 INTR_STATUS3__RST_COMP}; 120 INTR_STATUS3__RST_COMP};
121 121
122/* specifies the debug level of the driver */
123static int nand_debug_level;
124
125/* forward declarations */ 122/* forward declarations */
126static void clear_interrupts(struct denali_nand_info *denali); 123static void clear_interrupts(struct denali_nand_info *denali);
127static uint32_t wait_for_irq(struct denali_nand_info *denali, 124static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -130,22 +127,6 @@ static void denali_irq_enable(struct denali_nand_info *denali,
130 uint32_t int_mask); 127 uint32_t int_mask);
131static uint32_t read_interrupt_status(struct denali_nand_info *denali); 128static uint32_t read_interrupt_status(struct denali_nand_info *denali);
132 129
133#define DEBUG_DENALI 0
134
135/* This is a wrapper for writing to the denali registers.
136 * this allows us to create debug information so we can
137 * observe how the driver is programming the device.
138 * it uses standard linux convention for (val, addr) */
139static void denali_write32(uint32_t value, void *addr)
140{
141 iowrite32(value, addr);
142
143#if DEBUG_DENALI
144 printk(KERN_INFO "wrote: 0x%x -> 0x%x\n", value,
145 (uint32_t)((uint32_t)addr & 0x1fff));
146#endif
147}
148
149/* Certain operations for the denali NAND controller use 130/* Certain operations for the denali NAND controller use
150 * an indexed mode to read/write data. The operation is 131 * an indexed mode to read/write data. The operation is
151 * performed by writing the address value of the command 132 * performed by writing the address value of the command
@@ -155,15 +136,15 @@ static void denali_write32(uint32_t value, void *addr)
155static void index_addr(struct denali_nand_info *denali, 136static void index_addr(struct denali_nand_info *denali,
156 uint32_t address, uint32_t data) 137 uint32_t address, uint32_t data)
157{ 138{
158 denali_write32(address, denali->flash_mem); 139 iowrite32(address, denali->flash_mem);
159 denali_write32(data, denali->flash_mem + 0x10); 140 iowrite32(data, denali->flash_mem + 0x10);
160} 141}
161 142
162/* Perform an indexed read of the device */ 143/* Perform an indexed read of the device */
163static void index_addr_read_data(struct denali_nand_info *denali, 144static void index_addr_read_data(struct denali_nand_info *denali,
164 uint32_t address, uint32_t *pdata) 145 uint32_t address, uint32_t *pdata)
165{ 146{
166 denali_write32(address, denali->flash_mem); 147 iowrite32(address, denali->flash_mem);
167 *pdata = ioread32(denali->flash_mem + 0x10); 148 *pdata = ioread32(denali->flash_mem + 0x10);
168} 149}
169 150
@@ -188,18 +169,11 @@ static void read_status(struct denali_nand_info *denali)
188 /* initialize the data buffer to store status */ 169 /* initialize the data buffer to store status */
189 reset_buf(denali); 170 reset_buf(denali);
190 171
191 /* initiate a device status read */ 172 cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
192 cmd = MODE_11 | BANK(denali->flash_bank); 173 if (cmd)
193 index_addr(denali, cmd | COMMAND_CYCLE, 0x70); 174 write_byte_to_buf(denali, NAND_STATUS_WP);
194 denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); 175 else
195 176 write_byte_to_buf(denali, 0);
196 /* update buffer with status value */
197 write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
198
199#if DEBUG_DENALI
200 printk(KERN_INFO "device reporting status value of 0x%2x\n",
201 denali->buf.buf[0]);
202#endif
203} 177}
204 178
205/* resets a specific device connected to the core */ 179/* resets a specific device connected to the core */
@@ -213,12 +187,12 @@ static void reset_bank(struct denali_nand_info *denali)
213 clear_interrupts(denali); 187 clear_interrupts(denali);
214 188
215 bank = device_reset_banks[denali->flash_bank]; 189 bank = device_reset_banks[denali->flash_bank];
216 denali_write32(bank, denali->flash_reg + DEVICE_RESET); 190 iowrite32(bank, denali->flash_reg + DEVICE_RESET);
217 191
218 irq_status = wait_for_irq(denali, irq_mask); 192 irq_status = wait_for_irq(denali, irq_mask);
219 193
220 if (irq_status & operation_timeout[denali->flash_bank]) 194 if (irq_status & operation_timeout[denali->flash_bank])
221 printk(KERN_ERR "reset bank failed.\n"); 195 dev_err(&denali->dev->dev, "reset bank failed.\n");
222} 196}
223 197
224/* Reset the flash controller */ 198/* Reset the flash controller */
@@ -226,28 +200,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
226{ 200{
227 uint32_t i; 201 uint32_t i;
228 202
229 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 203 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
230 __FILE__, __LINE__, __func__); 204 __FILE__, __LINE__, __func__);
231 205
232 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) 206 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
233 denali_write32(reset_complete[i] | operation_timeout[i], 207 iowrite32(reset_complete[i] | operation_timeout[i],
234 denali->flash_reg + intr_status_addresses[i]); 208 denali->flash_reg + intr_status_addresses[i]);
235 209
236 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { 210 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
237 denali_write32(device_reset_banks[i], 211 iowrite32(device_reset_banks[i],
238 denali->flash_reg + DEVICE_RESET); 212 denali->flash_reg + DEVICE_RESET);
239 while (!(ioread32(denali->flash_reg + 213 while (!(ioread32(denali->flash_reg +
240 intr_status_addresses[i]) & 214 intr_status_addresses[i]) &
241 (reset_complete[i] | operation_timeout[i]))) 215 (reset_complete[i] | operation_timeout[i])))
242 ; 216 cpu_relax();
243 if (ioread32(denali->flash_reg + intr_status_addresses[i]) & 217 if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
244 operation_timeout[i]) 218 operation_timeout[i])
245 nand_dbg_print(NAND_DBG_WARN, 219 dev_dbg(&denali->dev->dev,
246 "NAND Reset operation timed out on bank %d\n", i); 220 "NAND Reset operation timed out on bank %d\n", i);
247 } 221 }
248 222
249 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) 223 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
250 denali_write32(reset_complete[i] | operation_timeout[i], 224 iowrite32(reset_complete[i] | operation_timeout[i],
251 denali->flash_reg + intr_status_addresses[i]); 225 denali->flash_reg + intr_status_addresses[i]);
252 226
253 return PASS; 227 return PASS;
@@ -280,7 +254,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
280 uint16_t acc_clks; 254 uint16_t acc_clks;
281 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; 255 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
282 256
283 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 257 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
284 __FILE__, __LINE__, __func__); 258 __FILE__, __LINE__, __func__);
285 259
286 en_lo = CEIL_DIV(Trp[mode], CLK_X); 260 en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -317,7 +291,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
317 acc_clks++; 291 acc_clks++;
318 292
319 if ((data_invalid - acc_clks * CLK_X) < 2) 293 if ((data_invalid - acc_clks * CLK_X) < 2)
320 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n", 294 dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
321 __FILE__, __LINE__); 295 __FILE__, __LINE__);
322 296
323 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); 297 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -345,14 +319,14 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
345 (ioread32(denali->flash_reg + DEVICE_ID) == 0x88)) 319 (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
346 acc_clks = 6; 320 acc_clks = 6;
347 321
348 denali_write32(acc_clks, denali->flash_reg + ACC_CLKS); 322 iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
349 denali_write32(re_2_we, denali->flash_reg + RE_2_WE); 323 iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
350 denali_write32(re_2_re, denali->flash_reg + RE_2_RE); 324 iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
351 denali_write32(we_2_re, denali->flash_reg + WE_2_RE); 325 iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
352 denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA); 326 iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
353 denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); 327 iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
354 denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); 328 iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
355 denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); 329 iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
356} 330}
357 331
358/* queries the NAND device to see what ONFI modes it supports. */ 332/* queries the NAND device to see what ONFI modes it supports. */
@@ -387,13 +361,13 @@ static void get_samsung_nand_para(struct denali_nand_info *denali,
387{ 361{
388 if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ 362 if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
389 /* Set timing register values according to datasheet */ 363 /* Set timing register values according to datasheet */
390 denali_write32(5, denali->flash_reg + ACC_CLKS); 364 iowrite32(5, denali->flash_reg + ACC_CLKS);
391 denali_write32(20, denali->flash_reg + RE_2_WE); 365 iowrite32(20, denali->flash_reg + RE_2_WE);
392 denali_write32(12, denali->flash_reg + WE_2_RE); 366 iowrite32(12, denali->flash_reg + WE_2_RE);
393 denali_write32(14, denali->flash_reg + ADDR_2_DATA); 367 iowrite32(14, denali->flash_reg + ADDR_2_DATA);
394 denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT); 368 iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
395 denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); 369 iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
396 denali_write32(2, denali->flash_reg + CS_SETUP_CNT); 370 iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
397 } 371 }
398} 372}
399 373
@@ -405,15 +379,15 @@ static void get_toshiba_nand_para(struct denali_nand_info *denali)
405 /* spare area size for some kind of Toshiba NAND device */ 379 /* spare area size for some kind of Toshiba NAND device */
406 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && 380 if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
407 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { 381 (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
408 denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 382 iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
409 tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * 383 tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
410 ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 384 ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
411 denali_write32(tmp, 385 iowrite32(tmp,
412 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); 386 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
413#if SUPPORT_15BITECC 387#if SUPPORT_15BITECC
414 denali_write32(15, denali->flash_reg + ECC_CORRECTION); 388 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
415#elif SUPPORT_8BITECC 389#elif SUPPORT_8BITECC
416 denali_write32(8, denali->flash_reg + ECC_CORRECTION); 390 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
417#endif 391#endif
418 } 392 }
419} 393}
@@ -426,26 +400,26 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
426 switch (device_id) { 400 switch (device_id) {
427 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ 401 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
428 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ 402 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
429 denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); 403 iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
430 denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); 404 iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
431 denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 405 iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
432 main_size = 4096 * 406 main_size = 4096 *
433 ioread32(denali->flash_reg + DEVICES_CONNECTED); 407 ioread32(denali->flash_reg + DEVICES_CONNECTED);
434 spare_size = 224 * 408 spare_size = 224 *
435 ioread32(denali->flash_reg + DEVICES_CONNECTED); 409 ioread32(denali->flash_reg + DEVICES_CONNECTED);
436 denali_write32(main_size, 410 iowrite32(main_size,
437 denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); 411 denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
438 denali_write32(spare_size, 412 iowrite32(spare_size,
439 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); 413 denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
440 denali_write32(0, denali->flash_reg + DEVICE_WIDTH); 414 iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
441#if SUPPORT_15BITECC 415#if SUPPORT_15BITECC
442 denali_write32(15, denali->flash_reg + ECC_CORRECTION); 416 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
443#elif SUPPORT_8BITECC 417#elif SUPPORT_8BITECC
444 denali_write32(8, denali->flash_reg + ECC_CORRECTION); 418 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
445#endif 419#endif
446 break; 420 break;
447 default: 421 default:
448 nand_dbg_print(NAND_DBG_WARN, 422 dev_warn(&denali->dev->dev,
449 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." 423 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
450 "Will use default parameter values instead.\n", 424 "Will use default parameter values instead.\n",
451 device_id); 425 device_id);
@@ -453,7 +427,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
453} 427}
454 428
455/* determines how many NAND chips are connected to the controller. Note for 429/* determines how many NAND chips are connected to the controller. Note for
456 Intel CE4100 devices we don't support more than one device. 430 * Intel CE4100 devices we don't support more than one device.
457 */ 431 */
458static void find_valid_banks(struct denali_nand_info *denali) 432static void find_valid_banks(struct denali_nand_info *denali)
459{ 433{
@@ -467,7 +441,7 @@ static void find_valid_banks(struct denali_nand_info *denali)
467 index_addr_read_data(denali, 441 index_addr_read_data(denali,
468 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); 442 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
469 443
470 nand_dbg_print(NAND_DBG_DEBUG, 444 dev_dbg(&denali->dev->dev,
471 "Return 1st ID for bank[%d]: %x\n", i, id[i]); 445 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
472 446
473 if (i == 0) { 447 if (i == 0) {
@@ -487,12 +461,13 @@ static void find_valid_banks(struct denali_nand_info *denali)
487 * Multichip support is not enabled. 461 * Multichip support is not enabled.
488 */ 462 */
489 if (denali->total_used_banks != 1) { 463 if (denali->total_used_banks != 1) {
490 printk(KERN_ERR "Sorry, Intel CE4100 only supports " 464 dev_err(&denali->dev->dev,
465 "Sorry, Intel CE4100 only supports "
491 "a single NAND device.\n"); 466 "a single NAND device.\n");
492 BUG(); 467 BUG();
493 } 468 }
494 } 469 }
495 nand_dbg_print(NAND_DBG_DEBUG, 470 dev_dbg(&denali->dev->dev,
496 "denali->total_used_banks: %d\n", denali->total_used_banks); 471 "denali->total_used_banks: %d\n", denali->total_used_banks);
497} 472}
498 473
@@ -526,8 +501,9 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
526 uint32_t id_bytes[5], addr; 501 uint32_t id_bytes[5], addr;
527 uint8_t i, maf_id, device_id; 502 uint8_t i, maf_id, device_id;
528 503
529 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 504 dev_dbg(&denali->dev->dev,
530 __FILE__, __LINE__, __func__); 505 "%s, Line %d, Function: %s\n",
506 __FILE__, __LINE__, __func__);
531 507
532 /* Use read id method to get device ID and other 508 /* Use read id method to get device ID and other
533 * params. For some NAND chips, controller can't 509 * params. For some NAND chips, controller can't
@@ -554,12 +530,14 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
554 get_hynix_nand_para(denali, device_id); 530 get_hynix_nand_para(denali, device_id);
555 } 531 }
556 532
557 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" 533 dev_info(&denali->dev->dev,
558 "acc_clks: %d, re_2_we: %d, we_2_re: %d," 534 "Dump timing register values:"
559 "addr_2_data: %d, rdwr_en_lo_cnt: %d, " 535 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
536 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
560 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", 537 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
561 ioread32(denali->flash_reg + ACC_CLKS), 538 ioread32(denali->flash_reg + ACC_CLKS),
562 ioread32(denali->flash_reg + RE_2_WE), 539 ioread32(denali->flash_reg + RE_2_WE),
540 ioread32(denali->flash_reg + RE_2_RE),
563 ioread32(denali->flash_reg + WE_2_RE), 541 ioread32(denali->flash_reg + WE_2_RE),
564 ioread32(denali->flash_reg + ADDR_2_DATA), 542 ioread32(denali->flash_reg + ADDR_2_DATA),
565 ioread32(denali->flash_reg + RDWR_EN_LO_CNT), 543 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
@@ -582,17 +560,17 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
582static void denali_set_intr_modes(struct denali_nand_info *denali, 560static void denali_set_intr_modes(struct denali_nand_info *denali,
583 uint16_t INT_ENABLE) 561 uint16_t INT_ENABLE)
584{ 562{
585 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 563 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
586 __FILE__, __LINE__, __func__); 564 __FILE__, __LINE__, __func__);
587 565
588 if (INT_ENABLE) 566 if (INT_ENABLE)
589 denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE); 567 iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
590 else 568 else
591 denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE); 569 iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
592} 570}
593 571
594/* validation function to verify that the controlling software is making 572/* validation function to verify that the controlling software is making
595 a valid request 573 * a valid request
596 */ 574 */
597static inline bool is_flash_bank_valid(int flash_bank) 575static inline bool is_flash_bank_valid(int flash_bank)
598{ 576{
@@ -609,10 +587,10 @@ static void denali_irq_init(struct denali_nand_info *denali)
609 int_mask = DENALI_IRQ_ALL; 587 int_mask = DENALI_IRQ_ALL;
610 588
611 /* Clear all status bits */ 589 /* Clear all status bits */
612 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0); 590 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
613 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1); 591 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
614 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2); 592 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
615 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3); 593 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
616 594
617 denali_irq_enable(denali, int_mask); 595 denali_irq_enable(denali, int_mask);
618} 596}
@@ -626,10 +604,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
626static void denali_irq_enable(struct denali_nand_info *denali, 604static void denali_irq_enable(struct denali_nand_info *denali,
627 uint32_t int_mask) 605 uint32_t int_mask)
628{ 606{
629 denali_write32(int_mask, denali->flash_reg + INTR_EN0); 607 iowrite32(int_mask, denali->flash_reg + INTR_EN0);
630 denali_write32(int_mask, denali->flash_reg + INTR_EN1); 608 iowrite32(int_mask, denali->flash_reg + INTR_EN1);
631 denali_write32(int_mask, denali->flash_reg + INTR_EN2); 609 iowrite32(int_mask, denali->flash_reg + INTR_EN2);
632 denali_write32(int_mask, denali->flash_reg + INTR_EN3); 610 iowrite32(int_mask, denali->flash_reg + INTR_EN3);
633} 611}
634 612
635/* This function only returns when an interrupt that this driver cares about 613/* This function only returns when an interrupt that this driver cares about
@@ -648,7 +626,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
648 626
649 intr_status_reg = intr_status_addresses[denali->flash_bank]; 627 intr_status_reg = intr_status_addresses[denali->flash_bank];
650 628
651 denali_write32(irq_mask, denali->flash_reg + intr_status_reg); 629 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
652} 630}
653 631
654static void clear_interrupts(struct denali_nand_info *denali) 632static void clear_interrupts(struct denali_nand_info *denali)
@@ -657,11 +635,7 @@ static void clear_interrupts(struct denali_nand_info *denali)
657 spin_lock_irq(&denali->irq_lock); 635 spin_lock_irq(&denali->irq_lock);
658 636
659 status = read_interrupt_status(denali); 637 status = read_interrupt_status(denali);
660 638 clear_interrupt(denali, status);
661#if DEBUG_DENALI
662 denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
663 denali->idx %= 32;
664#endif
665 639
666 denali->irq_status = 0x0; 640 denali->irq_status = 0x0;
667 spin_unlock_irq(&denali->irq_lock); 641 spin_unlock_irq(&denali->irq_lock);
@@ -676,17 +650,6 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
676 return ioread32(denali->flash_reg + intr_status_reg); 650 return ioread32(denali->flash_reg + intr_status_reg);
677} 651}
678 652
679#if DEBUG_DENALI
680static void print_irq_log(struct denali_nand_info *denali)
681{
682 int i = 0;
683
684 printk(KERN_INFO "ISR debug log index = %X\n", denali->idx);
685 for (i = 0; i < 32; i++)
686 printk(KERN_INFO "%08X: %08X\n", i, denali->irq_debug_array[i]);
687}
688#endif
689
690/* This is the interrupt service routine. It handles all interrupts 653/* This is the interrupt service routine. It handles all interrupts
691 * sent to this device. Note that on CE4100, this is a shared 654 * sent to this device. Note that on CE4100, this is a shared
692 * interrupt. 655 * interrupt.
@@ -707,13 +670,6 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
707 * the interrupt, since this is a shared interrupt */ 670 * the interrupt, since this is a shared interrupt */
708 irq_status = denali_irq_detected(denali); 671 irq_status = denali_irq_detected(denali);
709 if (irq_status != 0) { 672 if (irq_status != 0) {
710#if DEBUG_DENALI
711 denali->irq_debug_array[denali->idx++] =
712 0x10000000 | irq_status;
713 denali->idx %= 32;
714
715 printk(KERN_INFO "IRQ status = 0x%04x\n", irq_status);
716#endif
717 /* handle interrupt */ 673 /* handle interrupt */
718 /* first acknowledge it */ 674 /* first acknowledge it */
719 clear_interrupt(denali, irq_status); 675 clear_interrupt(denali, irq_status);
@@ -739,41 +695,20 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
739 unsigned long timeout = msecs_to_jiffies(1000); 695 unsigned long timeout = msecs_to_jiffies(1000);
740 696
741 do { 697 do {
742#if DEBUG_DENALI
743 printk(KERN_INFO "waiting for 0x%x\n", irq_mask);
744#endif
745 comp_res = 698 comp_res =
746 wait_for_completion_timeout(&denali->complete, timeout); 699 wait_for_completion_timeout(&denali->complete, timeout);
747 spin_lock_irq(&denali->irq_lock); 700 spin_lock_irq(&denali->irq_lock);
748 intr_status = denali->irq_status; 701 intr_status = denali->irq_status;
749 702
750#if DEBUG_DENALI
751 denali->irq_debug_array[denali->idx++] =
752 0x20000000 | (irq_mask << 16) | intr_status;
753 denali->idx %= 32;
754#endif
755
756 if (intr_status & irq_mask) { 703 if (intr_status & irq_mask) {
757 denali->irq_status &= ~irq_mask; 704 denali->irq_status &= ~irq_mask;
758 spin_unlock_irq(&denali->irq_lock); 705 spin_unlock_irq(&denali->irq_lock);
759#if DEBUG_DENALI
760 if (retry)
761 printk(KERN_INFO "status on retry = 0x%x\n",
762 intr_status);
763#endif
764 /* our interrupt was detected */ 706 /* our interrupt was detected */
765 break; 707 break;
766 } else { 708 } else {
767 /* these are not the interrupts you are looking for - 709 /* these are not the interrupts you are looking for -
768 * need to wait again */ 710 * need to wait again */
769 spin_unlock_irq(&denali->irq_lock); 711 spin_unlock_irq(&denali->irq_lock);
770#if DEBUG_DENALI
771 print_irq_log(denali);
772 printk(KERN_INFO "received irq nobody cared:"
773 " irq_status = 0x%x, irq_mask = 0x%x,"
774 " timeout = %ld\n", intr_status,
775 irq_mask, comp_res);
776#endif
777 retry = true; 712 retry = true;
778 } 713 }
779 } while (comp_res != 0); 714 } while (comp_res != 0);
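
For orientation, here is a minimal sketch of the ISR/waiter handshake this loop is the consumer side of, using the same irq_lock/irq_status/complete fields the driver declares; the function names and the simplified ISR body are illustrative only, not the driver's exact code:

	/* Illustrative sketch: the ISR latches status bits under irq_lock and
	 * signals the completion; the waiter loops until the bits it cares
	 * about appear or the wait times out. */
	static irqreturn_t sketch_isr(int irq, void *dev_id)
	{
		struct denali_nand_info *denali = dev_id;

		spin_lock(&denali->irq_lock);
		denali->irq_status |= read_interrupt_status(denali);
		spin_unlock(&denali->irq_lock);
		complete(&denali->complete);
		return IRQ_HANDLED;
	}

	static uint32_t sketch_wait(struct denali_nand_info *denali,
				    uint32_t irq_mask)
	{
		uint32_t seen = 0;

		while (wait_for_completion_timeout(&denali->complete,
						   msecs_to_jiffies(1000))) {
			spin_lock_irq(&denali->irq_lock);
			seen = denali->irq_status;
			if (seen & irq_mask)
				denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
			if (seen & irq_mask)
				break;	/* the awaited bits arrived */
		}
		return seen & irq_mask;	/* 0 means the wait timed out */
	}
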
@@ -789,7 +724,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
789} 724}
790 725
791/* This helper function setups the registers for ECC and whether or not 726/* This helper function setups the registers for ECC and whether or not
792 the spare area will be transfered. */ 727 * the spare area will be transfered. */
793static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, 728static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
794 bool transfer_spare) 729 bool transfer_spare)
795{ 730{
@@ -800,13 +735,13 @@ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
800 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; 735 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
801 736
802 /* Enable spare area/ECC per user's request. */ 737 /* Enable spare area/ECC per user's request. */
803 denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); 738 iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
804 denali_write32(transfer_spare_flag, 739 iowrite32(transfer_spare_flag,
805 denali->flash_reg + TRANSFER_SPARE_REG); 740 denali->flash_reg + TRANSFER_SPARE_REG);
806} 741}
807 742
808/* sends a pipeline command operation to the controller. See the Denali NAND 743/* sends a pipeline command operation to the controller. See the Denali NAND
809 controller's user guide for more information (section 4.2.3.6). 744 * controller's user guide for more information (section 4.2.3.6).
810 */ 745 */
811static int denali_send_pipeline_cmd(struct denali_nand_info *denali, 746static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
812 bool ecc_en, 747 bool ecc_en,
@@ -827,16 +762,6 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
827 762
828 setup_ecc_for_xfer(denali, ecc_en, transfer_spare); 763 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
829 764
830#if DEBUG_DENALI
831 spin_lock_irq(&denali->irq_lock);
832 denali->irq_debug_array[denali->idx++] =
833 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) |
834 (access_type << 4);
835 denali->idx %= 32;
836 spin_unlock_irq(&denali->irq_lock);
837#endif
838
839
840 /* clear interrupts */ 765 /* clear interrupts */
841 clear_interrupts(denali); 766 clear_interrupts(denali);
842 767
@@ -844,14 +769,14 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
844 769
845 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) { 770 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
846 cmd = MODE_01 | addr; 771 cmd = MODE_01 | addr;
847 denali_write32(cmd, denali->flash_mem); 772 iowrite32(cmd, denali->flash_mem);
848 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) { 773 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
849 /* read spare area */ 774 /* read spare area */
850 cmd = MODE_10 | addr; 775 cmd = MODE_10 | addr;
851 index_addr(denali, (uint32_t)cmd, access_type); 776 index_addr(denali, (uint32_t)cmd, access_type);
852 777
853 cmd = MODE_01 | addr; 778 cmd = MODE_01 | addr;
854 denali_write32(cmd, denali->flash_mem); 779 iowrite32(cmd, denali->flash_mem);
855 } else if (op == DENALI_READ) { 780 } else if (op == DENALI_READ) {
856 /* setup page read request for access type */ 781 /* setup page read request for access type */
857 cmd = MODE_10 | addr; 782 cmd = MODE_10 | addr;
@@ -863,7 +788,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
863 */ 788 */
864 if (access_type == SPARE_ACCESS) { 789 if (access_type == SPARE_ACCESS) {
865 cmd = MODE_01 | addr; 790 cmd = MODE_01 | addr;
866 denali_write32(cmd, denali->flash_mem); 791 iowrite32(cmd, denali->flash_mem);
867 } else { 792 } else {
868 index_addr(denali, (uint32_t)cmd, 793 index_addr(denali, (uint32_t)cmd,
869 0x2000 | op | page_count); 794 0x2000 | op | page_count);
@@ -875,13 +800,14 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
875 irq_status = wait_for_irq(denali, irq_mask); 800 irq_status = wait_for_irq(denali, irq_mask);
876 801
877 if (irq_status == 0) { 802 if (irq_status == 0) {
878 printk(KERN_ERR "cmd, page, addr on timeout " 803 dev_err(&denali->dev->dev,
879 "(0x%x, 0x%x, 0x%x)\n", cmd, 804 "cmd, page, addr on timeout "
880 denali->page, addr); 805 "(0x%x, 0x%x, 0x%x)\n",
806 cmd, denali->page, addr);
881 status = FAIL; 807 status = FAIL;
882 } else { 808 } else {
883 cmd = MODE_01 | addr; 809 cmd = MODE_01 | addr;
884 denali_write32(cmd, denali->flash_mem); 810 iowrite32(cmd, denali->flash_mem);
885 } 811 }
886 } 812 }
887 } 813 }
@@ -902,7 +828,7 @@ static int write_data_to_flash_mem(struct denali_nand_info *denali,
902 /* write the data to the flash memory */ 828 /* write the data to the flash memory */
903 buf32 = (uint32_t *)buf; 829 buf32 = (uint32_t *)buf;
904 for (i = 0; i < len / 4; i++) 830 for (i = 0; i < len / 4; i++)
905 denali_write32(*buf32++, denali->flash_mem + 0x10); 831 iowrite32(*buf32++, denali->flash_mem + 0x10);
906 return i*4; /* intent is to return the number of bytes read */ 832 return i*4; /* intent is to return the number of bytes read */
907} 833}
908 834
@@ -945,24 +871,15 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
945 DENALI_WRITE) == PASS) { 871 DENALI_WRITE) == PASS) {
946 write_data_to_flash_mem(denali, buf, mtd->oobsize); 872 write_data_to_flash_mem(denali, buf, mtd->oobsize);
947 873
948#if DEBUG_DENALI
949 spin_lock_irq(&denali->irq_lock);
950 denali->irq_debug_array[denali->idx++] =
951 0x80000000 | mtd->oobsize;
952 denali->idx %= 32;
953 spin_unlock_irq(&denali->irq_lock);
954#endif
955
956
957 /* wait for operation to complete */ 874 /* wait for operation to complete */
958 irq_status = wait_for_irq(denali, irq_mask); 875 irq_status = wait_for_irq(denali, irq_mask);
959 876
960 if (irq_status == 0) { 877 if (irq_status == 0) {
961 printk(KERN_ERR "OOB write failed\n"); 878 dev_err(&denali->dev->dev, "OOB write failed\n");
962 status = -EIO; 879 status = -EIO;
963 } 880 }
964 } else { 881 } else {
965 printk(KERN_ERR "unable to send pipeline command\n"); 882 dev_err(&denali->dev->dev, "unable to send pipeline command\n");
966 status = -EIO; 883 status = -EIO;
967 } 884 }
968 return status; 885 return status;
@@ -977,9 +894,6 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
977 894
978 denali->page = page; 895 denali->page = page;
979 896
980#if DEBUG_DENALI
981 printk(KERN_INFO "read_oob %d\n", page);
982#endif
983 if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, 897 if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
984 DENALI_READ) == PASS) { 898 DENALI_READ) == PASS) {
985 read_data_from_flash_mem(denali, buf, mtd->oobsize); 899 read_data_from_flash_mem(denali, buf, mtd->oobsize);
@@ -990,7 +904,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
990 irq_status = wait_for_irq(denali, irq_mask); 904 irq_status = wait_for_irq(denali, irq_mask);
991 905
992 if (irq_status == 0) 906 if (irq_status == 0)
993 printk(KERN_ERR "page on OOB timeout %d\n", 907 dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
994 denali->page); 908 denali->page);
995 909
996 /* We set the device back to MAIN_ACCESS here as I observed 910 /* We set the device back to MAIN_ACCESS here as I observed
@@ -1002,14 +916,6 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
1002 addr = BANK(denali->flash_bank) | denali->page; 916 addr = BANK(denali->flash_bank) | denali->page;
1003 cmd = MODE_10 | addr; 917 cmd = MODE_10 | addr;
1004 index_addr(denali, (uint32_t)cmd, MAIN_ACCESS); 918 index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
1005
1006#if DEBUG_DENALI
1007 spin_lock_irq(&denali->irq_lock);
1008 denali->irq_debug_array[denali->idx++] =
1009 0x60000000 | mtd->oobsize;
1010 denali->idx %= 32;
1011 spin_unlock_irq(&denali->irq_lock);
1012#endif
1013 } 919 }
1014} 920}
1015 921
@@ -1029,12 +935,12 @@ bool is_erased(uint8_t *buf, int len)
1029#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) 935#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
1030#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) 936#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
1031#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) 937#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
1032#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO)) 938#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
1033#define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) 939#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
1034#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 940#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1035 941
1036static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 942static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1037 uint8_t *oobbuf, uint32_t irq_status) 943 uint32_t irq_status)
1038{ 944{
1039 bool check_erased_page = false; 945 bool check_erased_page = false;
1040 946
@@ -1043,6 +949,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1043 uint32_t err_address = 0, err_correction_info = 0; 949 uint32_t err_address = 0, err_correction_info = 0;
1044 uint32_t err_byte = 0, err_sector = 0, err_device = 0; 950 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
1045 uint32_t err_correction_value = 0; 951 uint32_t err_correction_value = 0;
952 denali_set_intr_modes(denali, false);
1046 953
1047 do { 954 do {
1048 err_address = ioread32(denali->flash_reg + 955 err_address = ioread32(denali->flash_reg +
@@ -1050,7 +957,6 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1050 err_sector = ECC_SECTOR(err_address); 957 err_sector = ECC_SECTOR(err_address);
1051 err_byte = ECC_BYTE(err_address); 958 err_byte = ECC_BYTE(err_address);
1052 959
1053
1054 err_correction_info = ioread32(denali->flash_reg + 960 err_correction_info = ioread32(denali->flash_reg +
1055 ERR_CORRECTION_INFO); 961 ERR_CORRECTION_INFO);
1056 err_correction_value = 962 err_correction_value =
@@ -1058,20 +964,23 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1058 err_device = ECC_ERR_DEVICE(err_correction_info); 964 err_device = ECC_ERR_DEVICE(err_correction_info);
1059 965
1060 if (ECC_ERROR_CORRECTABLE(err_correction_info)) { 966 if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
1061 /* offset in our buffer is computed as: 967 /* If err_byte is larger than ECC_SECTOR_SIZE,
 1062 sector number * sector size + offset in 968 * the error happened in the OOB area, so we
 1063 sector 969 * ignore it; there is no need to correct it.
 1064 */ 970 * err_device identifies which NAND device the
 1065 int offset = err_sector * ECC_SECTOR_SIZE + 971 * error bits occurred in when more than one
 1066 err_byte; 972 * NAND device is connected.
1067 if (offset < denali->mtd.writesize) { 973 * */
974 if (err_byte < ECC_SECTOR_SIZE) {
975 int offset;
976 offset = (err_sector *
977 ECC_SECTOR_SIZE +
978 err_byte) *
979 denali->devnum +
980 err_device;
1068 /* correct the ECC error */ 981 /* correct the ECC error */
1069 buf[offset] ^= err_correction_value; 982 buf[offset] ^= err_correction_value;
1070 denali->mtd.ecc_stats.corrected++; 983 denali->mtd.ecc_stats.corrected++;
1071 } else {
1072 /* bummer, couldn't correct the error */
1073 printk(KERN_ERR "ECC offset invalid\n");
1074 denali->mtd.ecc_stats.failed++;
1075 } 984 }
1076 } else { 985 } else {
1077 /* if the error is not correctable, need to 986 /* if the error is not correctable, need to
@@ -1080,14 +989,16 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
1080 * */ 989 * */
1081 check_erased_page = true; 990 check_erased_page = true;
1082 } 991 }
1083
1084#if DEBUG_DENALI
1085 printk(KERN_INFO "Detected ECC error in page %d:"
1086 " err_addr = 0x%08x, info to fix is"
1087 " 0x%08x\n", denali->page, err_address,
1088 err_correction_info);
1089#endif
1090 } while (!ECC_LAST_ERR(err_correction_info)); 992 } while (!ECC_LAST_ERR(err_correction_info));
 993 /* Once all ECC errors have been handled, the controller
 994 * triggers an ECC_TRANSACTION_DONE interrupt, so just
 995 * wait here for that interrupt
 996 * */
997 while (!(read_interrupt_status(denali) &
998 INTR_STATUS0__ECC_TRANSACTION_DONE))
999 cpu_relax();
1000 clear_interrupts(denali);
1001 denali_set_intr_modes(denali, true);
1091 } 1002 }
1092 return check_erased_page; 1003 return check_erased_page;
1093} 1004}
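
A quick worked example of the new offset arithmetic above, since the interleaving is easy to misread: the values below assume ECC_SECTOR_SIZE is 512 and two NAND devices feed one logical page (denali->devnum == 2); the concrete numbers are illustrative only.

	/* Sketch only: mapping a reported ECC error back into the interleaved
	 * page buffer.  512 (ECC_SECTOR_SIZE) and 2 (devnum) are assumed. */
	uint8_t buf[8192];			/* logical page built from 2 devices */
	uint8_t err_correction_value = 0x04;	/* example bit pattern to flip */
	unsigned int err_sector = 1, err_byte = 10, err_device = 1;
	unsigned int offset = (err_sector * 512 + err_byte) * 2 + err_device;
						/* (512 + 10) * 2 + 1 = 1045 */

	buf[offset] ^= err_correction_value;	/* correct the byte in place */
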
@@ -1100,7 +1011,7 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en)
1100 if (en) 1011 if (en)
1101 reg_val = DMA_ENABLE__FLAG; 1012 reg_val = DMA_ENABLE__FLAG;
1102 1013
1103 denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); 1014 iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
1104 ioread32(denali->flash_reg + DMA_ENABLE); 1015 ioread32(denali->flash_reg + DMA_ENABLE);
1105} 1016}
1106 1017
@@ -1129,7 +1040,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1129} 1040}
1130 1041
1131/* writes a page. user specifies type, and this function handles the 1042/* writes a page. user specifies type, and this function handles the
1132 configuration details. */ 1043 * configuration details. */
1133static void write_page(struct mtd_info *mtd, struct nand_chip *chip, 1044static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1134 const uint8_t *buf, bool raw_xfer) 1045 const uint8_t *buf, bool raw_xfer)
1135{ 1046{
@@ -1171,8 +1082,9 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1171 irq_status = wait_for_irq(denali, irq_mask); 1082 irq_status = wait_for_irq(denali, irq_mask);
1172 1083
1173 if (irq_status == 0) { 1084 if (irq_status == 0) {
1174 printk(KERN_ERR "timeout on write_page" 1085 dev_err(&denali->dev->dev,
1175 " (type = %d)\n", raw_xfer); 1086 "timeout on write_page (type = %d)\n",
1087 raw_xfer);
1176 denali->status = 1088 denali->status =
1177 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? 1089 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
1178 NAND_STATUS_FAIL : PASS; 1090 NAND_STATUS_FAIL : PASS;
@@ -1185,8 +1097,9 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1185/* NAND core entry points */ 1097/* NAND core entry points */
1186 1098
1187/* this is the callback that the NAND core calls to write a page. Since 1099/* this is the callback that the NAND core calls to write a page. Since
1188 writing a page with ECC or without is similar, all the work is done 1100 * writing a page with ECC or without is similar, all the work is done
1189 by write_page above. */ 1101 * by write_page above.
1102 * */
1190static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1103static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1191 const uint8_t *buf) 1104 const uint8_t *buf)
1192{ 1105{
@@ -1196,8 +1109,8 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1196} 1109}
1197 1110
1198/* This is the callback that the NAND core calls to write a page without ECC. 1111/* This is the callback that the NAND core calls to write a page without ECC.
1199 raw access is similiar to ECC page writes, so all the work is done in the 1112 * raw access is similiar to ECC page writes, so all the work is done in the
1200 write_page() function above. 1113 * write_page() function above.
1201 */ 1114 */
1202static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1115static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1203 const uint8_t *buf) 1116 const uint8_t *buf)
@@ -1236,6 +1149,13 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1236 INTR_STATUS0__ECC_ERR; 1149 INTR_STATUS0__ECC_ERR;
1237 bool check_erased_page = false; 1150 bool check_erased_page = false;
1238 1151
1152 if (page != denali->page) {
1153 dev_err(&denali->dev->dev, "IN %s: page %d is not"
1154 " equal to denali->page %d, investigate!!",
1155 __func__, page, denali->page);
1156 BUG();
1157 }
1158
1239 setup_ecc_for_xfer(denali, true, false); 1159 setup_ecc_for_xfer(denali, true, false);
1240 1160
1241 denali_enable_dma(denali, true); 1161 denali_enable_dma(denali, true);
@@ -1251,7 +1171,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1251 1171
1252 memcpy(buf, denali->buf.buf, mtd->writesize); 1172 memcpy(buf, denali->buf.buf, mtd->writesize);
1253 1173
1254 check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); 1174 check_erased_page = handle_ecc(denali, buf, irq_status);
1255 denali_enable_dma(denali, false); 1175 denali_enable_dma(denali, false);
1256 1176
1257 if (check_erased_page) { 1177 if (check_erased_page) {
@@ -1280,6 +1200,13 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1280 uint32_t irq_status = 0; 1200 uint32_t irq_status = 0;
1281 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; 1201 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
1282 1202
1203 if (page != denali->page) {
1204 dev_err(&denali->dev->dev, "IN %s: page %d is not"
1205 " equal to denali->page %d, investigate!!",
1206 __func__, page, denali->page);
1207 BUG();
1208 }
1209
1283 setup_ecc_for_xfer(denali, false, true); 1210 setup_ecc_for_xfer(denali, false, true);
1284 denali_enable_dma(denali, true); 1211 denali_enable_dma(denali, true);
1285 1212
@@ -1309,18 +1236,13 @@ static uint8_t denali_read_byte(struct mtd_info *mtd)
1309 if (denali->buf.head < denali->buf.tail) 1236 if (denali->buf.head < denali->buf.tail)
1310 result = denali->buf.buf[denali->buf.head++]; 1237 result = denali->buf.buf[denali->buf.head++];
1311 1238
1312#if DEBUG_DENALI
1313 printk(KERN_INFO "read byte -> 0x%02x\n", result);
1314#endif
1315 return result; 1239 return result;
1316} 1240}
1317 1241
1318static void denali_select_chip(struct mtd_info *mtd, int chip) 1242static void denali_select_chip(struct mtd_info *mtd, int chip)
1319{ 1243{
1320 struct denali_nand_info *denali = mtd_to_denali(mtd); 1244 struct denali_nand_info *denali = mtd_to_denali(mtd);
1321#if DEBUG_DENALI 1245
1322 printk(KERN_INFO "denali select chip %d\n", chip);
1323#endif
1324 spin_lock_irq(&denali->irq_lock); 1246 spin_lock_irq(&denali->irq_lock);
1325 denali->flash_bank = chip; 1247 denali->flash_bank = chip;
1326 spin_unlock_irq(&denali->irq_lock); 1248 spin_unlock_irq(&denali->irq_lock);
@@ -1332,9 +1254,6 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1332 int status = denali->status; 1254 int status = denali->status;
1333 denali->status = 0; 1255 denali->status = 0;
1334 1256
1335#if DEBUG_DENALI
1336 printk(KERN_INFO "waitfunc %d\n", status);
1337#endif
1338 return status; 1257 return status;
1339} 1258}
1340 1259
@@ -1344,9 +1263,6 @@ static void denali_erase(struct mtd_info *mtd, int page)
1344 1263
1345 uint32_t cmd = 0x0, irq_status = 0; 1264 uint32_t cmd = 0x0, irq_status = 0;
1346 1265
1347#if DEBUG_DENALI
1348 printk(KERN_INFO "erase page: %d\n", page);
1349#endif
1350 /* clear interrupts */ 1266 /* clear interrupts */
1351 clear_interrupts(denali); 1267 clear_interrupts(denali);
1352 1268
@@ -1369,9 +1285,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1369 uint32_t addr, id; 1285 uint32_t addr, id;
1370 int i; 1286 int i;
1371 1287
1372#if DEBUG_DENALI
1373 printk(KERN_INFO "cmdfunc: 0x%x %d %d\n", cmd, col, page);
1374#endif
1375 switch (cmd) { 1288 switch (cmd) {
1376 case NAND_CMD_PAGEPROG: 1289 case NAND_CMD_PAGEPROG:
1377 break; 1290 break;
@@ -1415,7 +1328,9 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1415static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, 1328static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1416 uint8_t *ecc_code) 1329 uint8_t *ecc_code)
1417{ 1330{
1418 printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n"); 1331 struct denali_nand_info *denali = mtd_to_denali(mtd);
1332 dev_err(&denali->dev->dev,
1333 "denali_ecc_calculate called unexpectedly\n");
1419 BUG(); 1334 BUG();
1420 return -EIO; 1335 return -EIO;
1421} 1336}
@@ -1423,14 +1338,18 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1423static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, 1338static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1424 uint8_t *read_ecc, uint8_t *calc_ecc) 1339 uint8_t *read_ecc, uint8_t *calc_ecc)
1425{ 1340{
1426 printk(KERN_ERR "denali_ecc_correct called unexpectedly\n"); 1341 struct denali_nand_info *denali = mtd_to_denali(mtd);
1342 dev_err(&denali->dev->dev,
1343 "denali_ecc_correct called unexpectedly\n");
1427 BUG(); 1344 BUG();
1428 return -EIO; 1345 return -EIO;
1429} 1346}
1430 1347
1431static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) 1348static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1432{ 1349{
1433 printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n"); 1350 struct denali_nand_info *denali = mtd_to_denali(mtd);
1351 dev_err(&denali->dev->dev,
1352 "denali_ecc_hwctl called unexpectedly\n");
1434 BUG(); 1353 BUG();
1435} 1354}
1436/* end NAND core entry points */ 1355/* end NAND core entry points */
@@ -1445,18 +1364,18 @@ static void denali_hw_init(struct denali_nand_info *denali)
1445 * */ 1364 * */
1446 denali->bbtskipbytes = ioread32(denali->flash_reg + 1365 denali->bbtskipbytes = ioread32(denali->flash_reg +
1447 SPARE_AREA_SKIP_BYTES); 1366 SPARE_AREA_SKIP_BYTES);
1448 denali_irq_init(denali);
1449 denali_nand_reset(denali); 1367 denali_nand_reset(denali);
1450 denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1368 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1451 denali_write32(CHIP_EN_DONT_CARE__FLAG, 1369 iowrite32(CHIP_EN_DONT_CARE__FLAG,
1452 denali->flash_reg + CHIP_ENABLE_DONT_CARE); 1370 denali->flash_reg + CHIP_ENABLE_DONT_CARE);
1453 1371
1454 denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES); 1372 iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1455 denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1456 1373
1457 /* Should set value for these registers when init */ 1374 /* Should set value for these registers when init */
1458 denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1375 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1459 denali_write32(1, denali->flash_reg + ECC_ENABLE); 1376 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1377 denali_nand_timing_set(denali);
1378 denali_irq_init(denali);
1460} 1379}
1461 1380
1462/* Althogh controller spec said SLC ECC is forceb to be 4bit, 1381/* Althogh controller spec said SLC ECC is forceb to be 4bit,
@@ -1526,9 +1445,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1526 unsigned long csr_len, mem_len; 1445 unsigned long csr_len, mem_len;
1527 struct denali_nand_info *denali; 1446 struct denali_nand_info *denali;
1528 1447
1529 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1530 __FILE__, __LINE__, __func__);
1531
1532 denali = kzalloc(sizeof(*denali), GFP_KERNEL); 1448 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1533 if (!denali) 1449 if (!denali)
1534 return -ENOMEM; 1450 return -ENOMEM;
@@ -1536,7 +1452,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1536 ret = pci_enable_device(dev); 1452 ret = pci_enable_device(dev);
1537 if (ret) { 1453 if (ret) {
1538 printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); 1454 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
 1539 goto failed_enable; 1455 goto failed_alloc_memory;
1540 } 1456 }
1541 1457
1542 if (id->driver_data == INTEL_CE4100) { 1458 if (id->driver_data == INTEL_CE4100) {
@@ -1547,7 +1463,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1547 printk(KERN_ERR "Intel CE4100 only supports" 1463 printk(KERN_ERR "Intel CE4100 only supports"
1548 " ONFI timing mode 1 or below\n"); 1464 " ONFI timing mode 1 or below\n");
1549 ret = -EINVAL; 1465 ret = -EINVAL;
1550 goto failed_enable; 1466 goto failed_enable_dev;
1551 } 1467 }
1552 denali->platform = INTEL_CE4100; 1468 denali->platform = INTEL_CE4100;
1553 mem_base = pci_resource_start(dev, 0); 1469 mem_base = pci_resource_start(dev, 0);
@@ -1557,17 +1473,12 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1557 } else { 1473 } else {
1558 denali->platform = INTEL_MRST; 1474 denali->platform = INTEL_MRST;
1559 csr_base = pci_resource_start(dev, 0); 1475 csr_base = pci_resource_start(dev, 0);
1560 csr_len = pci_resource_start(dev, 0); 1476 csr_len = pci_resource_len(dev, 0);
1561 mem_base = pci_resource_start(dev, 1); 1477 mem_base = pci_resource_start(dev, 1);
1562 mem_len = pci_resource_len(dev, 1); 1478 mem_len = pci_resource_len(dev, 1);
1563 if (!mem_len) { 1479 if (!mem_len) {
1564 mem_base = csr_base + csr_len; 1480 mem_base = csr_base + csr_len;
1565 mem_len = csr_len; 1481 mem_len = csr_len;
1566 nand_dbg_print(NAND_DBG_WARN,
1567 "Spectra: No second"
1568 " BAR for PCI device;"
1569 " assuming %08Lx\n",
1570 (uint64_t)csr_base);
1571 } 1482 }
1572 } 1483 }
1573 1484
@@ -1576,7 +1487,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1576 1487
1577 if (ret) { 1488 if (ret) {
1578 printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1489 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1579 goto failed_enable; 1490 goto failed_enable_dev;
1580 } 1491 }
1581 denali->buf.dma_buf = 1492 denali->buf.dma_buf =
1582 pci_map_single(dev, denali->buf.buf, 1493 pci_map_single(dev, denali->buf.buf,
@@ -1584,50 +1495,44 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1584 PCI_DMA_BIDIRECTIONAL); 1495 PCI_DMA_BIDIRECTIONAL);
1585 1496
1586 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) { 1497 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
1587 printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); 1498 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
1588 goto failed_enable; 1499 goto failed_enable_dev;
1589 } 1500 }
1590 1501
1591 pci_set_master(dev); 1502 pci_set_master(dev);
1592 denali->dev = dev; 1503 denali->dev = dev;
1504 denali->mtd.dev.parent = &dev->dev;
1593 1505
1594 ret = pci_request_regions(dev, DENALI_NAND_NAME); 1506 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1595 if (ret) { 1507 if (ret) {
1596 printk(KERN_ERR "Spectra: Unable to request memory regions\n"); 1508 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1597 goto failed_req_csr; 1509 goto failed_dma_map;
1598 } 1510 }
1599 1511
1600 denali->flash_reg = ioremap_nocache(csr_base, csr_len); 1512 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1601 if (!denali->flash_reg) { 1513 if (!denali->flash_reg) {
1602 printk(KERN_ERR "Spectra: Unable to remap memory region\n"); 1514 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1603 ret = -ENOMEM; 1515 ret = -ENOMEM;
1604 goto failed_remap_csr; 1516 goto failed_req_regions;
1605 } 1517 }
1606 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1607 (uint64_t)csr_base, denali->flash_reg, csr_len);
1608 1518
1609 denali->flash_mem = ioremap_nocache(mem_base, mem_len); 1519 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1610 if (!denali->flash_mem) { 1520 if (!denali->flash_mem) {
1611 printk(KERN_ERR "Spectra: ioremap_nocache failed!"); 1521 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
1612 iounmap(denali->flash_reg);
1613 ret = -ENOMEM; 1522 ret = -ENOMEM;
1614 goto failed_remap_csr; 1523 goto failed_remap_reg;
1615 } 1524 }
1616 1525
1617 nand_dbg_print(NAND_DBG_WARN,
1618 "Spectra: Remapped flash base address: "
1619 "0x%p, len: %ld\n",
1620 denali->flash_mem, csr_len);
1621
1622 denali_hw_init(denali); 1526 denali_hw_init(denali);
1623 denali_drv_init(denali); 1527 denali_drv_init(denali);
1624 1528
 1625 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq); 1529 /* denali_isr registration is done after all the hardware
 1530 * initialization is finished */
1626 if (request_irq(dev->irq, denali_isr, IRQF_SHARED, 1531 if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
1627 DENALI_NAND_NAME, denali)) { 1532 DENALI_NAND_NAME, denali)) {
1628 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); 1533 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
1629 ret = -ENODEV; 1534 ret = -ENODEV;
1630 goto failed_request_irq; 1535 goto failed_remap_mem;
1631 } 1536 }
1632 1537
1633 /* now that our ISR is registered, we can enable interrupts */ 1538 /* now that our ISR is registered, we can enable interrupts */
@@ -1635,21 +1540,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1635 1540
1636 pci_set_drvdata(dev, denali); 1541 pci_set_drvdata(dev, denali);
1637 1542
1638 denali_nand_timing_set(denali); 1543 denali->mtd.name = "denali-nand";
1639
1640 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
1641 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
1642 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1643 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1644 ioread32(denali->flash_reg + ACC_CLKS),
1645 ioread32(denali->flash_reg + RE_2_WE),
1646 ioread32(denali->flash_reg + WE_2_RE),
1647 ioread32(denali->flash_reg + ADDR_2_DATA),
1648 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
1649 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
1650 ioread32(denali->flash_reg + CS_SETUP_CNT));
1651
1652 denali->mtd.name = "Denali NAND";
1653 denali->mtd.owner = THIS_MODULE; 1544 denali->mtd.owner = THIS_MODULE;
1654 denali->mtd.priv = &denali->nand; 1545 denali->mtd.priv = &denali->nand;
1655 1546
@@ -1664,7 +1555,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1664 * with the nand subsystem */ 1555 * with the nand subsystem */
1665 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) { 1556 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
1666 ret = -ENXIO; 1557 ret = -ENXIO;
1667 goto failed_nand; 1558 goto failed_req_irq;
1668 } 1559 }
1669 1560
1670 /* MTD supported page sizes vary by kernel. We validate our 1561 /* MTD supported page sizes vary by kernel. We validate our
@@ -1674,7 +1565,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1674 ret = -ENODEV; 1565 ret = -ENODEV;
1675 printk(KERN_ERR "Spectra: device size not supported by this " 1566 printk(KERN_ERR "Spectra: device size not supported by this "
1676 "version of MTD."); 1567 "version of MTD.");
1677 goto failed_nand; 1568 goto failed_req_irq;
1678 } 1569 }
1679 1570
1680 /* support for multi nand 1571 /* support for multi nand
@@ -1719,17 +1610,17 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1719 /* if MLC OOB size is large enough, use 15bit ECC*/ 1610 /* if MLC OOB size is large enough, use 15bit ECC*/
1720 denali->nand.ecc.layout = &nand_15bit_oob; 1611 denali->nand.ecc.layout = &nand_15bit_oob;
1721 denali->nand.ecc.bytes = ECC_15BITS; 1612 denali->nand.ecc.bytes = ECC_15BITS;
1722 denali_write32(15, denali->flash_reg + ECC_CORRECTION); 1613 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
1723 } else if (denali->mtd.oobsize < (denali->bbtskipbytes + 1614 } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
1724 ECC_8BITS * (denali->mtd.writesize / 1615 ECC_8BITS * (denali->mtd.writesize /
1725 ECC_SECTOR_SIZE))) { 1616 ECC_SECTOR_SIZE))) {
1726 printk(KERN_ERR "Your NAND chip OOB is not large enough to" 1617 printk(KERN_ERR "Your NAND chip OOB is not large enough to"
1727 " contain 8bit ECC correction codes"); 1618 " contain 8bit ECC correction codes");
1728 goto failed_nand; 1619 goto failed_req_irq;
1729 } else { 1620 } else {
1730 denali->nand.ecc.layout = &nand_8bit_oob; 1621 denali->nand.ecc.layout = &nand_8bit_oob;
1731 denali->nand.ecc.bytes = ECC_8BITS; 1622 denali->nand.ecc.bytes = ECC_8BITS;
1732 denali_write32(8, denali->flash_reg + ECC_CORRECTION); 1623 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1733 } 1624 }
1734 1625
1735 denali->nand.ecc.bytes *= denali->devnum; 1626 denali->nand.ecc.bytes *= denali->devnum;
@@ -1769,28 +1660,31 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1769 1660
1770 if (nand_scan_tail(&denali->mtd)) { 1661 if (nand_scan_tail(&denali->mtd)) {
1771 ret = -ENXIO; 1662 ret = -ENXIO;
1772 goto failed_nand; 1663 goto failed_req_irq;
1773 } 1664 }
1774 1665
1775 ret = add_mtd_device(&denali->mtd); 1666 ret = add_mtd_device(&denali->mtd);
1776 if (ret) { 1667 if (ret) {
1777 printk(KERN_ERR "Spectra: Failed to register" 1668 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
1778 " MTD device: %d\n", ret); 1669 ret);
1779 goto failed_nand; 1670 goto failed_req_irq;
1780 } 1671 }
1781 return 0; 1672 return 0;
1782 1673
1783 failed_nand: 1674failed_req_irq:
1784 denali_irq_cleanup(dev->irq, denali); 1675 denali_irq_cleanup(dev->irq, denali);
1785 failed_request_irq: 1676failed_remap_mem:
1786 iounmap(denali->flash_reg);
1787 iounmap(denali->flash_mem); 1677 iounmap(denali->flash_mem);
1788 failed_remap_csr: 1678failed_remap_reg:
1679 iounmap(denali->flash_reg);
1680failed_req_regions:
1789 pci_release_regions(dev); 1681 pci_release_regions(dev);
1790 failed_req_csr: 1682failed_dma_map:
1791 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1683 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1792 PCI_DMA_BIDIRECTIONAL); 1684 PCI_DMA_BIDIRECTIONAL);
1793 failed_enable: 1685failed_enable_dev:
1686 pci_disable_device(dev);
 1687failed_alloc_memory:
1794 kfree(denali); 1688 kfree(denali);
1795 return ret; 1689 return ret;
1796} 1690}
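
The relabelled error path above follows the usual probe convention of unwinding only what has already been set up, in reverse order of acquisition. A generic sketch of that shape, with hypothetical acquire_*()/release_*() helpers standing in for pci_enable_device(), pci_request_regions(), request_irq() and friends:

	/* Illustrative pattern only: each label undoes exactly the steps that
	 * completed before the failure, newest first.  The helpers are
	 * placeholders, not functions from this driver. */
	int acquire_a(void), acquire_b(void), acquire_c(void);
	void release_a(void), release_b(void);

	static int sketch_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto out;
		ret = acquire_b();
		if (ret)
			goto undo_a;
		ret = acquire_c();
		if (ret)
			goto undo_b;
		return 0;		/* everything acquired */

	undo_b:
		release_b();		/* undo acquire_b() */
	undo_a:
		release_a();		/* undo acquire_a() */
	out:
		return ret;
	}
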
@@ -1800,9 +1694,6 @@ static void denali_pci_remove(struct pci_dev *dev)
1800{ 1694{
1801 struct denali_nand_info *denali = pci_get_drvdata(dev); 1695 struct denali_nand_info *denali = pci_get_drvdata(dev);
1802 1696
1803 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1804 __FILE__, __LINE__, __func__);
1805
1806 nand_release(&denali->mtd); 1697 nand_release(&denali->mtd);
1807 del_mtd_device(&denali->mtd); 1698 del_mtd_device(&denali->mtd);
1808 1699