Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0001.c')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  484
1 file changed, 289 insertions(+), 195 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c3fc9b2f21fb..143f01a4c170 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,9 +4,9 @@
  *
  * (C) 2000 Red Hat. GPL'd
  *
- * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
+ *
  *
- *
  * 10/10/2000 Nicolas Pitre <nico@cam.org>
  * - completely revamped method functions so they are aware and
  *   independent of the flash geometry (buswidth, interleave, etc.)
@@ -51,6 +51,7 @@
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_intelext_sync (struct mtd_info *);
 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
@@ -105,6 +106,7 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = {
 static void cfi_tell_features(struct cfi_pri_intelext *extp)
 {
     int i;
+    printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
     printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
     printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
     printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
@@ -116,36 +118,43 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
     printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
     printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
     printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
-    for (i=10; i<32; i++) {
-        if (extp->FeatureSupport & (1<<i))
+    printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
+    for (i=11; i<32; i++) {
+        if (extp->FeatureSupport & (1<<i))
             printk(" - Unknown Bit %X: supported\n", i);
     }
 
     printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
     printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
     for (i=1; i<8; i++) {
         if (extp->SuspendCmdSupport & (1<<i))
             printk(" - Unknown Bit %X: supported\n", i);
     }
 
     printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
     printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
-    printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
-    for (i=2; i<16; i++) {
+    printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+    for (i=2; i<3; i++) {
         if (extp->BlkStatusRegMask & (1<<i))
             printk(" - Unknown Bit %X Active: yes\n",i);
     }
-
-    printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+    printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
+    printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
+    for (i=6; i<16; i++) {
+        if (extp->BlkStatusRegMask & (1<<i))
+            printk(" - Unknown Bit %X Active: yes\n",i);
+    }
+
+    printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
         extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
     if (extp->VppOptimal)
         printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
             extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
 }
 #endif
 
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
 {
     struct map_info *map = mtd->priv;
@@ -176,7 +185,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
 {
     struct map_info *map = mtd->priv;
     struct cfi_private *cfi = map->fldrv_priv;
 
     cfi->cfiq->BufWriteTimeoutTyp = 0;  /* Not supported */
     cfi->cfiq->BufWriteTimeoutMax = 0;  /* Not supported */
 }
@@ -185,7 +194,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
 {
     struct map_info *map = mtd->priv;
     struct cfi_private *cfi = map->fldrv_priv;
 
     /* Note this is done after the region info is endian swapped */
     cfi->cfiq->EraseRegionInfo[1] =
         (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
@@ -207,12 +216,13 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
     if (cfi->cfiq->BufWriteTimeoutTyp) {
         printk(KERN_INFO "Using buffer write method\n" );
         mtd->write = cfi_intelext_write_buffers;
+        mtd->writev = cfi_intelext_writev;
     }
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
     { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
     { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
@@ -252,12 +262,21 @@ read_pri_intelext(struct map_info *map, __u16 adr)
     if (!extp)
         return NULL;
 
+    if (extp->MajorVersion != '1' ||
+        (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+        printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
+               "version %c.%c.\n", extp->MajorVersion,
+               extp->MinorVersion);
+        kfree(extp);
+        return NULL;
+    }
+
     /* Do some byteswapping if necessary */
     extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
     extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
     extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
 
-    if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
+    if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
         unsigned int extra_size = 0;
         int nb_parts, i;
 
@@ -266,7 +285,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                 sizeof(struct cfi_intelext_otpinfo);
 
         /* Burst Read info */
-        extra_size += 6;
+        extra_size += 2;
+        if (extp_size < sizeof(*extp) + extra_size)
+            goto need_more;
+        extra_size += extp->extra[extra_size-1];
 
         /* Number of hardware-partitions */
         extra_size += 1;
@@ -274,6 +296,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
             goto need_more;
         nb_parts = extp->extra[extra_size - 1];
 
+        /* skip the sizeof(partregion) field in CFI 1.4 */
+        if (extp->MinorVersion >= '4')
+            extra_size += 2;
+
         for (i = 0; i < nb_parts; i++) {
             struct cfi_intelext_regioninfo *rinfo;
             rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
@@ -285,6 +311,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                       * sizeof(struct cfi_intelext_blockinfo);
         }
 
+        if (extp->MinorVersion >= '4')
+            extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
+
         if (extp_size < sizeof(*extp) + extra_size) {
             need_more:
             extp_size = sizeof(*extp) + extra_size;
@@ -298,7 +327,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
             goto again;
         }
     }
 
     return extp;
 }
 
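Note on the extended-query parsing above: from CFI 1.3 on, the Burst Read info block in the extra[] tail is variable-length, so the code reserves two fixed bytes and then uses the second of them as a count of the bytes that follow. A minimal sketch of that walk over a raw byte buffer (hypothetical helper, not part of the driver):

/* Sketch: advance an offset past the variable-length Burst Read info block,
 * mirroring "extra_size += 2; extra_size += extra[extra_size-1];" above.
 * 'extra' is the extended-query tail as raw bytes. */
static unsigned int skip_burst_read_info(const unsigned char *extra,
                                         unsigned int offs)
{
    offs += 2;               /* fixed part: mode byte + length byte */
    offs += extra[offs - 1]; /* length byte counts the bytes that follow */
    return offs;
}

The same arithmetic appears again in cfi_intelext_partition_fixup() further down ("offs += extp->extra[offs+1]+2;").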
@@ -339,7 +368,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
     mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 
     if (cfi->cfi_mode == CFI_MODE_CFI) {
         /*
          * It's a real CFI chip, not one for which the probe
          * routine faked a CFI structure. So we read the feature
          * table from it.
@@ -354,14 +383,14 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
     }
 
     /* Install our own private info structure */
     cfi->cmdset_priv = extp;
 
     cfi_fixup(mtd, cfi_fixup_table);
 
 #ifdef DEBUG_CFI_FEATURES
     /* Tell the user about it in lots of lovely detail */
     cfi_tell_features(extp);
 #endif
 
     if(extp->SuspendCmdSupport & 1) {
         printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
@@ -379,10 +408,10 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
         cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
         cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
         cfi->chips[i].ref_point_counter = 0;
     }
 
     map->fldrv = &cfi_intelext_chipdrv;
 
     return cfi_intelext_setup(mtd);
 }
 
@@ -399,13 +428,13 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
     mtd->size = devsize * cfi->numchips;
 
     mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
     mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
             * mtd->numeraseregions, GFP_KERNEL);
     if (!mtd->eraseregions) {
         printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
         goto setup_err;
     }
 
     for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
         unsigned long ernum, ersize;
         ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -429,7 +458,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
     }
 
     for (i=0; i<mtd->numeraseregions;i++){
-        printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
+        printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                i,mtd->eraseregions[i].offset,
                mtd->eraseregions[i].erasesize,
                mtd->eraseregions[i].numblocks);
@@ -480,7 +509,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
      * arrangement at this point. This can be rearranged in the future
      * if someone feels motivated enough. --nico
      */
-    if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
+    if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
         && extp->FeatureSupport & (1 << 9)) {
         struct cfi_private *newcfi;
         struct flchip *chip;
@@ -492,12 +521,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
               sizeof(struct cfi_intelext_otpinfo);
 
         /* Burst Read info */
-        offs += 6;
+        offs += extp->extra[offs+1]+2;
 
         /* Number of partition regions */
         numregions = extp->extra[offs];
         offs += 1;
 
+        /* skip the sizeof(partregion) field in CFI 1.4 */
+        if (extp->MinorVersion >= '4')
+            offs += 2;
+
         /* Number of hardware partitions */
         numparts = 0;
         for (i = 0; i < numregions; i++) {
@@ -509,6 +542,20 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                    sizeof(struct cfi_intelext_blockinfo);
         }
 
+        /* Programming Region info */
+        if (extp->MinorVersion >= '4') {
+            struct cfi_intelext_programming_regioninfo *prinfo;
+            prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
+            MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
+            MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
+            MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
+            mtd->flags |= MTD_PROGRAM_REGIONS;
+            printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
+                   map->name, MTD_PROGREGION_SIZE(mtd),
+                   MTD_PROGREGION_CTRLMODE_VALID(mtd),
+                   MTD_PROGREGION_CTRLMODE_INVALID(mtd));
+        }
+
         /*
          * All functions below currently rely on all chips having
          * the same geometry so we'll just assume that all hardware
@@ -653,8 +700,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                 break;
 
             if (time_after(jiffies, timeo)) {
-                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
-                       status.x[0]);
+                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
+                       map->name, status.x[0]);
                 return -EIO;
             }
             spin_unlock(chip->mutex);
@@ -663,7 +710,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
             /* Someone else might have been playing with it. */
             goto retry;
         }
 
     case FL_READY:
     case FL_CFI_QUERY:
     case FL_JEDEC_QUERY:
@@ -701,8 +748,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
             map_write(map, CMD(0x70), adr);
             chip->state = FL_ERASING;
             chip->oldstate = FL_READY;
-            printk(KERN_ERR "Chip not ready after erase "
-                   "suspended: status = 0x%lx\n", status.x[0]);
+            printk(KERN_ERR "%s: Chip not ready after erase "
+                   "suspended: status = 0x%lx\n", map->name, status.x[0]);
             return -EIO;
         }
 
@@ -782,14 +829,14 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
     switch(chip->oldstate) {
     case FL_ERASING:
         chip->state = chip->oldstate;
         /* What if one interleaved chip has finished and the
            other hasn't? The old code would leave the finished
            one in READY mode. That's bad, and caused -EROFS
            errors to be returned from do_erase_oneblock because
            that's the only bit it checked for at the time.
            As the state machine appears to explicitly allow
            sending the 0x70 (Read Status) command to an erasing
            chip and expecting it to be ignored, that's what we
            do. */
         map_write(map, CMD(0xd0), adr);
         map_write(map, CMD(0x70), adr);
@@ -809,7 +856,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
         DISABLE_VPP(map);
         break;
     default:
-        printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
+        printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
     }
     wake_up(&chip->wq);
 }
@@ -1025,8 +1072,8 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
 
     adr += chip->start;
 
     /* Ensure cmd read/writes are aligned. */
     cmd_addr = adr & ~(map_bankwidth(map)-1);
 
     spin_lock(chip->mutex);
 
@@ -1054,7 +1101,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
 
     if (!map->virt || (from + len > mtd->size))
         return -EINVAL;
 
     *mtdbuf = (void *)map->virt + from;
     *retlen = 0;
 
@@ -1081,7 +1128,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
 
         *retlen += thislen;
         len -= thislen;
 
         ofs = 0;
         chipnum++;
     }
@@ -1120,7 +1167,7 @@ static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro
             if(chip->ref_point_counter == 0)
                 chip->state = FL_READY;
         } else
-            printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
+            printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
 
         put_chip(map, chip, chip->start);
         spin_unlock(chip->mutex);
@@ -1139,8 +1186,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 
     adr += chip->start;
 
     /* Ensure cmd read/writes are aligned. */
     cmd_addr = adr & ~(map_bankwidth(map)-1);
 
     spin_lock(chip->mutex);
     ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -1195,7 +1242,7 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
         *retlen += thislen;
         len -= thislen;
         buf += thislen;
 
         ofs = 0;
         chipnum++;
     }
@@ -1212,12 +1259,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 
     adr += chip->start;
 
-    /* Let's determine this according to the interleave only once */
+    /* Let's determine those according to the interleave only once */
     status_OK = CMD(0x80);
     switch (mode) {
-    case FL_WRITING: write_cmd = CMD(0x40); break;
-    case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
-    default: return -EINVAL;
+    case FL_WRITING:
+        write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
+        break;
+    case FL_OTP_WRITE:
+        write_cmd = CMD(0xc0);
+        break;
+    default:
+        return -EINVAL;
     }
 
     spin_lock(chip->mutex);
@@ -1258,12 +1310,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
         status = map_read(map, adr);
         if (map_word_andequal(map, status, status_OK, status_OK))
             break;
 
         /* OK Still waiting */
         if (time_after(jiffies, timeo)) {
+            map_write(map, CMD(0x70), adr);
             chip->state = FL_STATUS;
             xip_enable(map, chip, adr);
-            printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
+            printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
             ret = -EIO;
             goto out;
         }
@@ -1275,27 +1328,39 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
     if (!z) {
         chip->word_write_time--;
         if (!chip->word_write_time)
-            chip->word_write_time++;
+            chip->word_write_time = 1;
     }
     if (z > 1)
         chip->word_write_time++;
 
     /* Done and happy. */
     chip->state = FL_STATUS;
 
-    /* check for lock bit */
-    if (map_word_bitsset(map, status, CMD(0x02))) {
-        /* clear status */
+    /* check for errors */
+    if (map_word_bitsset(map, status, CMD(0x1a))) {
+        unsigned long chipstatus = MERGESTATUS(status);
+
+        /* reset status */
         map_write(map, CMD(0x50), adr);
-        /* put back into read status register mode */
         map_write(map, CMD(0x70), adr);
-        ret = -EROFS;
+        xip_enable(map, chip, adr);
+
+        if (chipstatus & 0x02) {
+            ret = -EROFS;
+        } else if (chipstatus & 0x08) {
+            printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
+            ret = -EIO;
+        } else {
+            printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
+            ret = -EINVAL;
+        }
+
+        goto out;
     }
 
     xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
     spin_unlock(chip->mutex);
-
     return ret;
 }
 
@@ -1328,7 +1393,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 
         ret = do_write_oneword(map, &cfi->chips[chipnum],
                        bus_ofs, datum, FL_WRITING);
         if (ret)
             return ret;
 
         len -= n;
@@ -1337,13 +1402,13 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
         (*retlen) += n;
 
         if (ofs >> cfi->chipshift) {
             chipnum ++;
             ofs = 0;
             if (chipnum == cfi->numchips)
                 return 0;
         }
     }
 
     while(len >= map_bankwidth(map)) {
         map_word datum = map_word_load(map, buf);
 
@@ -1358,7 +1423,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
         len -= map_bankwidth(map);
 
         if (ofs >> cfi->chipshift) {
             chipnum ++;
             ofs = 0;
             if (chipnum == cfi->numchips)
                 return 0;
@@ -1373,9 +1438,9 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 
         ret = do_write_oneword(map, &cfi->chips[chipnum],
                        ofs, datum, FL_WRITING);
         if (ret)
             return ret;
 
         (*retlen) += len;
     }
 
@@ -1383,20 +1448,24 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 }
 
 
 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
-                    unsigned long adr, const u_char *buf, int len)
+                    unsigned long adr, const struct kvec **pvec,
+                    unsigned long *pvec_seek, int len)
 {
     struct cfi_private *cfi = map->fldrv_priv;
-    map_word status, status_OK;
+    map_word status, status_OK, write_cmd, datum;
     unsigned long cmd_adr, timeo;
-    int wbufsize, z, ret=0, bytes, words;
+    int wbufsize, z, ret=0, word_gap, words;
+    const struct kvec *vec;
+    unsigned long vec_seek;
 
     wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
     adr += chip->start;
     cmd_adr = adr & ~(wbufsize-1);
 
     /* Let's determine this according to the interleave only once */
     status_OK = CMD(0x80);
+    write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
 
     spin_lock(chip->mutex);
     ret = get_chip(map, chip, cmd_adr, FL_WRITING);
@@ -1410,7 +1479,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
     xip_disable(map, chip, cmd_adr);
 
     /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
        [...], the device will not accept any more Write to Buffer commands".
        So we must check here and reset those bits if they're set. Otherwise
        we're just pissing in the wind */
     if (chip->state != FL_STATUS)
@@ -1428,7 +1497,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 
     z = 0;
     for (;;) {
-        map_write(map, CMD(0xe8), cmd_adr);
+        map_write(map, write_cmd, cmd_adr);
 
         status = map_read(map, cmd_adr);
         if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1446,41 +1515,66 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
             map_write(map, CMD(0x50), cmd_adr);
             map_write(map, CMD(0x70), cmd_adr);
             xip_enable(map, chip, cmd_adr);
-            printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
-                   status.x[0], Xstatus.x[0]);
+            printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
+                   map->name, status.x[0], Xstatus.x[0]);
             ret = -EIO;
             goto out;
         }
     }
 
+    /* Figure out the number of words to write */
+    word_gap = (-adr & (map_bankwidth(map)-1));
+    words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+    if (!word_gap) {
+        words--;
+    } else {
+        word_gap = map_bankwidth(map) - word_gap;
+        adr -= word_gap;
+        datum = map_word_ff(map);
+    }
+
     /* Write length of data to come */
-    bytes = len & (map_bankwidth(map)-1);
-    words = len / map_bankwidth(map);
-    map_write(map, CMD(words - !bytes), cmd_adr );
+    map_write(map, CMD(words), cmd_adr );
 
     /* Write data */
-    z = 0;
-    while(z < words * map_bankwidth(map)) {
-        map_word datum = map_word_load(map, buf);
-        map_write(map, datum, adr+z);
+    vec = *pvec;
+    vec_seek = *pvec_seek;
+    do {
+        int n = map_bankwidth(map) - word_gap;
+        if (n > vec->iov_len - vec_seek)
+            n = vec->iov_len - vec_seek;
+        if (n > len)
+            n = len;
 
-        z += map_bankwidth(map);
-        buf += map_bankwidth(map);
-    }
+        if (!word_gap && len < map_bankwidth(map))
+            datum = map_word_ff(map);
 
-    if (bytes) {
-        map_word datum;
+        datum = map_word_load_partial(map, datum,
+                          vec->iov_base + vec_seek,
+                          word_gap, n);
 
-        datum = map_word_ff(map);
-        datum = map_word_load_partial(map, datum, buf, 0, bytes);
-        map_write(map, datum, adr+z);
-    }
+        len -= n;
+        word_gap += n;
+        if (!len || word_gap == map_bankwidth(map)) {
+            map_write(map, datum, adr);
+            adr += map_bankwidth(map);
+            word_gap = 0;
+        }
+
+        vec_seek += n;
+        if (vec_seek == vec->iov_len) {
+            vec++;
+            vec_seek = 0;
+        }
+    } while (len);
+    *pvec = vec;
+    *pvec_seek = vec_seek;
 
     /* GO GO GO */
     map_write(map, CMD(0xd0), cmd_adr);
     chip->state = FL_WRITING;
 
     INVALIDATE_CACHE_UDELAY(map, chip,
                 cmd_adr, len,
                 chip->buffer_write_time);
 
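Note on the word_gap logic above: the buffer-write path now takes its data from a kvec chain and may start at an address that is not bus-aligned; word_gap ends up holding the number of pad bytes in front of the first payload byte, those pads are pre-filled with 0xFF so they leave the flash contents untouched, and the count written to the chip is the usual "word count minus one". A small standalone sketch of the arithmetic (illustrative only):

#include <stdio.h>

/* Sketch: compute the aligned start address, the number of 0xFF pad bytes and
 * the count-minus-one field, mirroring do_write_buffer() above. */
static void word_gap_example(unsigned long adr, int bankwidth, int len)
{
    int gap = (-adr) & (bankwidth - 1);                 /* bytes up to the next boundary */
    int words = (len - gap + bankwidth - 1) / bankwidth;

    if (!gap)
        words--;                    /* device expects "word count - 1" */
    else {
        gap = bankwidth - gap;      /* pad bytes in front of the payload */
        adr -= gap;                 /* write starts on the aligned word */
    }
    printf("start 0x%lx, %d pad byte(s), count field %d\n", adr, gap, words);
}

For example, adr=0x1003, bankwidth=4, len=10 gives an aligned start of 0x1000, 3 pad bytes and a count field of 3 (four bus words in total).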
@@ -1506,13 +1600,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 
         /* OK Still waiting */
         if (time_after(jiffies, timeo)) {
+            map_write(map, CMD(0x70), cmd_adr);
             chip->state = FL_STATUS;
             xip_enable(map, chip, cmd_adr);
-            printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+            printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
             ret = -EIO;
             goto out;
         }
 
         /* Latency issues. Drop the lock, wait a while and retry */
         z++;
         UDELAY(map, chip, cmd_adr, 1);
@@ -1520,21 +1615,34 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
     if (!z) {
         chip->buffer_write_time--;
         if (!chip->buffer_write_time)
-            chip->buffer_write_time++;
+            chip->buffer_write_time = 1;
     }
     if (z > 1)
         chip->buffer_write_time++;
 
     /* Done and happy. */
     chip->state = FL_STATUS;
 
-    /* check for lock bit */
-    if (map_word_bitsset(map, status, CMD(0x02))) {
-        /* clear status */
+    /* check for errors */
+    if (map_word_bitsset(map, status, CMD(0x1a))) {
+        unsigned long chipstatus = MERGESTATUS(status);
+
+        /* reset status */
         map_write(map, CMD(0x50), cmd_adr);
-        /* put back into read status register mode */
-        map_write(map, CMD(0x70), adr);
-        ret = -EROFS;
+        map_write(map, CMD(0x70), cmd_adr);
+        xip_enable(map, chip, cmd_adr);
+
+        if (chipstatus & 0x02) {
+            ret = -EROFS;
+        } else if (chipstatus & 0x08) {
+            printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
+            ret = -EIO;
+        } else {
+            printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
+            ret = -EINVAL;
+        }
+
+        goto out;
     }
 
     xip_enable(map, chip, cmd_adr);
@@ -1543,70 +1651,65 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
     return ret;
 }
 
-static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
-                       size_t len, size_t *retlen, const u_char *buf)
+static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
+                unsigned long count, loff_t to, size_t *retlen)
 {
     struct map_info *map = mtd->priv;
     struct cfi_private *cfi = map->fldrv_priv;
     int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
     int ret = 0;
     int chipnum;
-    unsigned long ofs;
+    unsigned long ofs, vec_seek, i;
+    size_t len = 0;
+
+    for (i = 0; i < count; i++)
+        len += vecs[i].iov_len;
 
     *retlen = 0;
     if (!len)
         return 0;
 
     chipnum = to >> cfi->chipshift;
     ofs = to - (chipnum << cfi->chipshift);
-
-    /* If it's not bus-aligned, do the first word write */
-    if (ofs & (map_bankwidth(map)-1)) {
-        size_t local_len = (-ofs)&(map_bankwidth(map)-1);
-        if (local_len > len)
-            local_len = len;
-        ret = cfi_intelext_write_words(mtd, to, local_len,
-                           retlen, buf);
-        if (ret)
-            return ret;
-        ofs += local_len;
-        buf += local_len;
-        len -= local_len;
-
-        if (ofs >> cfi->chipshift) {
-            chipnum ++;
-            ofs = 0;
-            if (chipnum == cfi->numchips)
-                return 0;
-        }
-    }
+    vec_seek = 0;
 
-    while(len) {
+    do {
         /* We must not cross write block boundaries */
         int size = wbufsize - (ofs & (wbufsize-1));
 
         if (size > len)
             size = len;
         ret = do_write_buffer(map, &cfi->chips[chipnum],
-                      ofs, buf, size);
+                      ofs, &vecs, &vec_seek, size);
         if (ret)
             return ret;
 
         ofs += size;
-        buf += size;
         (*retlen) += size;
         len -= size;
 
         if (ofs >> cfi->chipshift) {
             chipnum ++;
             ofs = 0;
             if (chipnum == cfi->numchips)
                 return 0;
         }
-    }
+    } while (len);
+
     return 0;
 }
 
+static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
+                       size_t len, size_t *retlen, const u_char *buf)
+{
+    struct kvec vec;
+
+    vec.iov_base = (void *) buf;
+    vec.iov_len = len;
+
+    return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
+}
+
 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
                       unsigned long adr, int len, void *thunk)
 {
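Note on the new entry point: cfi_intelext_write_buffers() is now just a single-kvec wrapper around cfi_intelext_writev(), and the fixup above also exports the latter through mtd->writev. A caller holding data in two separate buffers could hand them over in one call, roughly like the following sketch (kernel context assumed; the function and buffer names are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/uio.h>

/* Sketch: scatter-write two buffers through the new writev entry point. */
static int example_scatter_write(struct mtd_info *mtd, loff_t to,
                                 void *hdr, size_t hdr_len,
                                 void *payload, size_t payload_len)
{
    struct kvec vecs[2];
    size_t retlen;

    vecs[0].iov_base = hdr;
    vecs[0].iov_len  = hdr_len;
    vecs[1].iov_base = payload;
    vecs[1].iov_len  = payload_len;

    /* lands in cfi_intelext_writev() when the buffer write method is in use */
    return mtd->writev(mtd, vecs, 2, to, &retlen);
}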
@@ -1672,23 +1775,17 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
         status = map_read(map, adr);
         if (map_word_andequal(map, status, status_OK, status_OK))
             break;
 
         /* OK Still waiting */
         if (time_after(jiffies, timeo)) {
-            map_word Xstatus;
             map_write(map, CMD(0x70), adr);
             chip->state = FL_STATUS;
-            Xstatus = map_read(map, adr);
-            /* Clear status bits */
-            map_write(map, CMD(0x50), adr);
-            map_write(map, CMD(0x70), adr);
             xip_enable(map, chip, adr);
-            printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
-                   adr, status.x[0], Xstatus.x[0]);
+            printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
             ret = -EIO;
             goto out;
         }
 
         /* Latency issues. Drop the lock, wait a while and retry */
         UDELAY(map, chip, adr, 1000000/HZ);
     }
@@ -1698,43 +1795,40 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
     chip->state = FL_STATUS;
     status = map_read(map, adr);
 
-    /* check for lock bit */
+    /* check for errors */
     if (map_word_bitsset(map, status, CMD(0x3a))) {
-        unsigned long chipstatus;
+        unsigned long chipstatus = MERGESTATUS(status);
 
         /* Reset the error bits */
         map_write(map, CMD(0x50), adr);
         map_write(map, CMD(0x70), adr);
         xip_enable(map, chip, adr);
 
-        chipstatus = MERGESTATUS(status);
-
         if ((chipstatus & 0x30) == 0x30) {
-            printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
-            ret = -EIO;
+            printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
+            ret = -EINVAL;
         } else if (chipstatus & 0x02) {
             /* Protection bit set */
             ret = -EROFS;
         } else if (chipstatus & 0x8) {
             /* Voltage */
-            printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
+            printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
             ret = -EIO;
-        } else if (chipstatus & 0x20) {
-            if (retries--) {
-                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
-                timeo = jiffies + HZ;
-                put_chip(map, chip, adr);
-                spin_unlock(chip->mutex);
-                goto retry;
-            }
-            printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
+        } else if (chipstatus & 0x20 && retries--) {
+            printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+            timeo = jiffies + HZ;
+            put_chip(map, chip, adr);
+            spin_unlock(chip->mutex);
+            goto retry;
+        } else {
+            printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
             ret = -EIO;
         }
-    } else {
-        xip_enable(map, chip, adr);
-        ret = 0;
+
+        goto out;
     }
 
+    xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
     spin_unlock(chip->mutex);
     return ret;
@@ -1754,7 +1848,7 @@ int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
 
     instr->state = MTD_ERASE_DONE;
     mtd_erase_callback(instr);
 
     return 0;
 }
 
@@ -1775,7 +1869,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
         if (!ret) {
             chip->oldstate = chip->state;
             chip->state = FL_SYNCING;
             /* No need to wake_up() on this state change -
              * as the whole point is that nobody can do anything
              * with the chip now anyway.
              */
@@ -1789,7 +1883,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
         chip = &cfi->chips[i];
 
         spin_lock(chip->mutex);
 
         if (chip->state == FL_SYNCING) {
             chip->state = chip->oldstate;
             chip->oldstate = FL_READY;
@@ -1846,7 +1940,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
 
     ENABLE_VPP(map);
     xip_disable(map, chip, adr);
 
     map_write(map, CMD(0x60), adr);
     if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
         map_write(map, CMD(0x01), adr);
@@ -1874,25 +1968,22 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
         status = map_read(map, adr);
         if (map_word_andequal(map, status, status_OK, status_OK))
             break;
 
         /* OK Still waiting */
         if (time_after(jiffies, timeo)) {
-            map_word Xstatus;
             map_write(map, CMD(0x70), adr);
             chip->state = FL_STATUS;
-            Xstatus = map_read(map, adr);
             xip_enable(map, chip, adr);
-            printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
-                   status.x[0], Xstatus.x[0]);
+            printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
             put_chip(map, chip, adr);
             spin_unlock(chip->mutex);
             return -EIO;
         }
 
         /* Latency issues. Drop the lock, wait a while and retry */
         UDELAY(map, chip, adr, 1);
     }
 
     /* Done and happy. */
     chip->state = FL_STATUS;
     xip_enable(map, chip, adr);
@@ -1912,9 +2003,9 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
             ofs, len, 0);
 #endif
 
     ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
                    ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
 
 #ifdef DEBUG_LOCK_BITS
     printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
            __FUNCTION__, ret);
@@ -1938,20 +2029,20 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
     ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
                    ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
 
 #ifdef DEBUG_LOCK_BITS
     printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
            __FUNCTION__, ret);
     cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
              ofs, len, 0);
 #endif
 
     return ret;
 }
 
 #ifdef CONFIG_MTD_OTP
 
 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
             u_long data_offset, u_char *buf, u_int size,
             u_long prot_offset, u_int groupno, u_int groupsize);
 
@@ -2002,7 +2093,7 @@ do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
 
         datum = map_word_load_partial(map, datum, buf, gap, n);
         ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
         if (ret)
             return ret;
 
         offset += n;
@@ -2195,7 +2286,7 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
                      NULL, do_otp_lock, 1);
 }
 
 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
                        struct otp_info *buf, size_t len)
 {
     size_t retlen;
@@ -2238,7 +2329,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
         if (chip->oldstate == FL_READY) {
             chip->oldstate = chip->state;
             chip->state = FL_PM_SUSPENDED;
             /* No need to wake_up() on this state change -
              * as the whole point is that nobody can do anything
              * with the chip now anyway.
              */
@@ -2266,9 +2357,9 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
     if (ret) {
         for (i--; i >=0; i--) {
             chip = &cfi->chips[i];
 
             spin_lock(chip->mutex);
 
             if (chip->state == FL_PM_SUSPENDED) {
                 /* No need to force it into a known state here,
                    because we're returning failure, and it didn't
@@ -2279,8 +2370,8 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
             }
             spin_unlock(chip->mutex);
         }
     }
 
     return ret;
 }
2286 2377
@@ -2292,11 +2383,11 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2292 struct flchip *chip; 2383 struct flchip *chip;
2293 2384
2294 for (i=0; i<cfi->numchips; i++) { 2385 for (i=0; i<cfi->numchips; i++) {
2295 2386
2296 chip = &cfi->chips[i]; 2387 chip = &cfi->chips[i];
2297 2388
2298 spin_lock(chip->mutex); 2389 spin_lock(chip->mutex);
2299 2390
2300 /* Go to known state. Chip may have been power cycled */ 2391 /* Go to known state. Chip may have been power cycled */
2301 if (chip->state == FL_PM_SUSPENDED) { 2392 if (chip->state == FL_PM_SUSPENDED) {
2302 map_write(map, CMD(0xFF), cfi->chips[i].start); 2393 map_write(map, CMD(0xFF), cfi->chips[i].start);
@@ -2318,7 +2409,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
         struct flchip *chip = &cfi->chips[i];
 
         /* force the completion of any ongoing operation
            and switch to array mode so any bootloader in
            flash is accessible for soft reboot. */
         spin_lock(chip->mutex);
         ret = get_chip(map, chip, chip->start, FL_SYNCING);
@@ -2355,20 +2446,23 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
     kfree(mtd->eraseregions);
 }
 
-static char im_name_1[]="cfi_cmdset_0001";
-static char im_name_3[]="cfi_cmdset_0003";
+static char im_name_0001[] = "cfi_cmdset_0001";
+static char im_name_0003[] = "cfi_cmdset_0003";
+static char im_name_0200[] = "cfi_cmdset_0200";
 
 static int __init cfi_intelext_init(void)
 {
-    inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
-    inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
+    inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
+    inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
+    inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
     return 0;
 }
 
 static void __exit cfi_intelext_exit(void)
 {
-    inter_module_unregister(im_name_1);
-    inter_module_unregister(im_name_3);
+    inter_module_unregister(im_name_0001);
+    inter_module_unregister(im_name_0003);
+    inter_module_unregister(im_name_0200);
 }
 
 module_init(cfi_intelext_init);