path: root/drivers/mtd/chips/cfi_cmdset_0001.c
Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0001.c')
-rw-r--r--   drivers/mtd/chips/cfi_cmdset_0001.c | 487
1 file changed, 290 insertions(+), 197 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0cfcd88468e0..143f01a4c170 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,9 +4,9 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $ 7 * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
8 *
8 * 9 *
9 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org> 10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and 11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.) 12 * independent of the flash geometry (buswidth, interleave, etc.)
@@ -51,6 +51,7 @@
51static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 51static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 52static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 53static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
54static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); 55static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
55static void cfi_intelext_sync (struct mtd_info *); 56static void cfi_intelext_sync (struct mtd_info *);
56static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 57static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
@@ -105,6 +106,7 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = {
105static void cfi_tell_features(struct cfi_pri_intelext *extp) 106static void cfi_tell_features(struct cfi_pri_intelext *extp)
106{ 107{
107 int i; 108 int i;
109 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
108 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport); 110 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
109 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported"); 111 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
110 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported"); 112 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
@@ -116,36 +118,43 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
116 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); 118 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
117 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); 119 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
118 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported"); 120 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
119 for (i=10; i<32; i++) { 121 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
120 if (extp->FeatureSupport & (1<<i)) 122 for (i=11; i<32; i++) {
123 if (extp->FeatureSupport & (1<<i))
121 printk(" - Unknown Bit %X: supported\n", i); 124 printk(" - Unknown Bit %X: supported\n", i);
122 } 125 }
123 126
124 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); 127 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
125 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); 128 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
126 for (i=1; i<8; i++) { 129 for (i=1; i<8; i++) {
127 if (extp->SuspendCmdSupport & (1<<i)) 130 if (extp->SuspendCmdSupport & (1<<i))
128 printk(" - Unknown Bit %X: supported\n", i); 131 printk(" - Unknown Bit %X: supported\n", i);
129 } 132 }
130 133
131 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); 134 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
132 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); 135 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
133 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); 136 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
134 for (i=2; i<16; i++) { 137 for (i=2; i<3; i++) {
135 if (extp->BlkStatusRegMask & (1<<i)) 138 if (extp->BlkStatusRegMask & (1<<i))
136 printk(" - Unknown Bit %X Active: yes\n",i); 139 printk(" - Unknown Bit %X Active: yes\n",i);
137 } 140 }
138 141 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
139 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 142 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
143 for (i=6; i<16; i++) {
144 if (extp->BlkStatusRegMask & (1<<i))
145 printk(" - Unknown Bit %X Active: yes\n",i);
146 }
147
148 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
140 extp->VccOptimal >> 4, extp->VccOptimal & 0xf); 149 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
141 if (extp->VppOptimal) 150 if (extp->VppOptimal)
142 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 151 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
143 extp->VppOptimal >> 4, extp->VppOptimal & 0xf); 152 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
144} 153}
145#endif 154#endif
146 155
147#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 156#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
148/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 157/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
149static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 158static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
150{ 159{
151 struct map_info *map = mtd->priv; 160 struct map_info *map = mtd->priv;
@@ -176,7 +185,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
176{ 185{
177 struct map_info *map = mtd->priv; 186 struct map_info *map = mtd->priv;
178 struct cfi_private *cfi = map->fldrv_priv; 187 struct cfi_private *cfi = map->fldrv_priv;
179 188
180 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */ 189 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
181 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */ 190 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
182} 191}
@@ -185,7 +194,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
185{ 194{
186 struct map_info *map = mtd->priv; 195 struct map_info *map = mtd->priv;
187 struct cfi_private *cfi = map->fldrv_priv; 196 struct cfi_private *cfi = map->fldrv_priv;
188 197
189 /* Note this is done after the region info is endian swapped */ 198 /* Note this is done after the region info is endian swapped */
190 cfi->cfiq->EraseRegionInfo[1] = 199 cfi->cfiq->EraseRegionInfo[1] =
191 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 200 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
@@ -207,12 +216,13 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
207 if (cfi->cfiq->BufWriteTimeoutTyp) { 216 if (cfi->cfiq->BufWriteTimeoutTyp) {
208 printk(KERN_INFO "Using buffer write method\n" ); 217 printk(KERN_INFO "Using buffer write method\n" );
209 mtd->write = cfi_intelext_write_buffers; 218 mtd->write = cfi_intelext_write_buffers;
219 mtd->writev = cfi_intelext_writev;
210 } 220 }
211} 221}
212 222
213static struct cfi_fixup cfi_fixup_table[] = { 223static struct cfi_fixup cfi_fixup_table[] = {
214#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 224#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
215 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 225 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
216#endif 226#endif
217#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 227#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
218 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL }, 228 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
@@ -252,12 +262,21 @@ read_pri_intelext(struct map_info *map, __u16 adr)
252 if (!extp) 262 if (!extp)
253 return NULL; 263 return NULL;
254 264
265 if (extp->MajorVersion != '1' ||
266 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
267 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
268 "version %c.%c.\n", extp->MajorVersion,
269 extp->MinorVersion);
270 kfree(extp);
271 return NULL;
272 }
273
255 /* Do some byteswapping if necessary */ 274 /* Do some byteswapping if necessary */
256 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport); 275 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
257 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask); 276 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
258 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr); 277 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
259 278
260 if (extp->MajorVersion == '1' && extp->MinorVersion == '3') { 279 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
261 unsigned int extra_size = 0; 280 unsigned int extra_size = 0;
262 int nb_parts, i; 281 int nb_parts, i;
263 282
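
The version gate added in the hunk above can be read in isolation: only Intel/Sharp extended query tables with major version '1' and a minor version between '0' and '4' are accepted, so the later hunks can safely key their parsing off MinorVersion. A minimal sketch of that check (the helper name is hypothetical; it assumes struct cfi_pri_intelext from <linux/mtd/cfi.h>):

/* Assumes struct cfi_pri_intelext from <linux/mtd/cfi.h>. */
static int intelext_query_version_ok(const struct cfi_pri_intelext *extp)
{
	/* only layouts this driver knows how to parse: 1.0 .. 1.4 */
	return extp->MajorVersion == '1' &&
	       extp->MinorVersion >= '0' && extp->MinorVersion <= '4';
}
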
@@ -266,7 +285,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
266 sizeof(struct cfi_intelext_otpinfo); 285 sizeof(struct cfi_intelext_otpinfo);
267 286
268 /* Burst Read info */ 287 /* Burst Read info */
269 extra_size += 6; 288 extra_size += 2;
289 if (extp_size < sizeof(*extp) + extra_size)
290 goto need_more;
291 extra_size += extp->extra[extra_size-1];
270 292
271 /* Number of hardware-partitions */ 293 /* Number of hardware-partitions */
272 extra_size += 1; 294 extra_size += 1;
@@ -274,6 +296,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
274 goto need_more; 296 goto need_more;
275 nb_parts = extp->extra[extra_size - 1]; 297 nb_parts = extp->extra[extra_size - 1];
276 298
299 /* skip the sizeof(partregion) field in CFI 1.4 */
300 if (extp->MinorVersion >= '4')
301 extra_size += 2;
302
277 for (i = 0; i < nb_parts; i++) { 303 for (i = 0; i < nb_parts; i++) {
278 struct cfi_intelext_regioninfo *rinfo; 304 struct cfi_intelext_regioninfo *rinfo;
279 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size]; 305 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
@@ -285,6 +311,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
285 * sizeof(struct cfi_intelext_blockinfo); 311 * sizeof(struct cfi_intelext_blockinfo);
286 } 312 }
287 313
314 if (extp->MinorVersion >= '4')
315 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
316
288 if (extp_size < sizeof(*extp) + extra_size) { 317 if (extp_size < sizeof(*extp) + extra_size) {
289 need_more: 318 need_more:
290 extp_size = sizeof(*extp) + extra_size; 319 extp_size = sizeof(*extp) + extra_size;
@@ -298,7 +327,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
298 goto again; 327 goto again;
299 } 328 }
300 } 329 }
301 330
302 return extp; 331 return extp;
303} 332}
304 333
@@ -339,7 +368,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
339 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; 368 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
340 369
341 if (cfi->cfi_mode == CFI_MODE_CFI) { 370 if (cfi->cfi_mode == CFI_MODE_CFI) {
342 /* 371 /*
343 * It's a real CFI chip, not one for which the probe 372 * It's a real CFI chip, not one for which the probe
344 * routine faked a CFI structure. So we read the feature 373 * routine faked a CFI structure. So we read the feature
345 * table from it. 374 * table from it.
@@ -354,14 +383,14 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
354 } 383 }
355 384
356 /* Install our own private info structure */ 385 /* Install our own private info structure */
357 cfi->cmdset_priv = extp; 386 cfi->cmdset_priv = extp;
358 387
359 cfi_fixup(mtd, cfi_fixup_table); 388 cfi_fixup(mtd, cfi_fixup_table);
360 389
361#ifdef DEBUG_CFI_FEATURES 390#ifdef DEBUG_CFI_FEATURES
362 /* Tell the user about it in lots of lovely detail */ 391 /* Tell the user about it in lots of lovely detail */
363 cfi_tell_features(extp); 392 cfi_tell_features(extp);
364#endif 393#endif
365 394
366 if(extp->SuspendCmdSupport & 1) { 395 if(extp->SuspendCmdSupport & 1) {
367 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n"); 396 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
@@ -379,10 +408,10 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
379 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 408 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
380 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 409 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
381 cfi->chips[i].ref_point_counter = 0; 410 cfi->chips[i].ref_point_counter = 0;
382 } 411 }
383 412
384 map->fldrv = &cfi_intelext_chipdrv; 413 map->fldrv = &cfi_intelext_chipdrv;
385 414
386 return cfi_intelext_setup(mtd); 415 return cfi_intelext_setup(mtd);
387} 416}
388 417
@@ -399,13 +428,13 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
399 mtd->size = devsize * cfi->numchips; 428 mtd->size = devsize * cfi->numchips;
400 429
401 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 430 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
402 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 431 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
403 * mtd->numeraseregions, GFP_KERNEL); 432 * mtd->numeraseregions, GFP_KERNEL);
404 if (!mtd->eraseregions) { 433 if (!mtd->eraseregions) {
405 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); 434 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
406 goto setup_err; 435 goto setup_err;
407 } 436 }
408 437
409 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 438 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
410 unsigned long ernum, ersize; 439 unsigned long ernum, ersize;
411 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 440 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -429,7 +458,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
429 } 458 }
430 459
431 for (i=0; i<mtd->numeraseregions;i++){ 460 for (i=0; i<mtd->numeraseregions;i++){
432 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", 461 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
433 i,mtd->eraseregions[i].offset, 462 i,mtd->eraseregions[i].offset,
434 mtd->eraseregions[i].erasesize, 463 mtd->eraseregions[i].erasesize,
435 mtd->eraseregions[i].numblocks); 464 mtd->eraseregions[i].numblocks);
@@ -455,8 +484,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
455 484
456 setup_err: 485 setup_err:
457 if(mtd) { 486 if(mtd) {
458 if(mtd->eraseregions) 487 kfree(mtd->eraseregions);
459 kfree(mtd->eraseregions);
460 kfree(mtd); 488 kfree(mtd);
461 } 489 }
462 kfree(cfi->cmdset_priv); 490 kfree(cfi->cmdset_priv);
@@ -481,7 +509,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
481 * arrangement at this point. This can be rearranged in the future 509 * arrangement at this point. This can be rearranged in the future
482 * if someone feels motivated enough. --nico 510 * if someone feels motivated enough. --nico
483 */ 511 */
484 if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3' 512 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
485 && extp->FeatureSupport & (1 << 9)) { 513 && extp->FeatureSupport & (1 << 9)) {
486 struct cfi_private *newcfi; 514 struct cfi_private *newcfi;
487 struct flchip *chip; 515 struct flchip *chip;
@@ -493,12 +521,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
493 sizeof(struct cfi_intelext_otpinfo); 521 sizeof(struct cfi_intelext_otpinfo);
494 522
495 /* Burst Read info */ 523 /* Burst Read info */
496 offs += 6; 524 offs += extp->extra[offs+1]+2;
497 525
498 /* Number of partition regions */ 526 /* Number of partition regions */
499 numregions = extp->extra[offs]; 527 numregions = extp->extra[offs];
500 offs += 1; 528 offs += 1;
501 529
530 /* skip the sizeof(partregion) field in CFI 1.4 */
531 if (extp->MinorVersion >= '4')
532 offs += 2;
533
502 /* Number of hardware partitions */ 534 /* Number of hardware partitions */
503 numparts = 0; 535 numparts = 0;
504 for (i = 0; i < numregions; i++) { 536 for (i = 0; i < numregions; i++) {
@@ -510,6 +542,20 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
510 sizeof(struct cfi_intelext_blockinfo); 542 sizeof(struct cfi_intelext_blockinfo);
511 } 543 }
512 544
545 /* Programming Region info */
546 if (extp->MinorVersion >= '4') {
547 struct cfi_intelext_programming_regioninfo *prinfo;
548 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
549 MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
550 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
551 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
552 mtd->flags |= MTD_PROGRAM_REGIONS;
553 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
554 map->name, MTD_PROGREGION_SIZE(mtd),
555 MTD_PROGREGION_CTRLMODE_VALID(mtd),
556 MTD_PROGREGION_CTRLMODE_INVALID(mtd));
557 }
558
513 /* 559 /*
514 * All functions below currently rely on all chips having 560 * All functions below currently rely on all chips having
515 * the same geometry so we'll just assume that all hardware 561 * the same geometry so we'll just assume that all hardware
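
The CFI 1.4 programming-region fields recorded above are plain scalings by the bus interleave, per MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift. A worked example with purely illustrative values:

/* Illustrative only: two interleaved chips, ProgRegShift = 10 */
int interleave = 2, prog_reg_shift = 10;
int progregion_size = interleave << prog_reg_shift;   /* 2048 bytes */

A single chip (interleave 1) reporting the same shift would yield a 1024-byte program region, and the control-mode valid/invalid counts are scaled by the interleave the same way.
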
@@ -654,8 +700,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
654 break; 700 break;
655 701
656 if (time_after(jiffies, timeo)) { 702 if (time_after(jiffies, timeo)) {
657 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", 703 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
658 status.x[0]); 704 map->name, status.x[0]);
659 return -EIO; 705 return -EIO;
660 } 706 }
661 spin_unlock(chip->mutex); 707 spin_unlock(chip->mutex);
@@ -664,7 +710,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
664 /* Someone else might have been playing with it. */ 710 /* Someone else might have been playing with it. */
665 goto retry; 711 goto retry;
666 } 712 }
667 713
668 case FL_READY: 714 case FL_READY:
669 case FL_CFI_QUERY: 715 case FL_CFI_QUERY:
670 case FL_JEDEC_QUERY: 716 case FL_JEDEC_QUERY:
@@ -702,8 +748,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
702 map_write(map, CMD(0x70), adr); 748 map_write(map, CMD(0x70), adr);
703 chip->state = FL_ERASING; 749 chip->state = FL_ERASING;
704 chip->oldstate = FL_READY; 750 chip->oldstate = FL_READY;
705 printk(KERN_ERR "Chip not ready after erase " 751 printk(KERN_ERR "%s: Chip not ready after erase "
706 "suspended: status = 0x%lx\n", status.x[0]); 752 "suspended: status = 0x%lx\n", map->name, status.x[0]);
707 return -EIO; 753 return -EIO;
708 } 754 }
709 755
@@ -783,14 +829,14 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
783 switch(chip->oldstate) { 829 switch(chip->oldstate) {
784 case FL_ERASING: 830 case FL_ERASING:
785 chip->state = chip->oldstate; 831 chip->state = chip->oldstate;
786 /* What if one interleaved chip has finished and the 832 /* What if one interleaved chip has finished and the
787 other hasn't? The old code would leave the finished 833 other hasn't? The old code would leave the finished
788 one in READY mode. That's bad, and caused -EROFS 834 one in READY mode. That's bad, and caused -EROFS
789 errors to be returned from do_erase_oneblock because 835 errors to be returned from do_erase_oneblock because
790 that's the only bit it checked for at the time. 836 that's the only bit it checked for at the time.
791 As the state machine appears to explicitly allow 837 As the state machine appears to explicitly allow
792 sending the 0x70 (Read Status) command to an erasing 838 sending the 0x70 (Read Status) command to an erasing
793 chip and expecting it to be ignored, that's what we 839 chip and expecting it to be ignored, that's what we
794 do. */ 840 do. */
795 map_write(map, CMD(0xd0), adr); 841 map_write(map, CMD(0xd0), adr);
796 map_write(map, CMD(0x70), adr); 842 map_write(map, CMD(0x70), adr);
@@ -810,7 +856,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
810 DISABLE_VPP(map); 856 DISABLE_VPP(map);
811 break; 857 break;
812 default: 858 default:
813 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate); 859 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
814 } 860 }
815 wake_up(&chip->wq); 861 wake_up(&chip->wq);
816} 862}
@@ -1026,8 +1072,8 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
1026 1072
1027 adr += chip->start; 1073 adr += chip->start;
1028 1074
1029 /* Ensure cmd read/writes are aligned. */ 1075 /* Ensure cmd read/writes are aligned. */
1030 cmd_addr = adr & ~(map_bankwidth(map)-1); 1076 cmd_addr = adr & ~(map_bankwidth(map)-1);
1031 1077
1032 spin_lock(chip->mutex); 1078 spin_lock(chip->mutex);
1033 1079
@@ -1055,7 +1101,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1055 1101
1056 if (!map->virt || (from + len > mtd->size)) 1102 if (!map->virt || (from + len > mtd->size))
1057 return -EINVAL; 1103 return -EINVAL;
1058 1104
1059 *mtdbuf = (void *)map->virt + from; 1105 *mtdbuf = (void *)map->virt + from;
1060 *retlen = 0; 1106 *retlen = 0;
1061 1107
@@ -1082,7 +1128,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1082 1128
1083 *retlen += thislen; 1129 *retlen += thislen;
1084 len -= thislen; 1130 len -= thislen;
1085 1131
1086 ofs = 0; 1132 ofs = 0;
1087 chipnum++; 1133 chipnum++;
1088 } 1134 }
@@ -1121,7 +1167,7 @@ static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro
1121 if(chip->ref_point_counter == 0) 1167 if(chip->ref_point_counter == 0)
1122 chip->state = FL_READY; 1168 chip->state = FL_READY;
1123 } else 1169 } else
1124 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */ 1170 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1125 1171
1126 put_chip(map, chip, chip->start); 1172 put_chip(map, chip, chip->start);
1127 spin_unlock(chip->mutex); 1173 spin_unlock(chip->mutex);
@@ -1140,8 +1186,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
1140 1186
1141 adr += chip->start; 1187 adr += chip->start;
1142 1188
1143 /* Ensure cmd read/writes are aligned. */ 1189 /* Ensure cmd read/writes are aligned. */
1144 cmd_addr = adr & ~(map_bankwidth(map)-1); 1190 cmd_addr = adr & ~(map_bankwidth(map)-1);
1145 1191
1146 spin_lock(chip->mutex); 1192 spin_lock(chip->mutex);
1147 ret = get_chip(map, chip, cmd_addr, FL_READY); 1193 ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -1196,7 +1242,7 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
1196 *retlen += thislen; 1242 *retlen += thislen;
1197 len -= thislen; 1243 len -= thislen;
1198 buf += thislen; 1244 buf += thislen;
1199 1245
1200 ofs = 0; 1246 ofs = 0;
1201 chipnum++; 1247 chipnum++;
1202 } 1248 }
@@ -1213,12 +1259,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1213 1259
1214 adr += chip->start; 1260 adr += chip->start;
1215 1261
1216 /* Let's determine this according to the interleave only once */ 1262 /* Let's determine those according to the interleave only once */
1217 status_OK = CMD(0x80); 1263 status_OK = CMD(0x80);
1218 switch (mode) { 1264 switch (mode) {
1219 case FL_WRITING: write_cmd = CMD(0x40); break; 1265 case FL_WRITING:
1220 case FL_OTP_WRITE: write_cmd = CMD(0xc0); break; 1266 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1221 default: return -EINVAL; 1267 break;
1268 case FL_OTP_WRITE:
1269 write_cmd = CMD(0xc0);
1270 break;
1271 default:
1272 return -EINVAL;
1222 } 1273 }
1223 1274
1224 spin_lock(chip->mutex); 1275 spin_lock(chip->mutex);
@@ -1259,12 +1310,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1259 status = map_read(map, adr); 1310 status = map_read(map, adr);
1260 if (map_word_andequal(map, status, status_OK, status_OK)) 1311 if (map_word_andequal(map, status, status_OK, status_OK))
1261 break; 1312 break;
1262 1313
1263 /* OK Still waiting */ 1314 /* OK Still waiting */
1264 if (time_after(jiffies, timeo)) { 1315 if (time_after(jiffies, timeo)) {
1316 map_write(map, CMD(0x70), adr);
1265 chip->state = FL_STATUS; 1317 chip->state = FL_STATUS;
1266 xip_enable(map, chip, adr); 1318 xip_enable(map, chip, adr);
1267 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n"); 1319 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1268 ret = -EIO; 1320 ret = -EIO;
1269 goto out; 1321 goto out;
1270 } 1322 }
@@ -1276,27 +1328,39 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1276 if (!z) { 1328 if (!z) {
1277 chip->word_write_time--; 1329 chip->word_write_time--;
1278 if (!chip->word_write_time) 1330 if (!chip->word_write_time)
1279 chip->word_write_time++; 1331 chip->word_write_time = 1;
1280 } 1332 }
1281 if (z > 1) 1333 if (z > 1)
1282 chip->word_write_time++; 1334 chip->word_write_time++;
1283 1335
1284 /* Done and happy. */ 1336 /* Done and happy. */
1285 chip->state = FL_STATUS; 1337 chip->state = FL_STATUS;
1286 1338
1287 /* check for lock bit */ 1339 /* check for errors */
1288 if (map_word_bitsset(map, status, CMD(0x02))) { 1340 if (map_word_bitsset(map, status, CMD(0x1a))) {
1289 /* clear status */ 1341 unsigned long chipstatus = MERGESTATUS(status);
1342
1343 /* reset status */
1290 map_write(map, CMD(0x50), adr); 1344 map_write(map, CMD(0x50), adr);
1291 /* put back into read status register mode */
1292 map_write(map, CMD(0x70), adr); 1345 map_write(map, CMD(0x70), adr);
1293 ret = -EROFS; 1346 xip_enable(map, chip, adr);
1347
1348 if (chipstatus & 0x02) {
1349 ret = -EROFS;
1350 } else if (chipstatus & 0x08) {
1351 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1352 ret = -EIO;
1353 } else {
1354 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1355 ret = -EINVAL;
1356 }
1357
1358 goto out;
1294 } 1359 }
1295 1360
1296 xip_enable(map, chip, adr); 1361 xip_enable(map, chip, adr);
1297 out: put_chip(map, chip, adr); 1362 out: put_chip(map, chip, adr);
1298 spin_unlock(chip->mutex); 1363 spin_unlock(chip->mutex);
1299
1300 return ret; 1364 return ret;
1301} 1365}
1302 1366
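
The widened error check above (CMD(0x02) becoming CMD(0x1a)) catches the lock, VPP and program-error status bits in one read, and the merged per-chip status is then mapped to distinct error codes. A condensed sketch of that mapping, assuming the merged value has already been extracted (the driver uses MERGESTATUS() for that); the helper name is hypothetical:

#include <errno.h>

/* SR.1 = block locked, SR.3 = VPP low, SR.4 = program error */
static int word_write_status_to_errno(unsigned long chipstatus)
{
	if (!(chipstatus & 0x1a))
		return 0;		/* no error bits set */
	if (chipstatus & 0x02)
		return -EROFS;		/* write to a locked block */
	if (chipstatus & 0x08)
		return -EIO;		/* bad VPP */
	return -EINVAL;			/* generic program failure */
}
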
@@ -1329,7 +1393,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1329 1393
1330 ret = do_write_oneword(map, &cfi->chips[chipnum], 1394 ret = do_write_oneword(map, &cfi->chips[chipnum],
1331 bus_ofs, datum, FL_WRITING); 1395 bus_ofs, datum, FL_WRITING);
1332 if (ret) 1396 if (ret)
1333 return ret; 1397 return ret;
1334 1398
1335 len -= n; 1399 len -= n;
@@ -1338,13 +1402,13 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1338 (*retlen) += n; 1402 (*retlen) += n;
1339 1403
1340 if (ofs >> cfi->chipshift) { 1404 if (ofs >> cfi->chipshift) {
1341 chipnum ++; 1405 chipnum ++;
1342 ofs = 0; 1406 ofs = 0;
1343 if (chipnum == cfi->numchips) 1407 if (chipnum == cfi->numchips)
1344 return 0; 1408 return 0;
1345 } 1409 }
1346 } 1410 }
1347 1411
1348 while(len >= map_bankwidth(map)) { 1412 while(len >= map_bankwidth(map)) {
1349 map_word datum = map_word_load(map, buf); 1413 map_word datum = map_word_load(map, buf);
1350 1414
@@ -1359,7 +1423,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1359 len -= map_bankwidth(map); 1423 len -= map_bankwidth(map);
1360 1424
1361 if (ofs >> cfi->chipshift) { 1425 if (ofs >> cfi->chipshift) {
1362 chipnum ++; 1426 chipnum ++;
1363 ofs = 0; 1427 ofs = 0;
1364 if (chipnum == cfi->numchips) 1428 if (chipnum == cfi->numchips)
1365 return 0; 1429 return 0;
@@ -1374,9 +1438,9 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1374 1438
1375 ret = do_write_oneword(map, &cfi->chips[chipnum], 1439 ret = do_write_oneword(map, &cfi->chips[chipnum],
1376 ofs, datum, FL_WRITING); 1440 ofs, datum, FL_WRITING);
1377 if (ret) 1441 if (ret)
1378 return ret; 1442 return ret;
1379 1443
1380 (*retlen) += len; 1444 (*retlen) += len;
1381 } 1445 }
1382 1446
@@ -1384,20 +1448,24 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1384} 1448}
1385 1449
1386 1450
1387static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1451static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1388 unsigned long adr, const u_char *buf, int len) 1452 unsigned long adr, const struct kvec **pvec,
1453 unsigned long *pvec_seek, int len)
1389{ 1454{
1390 struct cfi_private *cfi = map->fldrv_priv; 1455 struct cfi_private *cfi = map->fldrv_priv;
1391 map_word status, status_OK; 1456 map_word status, status_OK, write_cmd, datum;
1392 unsigned long cmd_adr, timeo; 1457 unsigned long cmd_adr, timeo;
1393 int wbufsize, z, ret=0, bytes, words; 1458 int wbufsize, z, ret=0, word_gap, words;
1459 const struct kvec *vec;
1460 unsigned long vec_seek;
1394 1461
1395 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1462 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1396 adr += chip->start; 1463 adr += chip->start;
1397 cmd_adr = adr & ~(wbufsize-1); 1464 cmd_adr = adr & ~(wbufsize-1);
1398 1465
1399 /* Let's determine this according to the interleave only once */ 1466 /* Let's determine this according to the interleave only once */
1400 status_OK = CMD(0x80); 1467 status_OK = CMD(0x80);
1468 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1401 1469
1402 spin_lock(chip->mutex); 1470 spin_lock(chip->mutex);
1403 ret = get_chip(map, chip, cmd_adr, FL_WRITING); 1471 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
@@ -1411,7 +1479,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1411 xip_disable(map, chip, cmd_adr); 1479 xip_disable(map, chip, cmd_adr);
1412 1480
1413 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set 1481 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1414 [...], the device will not accept any more Write to Buffer commands". 1482 [...], the device will not accept any more Write to Buffer commands".
1415 So we must check here and reset those bits if they're set. Otherwise 1483 So we must check here and reset those bits if they're set. Otherwise
1416 we're just pissing in the wind */ 1484 we're just pissing in the wind */
1417 if (chip->state != FL_STATUS) 1485 if (chip->state != FL_STATUS)
@@ -1429,7 +1497,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1429 1497
1430 z = 0; 1498 z = 0;
1431 for (;;) { 1499 for (;;) {
1432 map_write(map, CMD(0xe8), cmd_adr); 1500 map_write(map, write_cmd, cmd_adr);
1433 1501
1434 status = map_read(map, cmd_adr); 1502 status = map_read(map, cmd_adr);
1435 if (map_word_andequal(map, status, status_OK, status_OK)) 1503 if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1447,41 +1515,66 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1447 map_write(map, CMD(0x50), cmd_adr); 1515 map_write(map, CMD(0x50), cmd_adr);
1448 map_write(map, CMD(0x70), cmd_adr); 1516 map_write(map, CMD(0x70), cmd_adr);
1449 xip_enable(map, chip, cmd_adr); 1517 xip_enable(map, chip, cmd_adr);
1450 printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n", 1518 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1451 status.x[0], Xstatus.x[0]); 1519 map->name, status.x[0], Xstatus.x[0]);
1452 ret = -EIO; 1520 ret = -EIO;
1453 goto out; 1521 goto out;
1454 } 1522 }
1455 } 1523 }
1456 1524
1525 /* Figure out the number of words to write */
1526 word_gap = (-adr & (map_bankwidth(map)-1));
1527 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1528 if (!word_gap) {
1529 words--;
1530 } else {
1531 word_gap = map_bankwidth(map) - word_gap;
1532 adr -= word_gap;
1533 datum = map_word_ff(map);
1534 }
1535
1457 /* Write length of data to come */ 1536 /* Write length of data to come */
1458 bytes = len & (map_bankwidth(map)-1); 1537 map_write(map, CMD(words), cmd_adr );
1459 words = len / map_bankwidth(map);
1460 map_write(map, CMD(words - !bytes), cmd_adr );
1461 1538
1462 /* Write data */ 1539 /* Write data */
1463 z = 0; 1540 vec = *pvec;
1464 while(z < words * map_bankwidth(map)) { 1541 vec_seek = *pvec_seek;
1465 map_word datum = map_word_load(map, buf); 1542 do {
1466 map_write(map, datum, adr+z); 1543 int n = map_bankwidth(map) - word_gap;
1544 if (n > vec->iov_len - vec_seek)
1545 n = vec->iov_len - vec_seek;
1546 if (n > len)
1547 n = len;
1467 1548
1468 z += map_bankwidth(map); 1549 if (!word_gap && len < map_bankwidth(map))
1469 buf += map_bankwidth(map); 1550 datum = map_word_ff(map);
1470 }
1471 1551
1472 if (bytes) { 1552 datum = map_word_load_partial(map, datum,
1473 map_word datum; 1553 vec->iov_base + vec_seek,
1554 word_gap, n);
1474 1555
1475 datum = map_word_ff(map); 1556 len -= n;
1476 datum = map_word_load_partial(map, datum, buf, 0, bytes); 1557 word_gap += n;
1477 map_write(map, datum, adr+z); 1558 if (!len || word_gap == map_bankwidth(map)) {
1478 } 1559 map_write(map, datum, adr);
1560 adr += map_bankwidth(map);
1561 word_gap = 0;
1562 }
1563
1564 vec_seek += n;
1565 if (vec_seek == vec->iov_len) {
1566 vec++;
1567 vec_seek = 0;
1568 }
1569 } while (len);
1570 *pvec = vec;
1571 *pvec_seek = vec_seek;
1479 1572
1480 /* GO GO GO */ 1573 /* GO GO GO */
1481 map_write(map, CMD(0xd0), cmd_adr); 1574 map_write(map, CMD(0xd0), cmd_adr);
1482 chip->state = FL_WRITING; 1575 chip->state = FL_WRITING;
1483 1576
1484 INVALIDATE_CACHE_UDELAY(map, chip, 1577 INVALIDATE_CACHE_UDELAY(map, chip,
1485 cmd_adr, len, 1578 cmd_adr, len,
1486 chip->buffer_write_time); 1579 chip->buffer_write_time);
1487 1580
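
The alignment bookkeeping in the new gather loop above is the subtle part: a write that does not start on a bus-word boundary is rewound to the previous boundary, the leading gap is pre-filled with 0xFF (which programs nothing on NOR flash), and the value written with CMD(words) ends up being the Intel "word count minus one" encoding in both the aligned and unaligned cases. A worked example under an assumed 4-byte bankwidth (standalone, not driver code):

#include <stdio.h>

int main(void)
{
	/* Assumed: map_bankwidth = 4; a write of len = 10 bytes at adr = 0x1003 */
	unsigned long adr = 0x1003, len = 10, bankwidth = 4;

	unsigned long word_gap = (-adr) & (bankwidth - 1);  /* 1 byte to the next boundary */
	unsigned long words = (len - word_gap + bankwidth - 1) / bankwidth;  /* 3 */

	if (!word_gap) {
		words--;                          /* aligned: encode N-1 directly */
	} else {
		word_gap = bankwidth - word_gap;  /* 3: payload offset in the first word */
		adr -= word_gap;                  /* rewind to 0x1000 */
	}

	/* 0x1003..0x100C spans four bus words (0x1000, 0x1004, 0x1008, 0x100C),
	 * so the chip wants count-minus-one = 3; the three leading bytes of the
	 * first word stay 0xFF. */
	printf("adr=0x%lx word_gap=%lu words=%lu\n", adr, word_gap, words);
	return 0;
}
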
@@ -1507,13 +1600,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1507 1600
1508 /* OK Still waiting */ 1601 /* OK Still waiting */
1509 if (time_after(jiffies, timeo)) { 1602 if (time_after(jiffies, timeo)) {
1603 map_write(map, CMD(0x70), cmd_adr);
1510 chip->state = FL_STATUS; 1604 chip->state = FL_STATUS;
1511 xip_enable(map, chip, cmd_adr); 1605 xip_enable(map, chip, cmd_adr);
1512 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 1606 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1513 ret = -EIO; 1607 ret = -EIO;
1514 goto out; 1608 goto out;
1515 } 1609 }
1516 1610
1517 /* Latency issues. Drop the lock, wait a while and retry */ 1611 /* Latency issues. Drop the lock, wait a while and retry */
1518 z++; 1612 z++;
1519 UDELAY(map, chip, cmd_adr, 1); 1613 UDELAY(map, chip, cmd_adr, 1);
@@ -1521,21 +1615,34 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1521 if (!z) { 1615 if (!z) {
1522 chip->buffer_write_time--; 1616 chip->buffer_write_time--;
1523 if (!chip->buffer_write_time) 1617 if (!chip->buffer_write_time)
1524 chip->buffer_write_time++; 1618 chip->buffer_write_time = 1;
1525 } 1619 }
1526 if (z > 1) 1620 if (z > 1)
1527 chip->buffer_write_time++; 1621 chip->buffer_write_time++;
1528 1622
1529 /* Done and happy. */ 1623 /* Done and happy. */
1530 chip->state = FL_STATUS; 1624 chip->state = FL_STATUS;
1531 1625
1532 /* check for lock bit */ 1626 /* check for errors */
1533 if (map_word_bitsset(map, status, CMD(0x02))) { 1627 if (map_word_bitsset(map, status, CMD(0x1a))) {
1534 /* clear status */ 1628 unsigned long chipstatus = MERGESTATUS(status);
1629
1630 /* reset status */
1535 map_write(map, CMD(0x50), cmd_adr); 1631 map_write(map, CMD(0x50), cmd_adr);
1536 /* put back into read status register mode */ 1632 map_write(map, CMD(0x70), cmd_adr);
1537 map_write(map, CMD(0x70), adr); 1633 xip_enable(map, chip, cmd_adr);
1538 ret = -EROFS; 1634
1635 if (chipstatus & 0x02) {
1636 ret = -EROFS;
1637 } else if (chipstatus & 0x08) {
1638 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1639 ret = -EIO;
1640 } else {
1641 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1642 ret = -EINVAL;
1643 }
1644
1645 goto out;
1539 } 1646 }
1540 1647
1541 xip_enable(map, chip, cmd_adr); 1648 xip_enable(map, chip, cmd_adr);
@@ -1544,70 +1651,65 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1544 return ret; 1651 return ret;
1545} 1652}
1546 1653
1547static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 1654static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1548 size_t len, size_t *retlen, const u_char *buf) 1655 unsigned long count, loff_t to, size_t *retlen)
1549{ 1656{
1550 struct map_info *map = mtd->priv; 1657 struct map_info *map = mtd->priv;
1551 struct cfi_private *cfi = map->fldrv_priv; 1658 struct cfi_private *cfi = map->fldrv_priv;
1552 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1659 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1553 int ret = 0; 1660 int ret = 0;
1554 int chipnum; 1661 int chipnum;
1555 unsigned long ofs; 1662 unsigned long ofs, vec_seek, i;
1663 size_t len = 0;
1664
1665 for (i = 0; i < count; i++)
1666 len += vecs[i].iov_len;
1556 1667
1557 *retlen = 0; 1668 *retlen = 0;
1558 if (!len) 1669 if (!len)
1559 return 0; 1670 return 0;
1560 1671
1561 chipnum = to >> cfi->chipshift; 1672 chipnum = to >> cfi->chipshift;
1562 ofs = to - (chipnum << cfi->chipshift); 1673 ofs = to - (chipnum << cfi->chipshift);
1563 1674 vec_seek = 0;
1564 /* If it's not bus-aligned, do the first word write */
1565 if (ofs & (map_bankwidth(map)-1)) {
1566 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1567 if (local_len > len)
1568 local_len = len;
1569 ret = cfi_intelext_write_words(mtd, to, local_len,
1570 retlen, buf);
1571 if (ret)
1572 return ret;
1573 ofs += local_len;
1574 buf += local_len;
1575 len -= local_len;
1576
1577 if (ofs >> cfi->chipshift) {
1578 chipnum ++;
1579 ofs = 0;
1580 if (chipnum == cfi->numchips)
1581 return 0;
1582 }
1583 }
1584 1675
1585 while(len) { 1676 do {
1586 /* We must not cross write block boundaries */ 1677 /* We must not cross write block boundaries */
1587 int size = wbufsize - (ofs & (wbufsize-1)); 1678 int size = wbufsize - (ofs & (wbufsize-1));
1588 1679
1589 if (size > len) 1680 if (size > len)
1590 size = len; 1681 size = len;
1591 ret = do_write_buffer(map, &cfi->chips[chipnum], 1682 ret = do_write_buffer(map, &cfi->chips[chipnum],
1592 ofs, buf, size); 1683 ofs, &vecs, &vec_seek, size);
1593 if (ret) 1684 if (ret)
1594 return ret; 1685 return ret;
1595 1686
1596 ofs += size; 1687 ofs += size;
1597 buf += size;
1598 (*retlen) += size; 1688 (*retlen) += size;
1599 len -= size; 1689 len -= size;
1600 1690
1601 if (ofs >> cfi->chipshift) { 1691 if (ofs >> cfi->chipshift) {
1602 chipnum ++; 1692 chipnum ++;
1603 ofs = 0; 1693 ofs = 0;
1604 if (chipnum == cfi->numchips) 1694 if (chipnum == cfi->numchips)
1605 return 0; 1695 return 0;
1606 } 1696 }
1607 } 1697 } while (len);
1698
1608 return 0; 1699 return 0;
1609} 1700}
1610 1701
1702static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1703 size_t len, size_t *retlen, const u_char *buf)
1704{
1705 struct kvec vec;
1706
1707 vec.iov_base = (void *) buf;
1708 vec.iov_len = len;
1709
1710 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1711}
1712
1611static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, 1713static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1612 unsigned long adr, int len, void *thunk) 1714 unsigned long adr, int len, void *thunk)
1613{ 1715{
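
With cfi_intelext_writev() installed as mtd->writev and the old buffer-write entry point reduced to a one-element kvec wrapper, a caller can hand several discontiguous buffers to the chip in a single gathered buffer program. A hypothetical usage sketch (the buffer names and lengths are made up, only the writev signature follows the prototype added above):

struct kvec vecs[2];
size_t retlen;
int ret;

vecs[0].iov_base = hdr;         /* hypothetical header buffer  */
vecs[0].iov_len  = hdr_len;
vecs[1].iov_base = payload;     /* hypothetical payload buffer */
vecs[1].iov_len  = payload_len;

ret = mtd->writev(mtd, vecs, 2, ofs, &retlen);
if (ret == 0 && retlen != hdr_len + payload_len)
	ret = -EIO;             /* short write */
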
@@ -1673,23 +1775,17 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1673 status = map_read(map, adr); 1775 status = map_read(map, adr);
1674 if (map_word_andequal(map, status, status_OK, status_OK)) 1776 if (map_word_andequal(map, status, status_OK, status_OK))
1675 break; 1777 break;
1676 1778
1677 /* OK Still waiting */ 1779 /* OK Still waiting */
1678 if (time_after(jiffies, timeo)) { 1780 if (time_after(jiffies, timeo)) {
1679 map_word Xstatus;
1680 map_write(map, CMD(0x70), adr); 1781 map_write(map, CMD(0x70), adr);
1681 chip->state = FL_STATUS; 1782 chip->state = FL_STATUS;
1682 Xstatus = map_read(map, adr);
1683 /* Clear status bits */
1684 map_write(map, CMD(0x50), adr);
1685 map_write(map, CMD(0x70), adr);
1686 xip_enable(map, chip, adr); 1783 xip_enable(map, chip, adr);
1687 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n", 1784 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1688 adr, status.x[0], Xstatus.x[0]);
1689 ret = -EIO; 1785 ret = -EIO;
1690 goto out; 1786 goto out;
1691 } 1787 }
1692 1788
1693 /* Latency issues. Drop the lock, wait a while and retry */ 1789 /* Latency issues. Drop the lock, wait a while and retry */
1694 UDELAY(map, chip, adr, 1000000/HZ); 1790 UDELAY(map, chip, adr, 1000000/HZ);
1695 } 1791 }
@@ -1699,43 +1795,40 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1699 chip->state = FL_STATUS; 1795 chip->state = FL_STATUS;
1700 status = map_read(map, adr); 1796 status = map_read(map, adr);
1701 1797
1702 /* check for lock bit */ 1798 /* check for errors */
1703 if (map_word_bitsset(map, status, CMD(0x3a))) { 1799 if (map_word_bitsset(map, status, CMD(0x3a))) {
1704 unsigned long chipstatus; 1800 unsigned long chipstatus = MERGESTATUS(status);
1705 1801
1706 /* Reset the error bits */ 1802 /* Reset the error bits */
1707 map_write(map, CMD(0x50), adr); 1803 map_write(map, CMD(0x50), adr);
1708 map_write(map, CMD(0x70), adr); 1804 map_write(map, CMD(0x70), adr);
1709 xip_enable(map, chip, adr); 1805 xip_enable(map, chip, adr);
1710 1806
1711 chipstatus = MERGESTATUS(status);
1712
1713 if ((chipstatus & 0x30) == 0x30) { 1807 if ((chipstatus & 0x30) == 0x30) {
1714 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus); 1808 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1715 ret = -EIO; 1809 ret = -EINVAL;
1716 } else if (chipstatus & 0x02) { 1810 } else if (chipstatus & 0x02) {
1717 /* Protection bit set */ 1811 /* Protection bit set */
1718 ret = -EROFS; 1812 ret = -EROFS;
1719 } else if (chipstatus & 0x8) { 1813 } else if (chipstatus & 0x8) {
1720 /* Voltage */ 1814 /* Voltage */
1721 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus); 1815 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1722 ret = -EIO; 1816 ret = -EIO;
1723 } else if (chipstatus & 0x20) { 1817 } else if (chipstatus & 0x20 && retries--) {
1724 if (retries--) { 1818 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1725 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1819 timeo = jiffies + HZ;
1726 timeo = jiffies + HZ; 1820 put_chip(map, chip, adr);
1727 put_chip(map, chip, adr); 1821 spin_unlock(chip->mutex);
1728 spin_unlock(chip->mutex); 1822 goto retry;
1729 goto retry; 1823 } else {
1730 } 1824 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1731 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
1732 ret = -EIO; 1825 ret = -EIO;
1733 } 1826 }
1734 } else { 1827
1735 xip_enable(map, chip, adr); 1828 goto out;
1736 ret = 0;
1737 } 1829 }
1738 1830
1831 xip_enable(map, chip, adr);
1739 out: put_chip(map, chip, adr); 1832 out: put_chip(map, chip, adr);
1740 spin_unlock(chip->mutex); 1833 spin_unlock(chip->mutex);
1741 return ret; 1834 return ret;
@@ -1755,7 +1848,7 @@ int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1755 1848
1756 instr->state = MTD_ERASE_DONE; 1849 instr->state = MTD_ERASE_DONE;
1757 mtd_erase_callback(instr); 1850 mtd_erase_callback(instr);
1758 1851
1759 return 0; 1852 return 0;
1760} 1853}
1761 1854
@@ -1776,7 +1869,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1776 if (!ret) { 1869 if (!ret) {
1777 chip->oldstate = chip->state; 1870 chip->oldstate = chip->state;
1778 chip->state = FL_SYNCING; 1871 chip->state = FL_SYNCING;
1779 /* No need to wake_up() on this state change - 1872 /* No need to wake_up() on this state change -
1780 * as the whole point is that nobody can do anything 1873 * as the whole point is that nobody can do anything
1781 * with the chip now anyway. 1874 * with the chip now anyway.
1782 */ 1875 */
@@ -1790,7 +1883,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1790 chip = &cfi->chips[i]; 1883 chip = &cfi->chips[i];
1791 1884
1792 spin_lock(chip->mutex); 1885 spin_lock(chip->mutex);
1793 1886
1794 if (chip->state == FL_SYNCING) { 1887 if (chip->state == FL_SYNCING) {
1795 chip->state = chip->oldstate; 1888 chip->state = chip->oldstate;
1796 chip->oldstate = FL_READY; 1889 chip->oldstate = FL_READY;
@@ -1847,7 +1940,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1847 1940
1848 ENABLE_VPP(map); 1941 ENABLE_VPP(map);
1849 xip_disable(map, chip, adr); 1942 xip_disable(map, chip, adr);
1850 1943
1851 map_write(map, CMD(0x60), adr); 1944 map_write(map, CMD(0x60), adr);
1852 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 1945 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1853 map_write(map, CMD(0x01), adr); 1946 map_write(map, CMD(0x01), adr);
@@ -1875,25 +1968,22 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1875 status = map_read(map, adr); 1968 status = map_read(map, adr);
1876 if (map_word_andequal(map, status, status_OK, status_OK)) 1969 if (map_word_andequal(map, status, status_OK, status_OK))
1877 break; 1970 break;
1878 1971
1879 /* OK Still waiting */ 1972 /* OK Still waiting */
1880 if (time_after(jiffies, timeo)) { 1973 if (time_after(jiffies, timeo)) {
1881 map_word Xstatus;
1882 map_write(map, CMD(0x70), adr); 1974 map_write(map, CMD(0x70), adr);
1883 chip->state = FL_STATUS; 1975 chip->state = FL_STATUS;
1884 Xstatus = map_read(map, adr);
1885 xip_enable(map, chip, adr); 1976 xip_enable(map, chip, adr);
1886 printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n", 1977 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1887 status.x[0], Xstatus.x[0]);
1888 put_chip(map, chip, adr); 1978 put_chip(map, chip, adr);
1889 spin_unlock(chip->mutex); 1979 spin_unlock(chip->mutex);
1890 return -EIO; 1980 return -EIO;
1891 } 1981 }
1892 1982
1893 /* Latency issues. Drop the lock, wait a while and retry */ 1983 /* Latency issues. Drop the lock, wait a while and retry */
1894 UDELAY(map, chip, adr, 1); 1984 UDELAY(map, chip, adr, 1);
1895 } 1985 }
1896 1986
1897 /* Done and happy. */ 1987 /* Done and happy. */
1898 chip->state = FL_STATUS; 1988 chip->state = FL_STATUS;
1899 xip_enable(map, chip, adr); 1989 xip_enable(map, chip, adr);
@@ -1913,9 +2003,9 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1913 ofs, len, 0); 2003 ofs, len, 0);
1914#endif 2004#endif
1915 2005
1916 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2006 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1917 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK); 2007 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1918 2008
1919#ifdef DEBUG_LOCK_BITS 2009#ifdef DEBUG_LOCK_BITS
1920 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2010 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1921 __FUNCTION__, ret); 2011 __FUNCTION__, ret);
@@ -1939,20 +2029,20 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1939 2029
1940 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2030 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1941 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK); 2031 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1942 2032
1943#ifdef DEBUG_LOCK_BITS 2033#ifdef DEBUG_LOCK_BITS
1944 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2034 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1945 __FUNCTION__, ret); 2035 __FUNCTION__, ret);
1946 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2036 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1947 ofs, len, 0); 2037 ofs, len, 0);
1948#endif 2038#endif
1949 2039
1950 return ret; 2040 return ret;
1951} 2041}
1952 2042
1953#ifdef CONFIG_MTD_OTP 2043#ifdef CONFIG_MTD_OTP
1954 2044
1955typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 2045typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1956 u_long data_offset, u_char *buf, u_int size, 2046 u_long data_offset, u_char *buf, u_int size,
1957 u_long prot_offset, u_int groupno, u_int groupsize); 2047 u_long prot_offset, u_int groupno, u_int groupsize);
1958 2048
@@ -2003,7 +2093,7 @@ do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2003 2093
2004 datum = map_word_load_partial(map, datum, buf, gap, n); 2094 datum = map_word_load_partial(map, datum, buf, gap, n);
2005 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 2095 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2006 if (ret) 2096 if (ret)
2007 return ret; 2097 return ret;
2008 2098
2009 offset += n; 2099 offset += n;
@@ -2196,7 +2286,7 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2196 NULL, do_otp_lock, 1); 2286 NULL, do_otp_lock, 1);
2197} 2287}
2198 2288
2199static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 2289static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2200 struct otp_info *buf, size_t len) 2290 struct otp_info *buf, size_t len)
2201{ 2291{
2202 size_t retlen; 2292 size_t retlen;
@@ -2239,7 +2329,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2239 if (chip->oldstate == FL_READY) { 2329 if (chip->oldstate == FL_READY) {
2240 chip->oldstate = chip->state; 2330 chip->oldstate = chip->state;
2241 chip->state = FL_PM_SUSPENDED; 2331 chip->state = FL_PM_SUSPENDED;
2242 /* No need to wake_up() on this state change - 2332 /* No need to wake_up() on this state change -
2243 * as the whole point is that nobody can do anything 2333 * as the whole point is that nobody can do anything
2244 * with the chip now anyway. 2334 * with the chip now anyway.
2245 */ 2335 */
@@ -2267,9 +2357,9 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2267 if (ret) { 2357 if (ret) {
2268 for (i--; i >=0; i--) { 2358 for (i--; i >=0; i--) {
2269 chip = &cfi->chips[i]; 2359 chip = &cfi->chips[i];
2270 2360
2271 spin_lock(chip->mutex); 2361 spin_lock(chip->mutex);
2272 2362
2273 if (chip->state == FL_PM_SUSPENDED) { 2363 if (chip->state == FL_PM_SUSPENDED) {
2274 /* No need to force it into a known state here, 2364 /* No need to force it into a known state here,
2275 because we're returning failure, and it didn't 2365 because we're returning failure, and it didn't
@@ -2280,8 +2370,8 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2280 } 2370 }
2281 spin_unlock(chip->mutex); 2371 spin_unlock(chip->mutex);
2282 } 2372 }
2283 } 2373 }
2284 2374
2285 return ret; 2375 return ret;
2286} 2376}
2287 2377
@@ -2293,11 +2383,11 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2293 struct flchip *chip; 2383 struct flchip *chip;
2294 2384
2295 for (i=0; i<cfi->numchips; i++) { 2385 for (i=0; i<cfi->numchips; i++) {
2296 2386
2297 chip = &cfi->chips[i]; 2387 chip = &cfi->chips[i];
2298 2388
2299 spin_lock(chip->mutex); 2389 spin_lock(chip->mutex);
2300 2390
2301 /* Go to known state. Chip may have been power cycled */ 2391 /* Go to known state. Chip may have been power cycled */
2302 if (chip->state == FL_PM_SUSPENDED) { 2392 if (chip->state == FL_PM_SUSPENDED) {
2303 map_write(map, CMD(0xFF), cfi->chips[i].start); 2393 map_write(map, CMD(0xFF), cfi->chips[i].start);
@@ -2319,7 +2409,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2319 struct flchip *chip = &cfi->chips[i]; 2409 struct flchip *chip = &cfi->chips[i];
2320 2410
2321 /* force the completion of any ongoing operation 2411 /* force the completion of any ongoing operation
2322 and switch to array mode so any bootloader in 2412 and switch to array mode so any bootloader in
2323 flash is accessible for soft reboot. */ 2413 flash is accessible for soft reboot. */
2324 spin_lock(chip->mutex); 2414 spin_lock(chip->mutex);
2325 ret = get_chip(map, chip, chip->start, FL_SYNCING); 2415 ret = get_chip(map, chip, chip->start, FL_SYNCING);
@@ -2356,20 +2446,23 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
2356 kfree(mtd->eraseregions); 2446 kfree(mtd->eraseregions);
2357} 2447}
2358 2448
2359static char im_name_1[]="cfi_cmdset_0001"; 2449static char im_name_0001[] = "cfi_cmdset_0001";
2360static char im_name_3[]="cfi_cmdset_0003"; 2450static char im_name_0003[] = "cfi_cmdset_0003";
2451static char im_name_0200[] = "cfi_cmdset_0200";
2361 2452
2362static int __init cfi_intelext_init(void) 2453static int __init cfi_intelext_init(void)
2363{ 2454{
2364 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001); 2455 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2365 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001); 2456 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2457 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2366 return 0; 2458 return 0;
2367} 2459}
2368 2460
2369static void __exit cfi_intelext_exit(void) 2461static void __exit cfi_intelext_exit(void)
2370{ 2462{
2371 inter_module_unregister(im_name_1); 2463 inter_module_unregister(im_name_0001);
2372 inter_module_unregister(im_name_3); 2464 inter_module_unregister(im_name_0003);
2465 inter_module_unregister(im_name_0200);
2373} 2466}
2374 2467
2375module_init(cfi_intelext_init); 2468module_init(cfi_intelext_init);