author:    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit:    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree:      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd/chips/cfi_cmdset_0002.c
tags:      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0002.c')
-rw-r--r-- | drivers/mtd/chips/cfi_cmdset_0002.c | 1515
1 file changed, 1515 insertions, 0 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
new file mode 100644
index 000000000000..fca8ff6f7e14
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -0,0 +1,1515 @@
1 | /* | ||
2 | * Common Flash Interface support: | ||
3 | * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) | ||
4 | * | ||
5 | * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> | ||
6 | * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> | ||
7 | * | ||
8 | * 2_by_8 routines added by Simon Munton | ||
9 | * | ||
10 | * 4_by_16 work by Carolyn J. Smith | ||
11 | * | ||
12 | * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com | ||
13 | * | ||
14 | * This code is GPL | ||
15 | * | ||
16 | * $Id: cfi_cmdset_0002.c,v 1.114 2004/12/11 15:43:53 dedekind Exp $ | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/byteorder.h> | ||
28 | |||
29 | #include <linux/errno.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/mtd/compatmac.h> | ||
34 | #include <linux/mtd/map.h> | ||
35 | #include <linux/mtd/mtd.h> | ||
36 | #include <linux/mtd/cfi.h> | ||
37 | |||
38 | #define AMD_BOOTLOC_BUG | ||
39 | #define FORCE_WORD_WRITE 0 | ||
40 | |||
41 | #define MAX_WORD_RETRIES 3 | ||
42 | |||
43 | #define MANUFACTURER_AMD 0x0001 | ||
44 | #define MANUFACTURER_SST 0x00BF | ||
45 | #define SST49LF004B 0x0060 | ||
46 | |||
47 | static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | ||
48 | static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); | ||
49 | static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); | ||
50 | static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); | ||
51 | static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); | ||
52 | static void cfi_amdstd_sync (struct mtd_info *); | ||
53 | static int cfi_amdstd_suspend (struct mtd_info *); | ||
54 | static void cfi_amdstd_resume (struct mtd_info *); | ||
55 | static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | ||
56 | |||
57 | static void cfi_amdstd_destroy(struct mtd_info *); | ||
58 | |||
59 | struct mtd_info *cfi_cmdset_0002(struct map_info *, int); | ||
60 | static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); | ||
61 | |||
62 | static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); | ||
63 | static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); | ||
64 | #include "fwh_lock.h" | ||
65 | |||
66 | static struct mtd_chip_driver cfi_amdstd_chipdrv = { | ||
67 | .probe = NULL, /* Not usable directly */ | ||
68 | .destroy = cfi_amdstd_destroy, | ||
69 | .name = "cfi_cmdset_0002", | ||
70 | .module = THIS_MODULE | ||
71 | }; | ||
72 | |||
73 | |||
74 | /* #define DEBUG_CFI_FEATURES */ | ||
75 | |||
76 | |||
77 | #ifdef DEBUG_CFI_FEATURES | ||
78 | static void cfi_tell_features(struct cfi_pri_amdstd *extp) | ||
79 | { | ||
80 | const char* erase_suspend[3] = { | ||
81 | "Not supported", "Read only", "Read/write" | ||
82 | }; | ||
83 | const char* top_bottom[6] = { | ||
84 | "No WP", "8x8KiB sectors at top & bottom, no WP", | ||
85 | "Bottom boot", "Top boot", | ||
86 | "Uniform, Bottom WP", "Uniform, Top WP" | ||
87 | }; | ||
88 | |||
89 | printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); | ||
90 | printk(" Address sensitive unlock: %s\n", | ||
91 | (extp->SiliconRevision & 1) ? "Not required" : "Required"); | ||
92 | |||
93 | if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) | ||
94 | printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); | ||
95 | else | ||
96 | printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); | ||
97 | |||
98 | if (extp->BlkProt == 0) | ||
99 | printk(" Block protection: Not supported\n"); | ||
100 | else | ||
101 | printk(" Block protection: %d sectors per group\n", extp->BlkProt); | ||
102 | |||
103 | |||
104 | printk(" Temporary block unprotect: %s\n", | ||
105 | extp->TmpBlkUnprotect ? "Supported" : "Not supported"); | ||
106 | printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); | ||
107 | printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); | ||
108 | printk(" Burst mode: %s\n", | ||
109 | extp->BurstMode ? "Supported" : "Not supported"); | ||
110 | if (extp->PageMode == 0) | ||
111 | printk(" Page mode: Not supported\n"); | ||
112 | else | ||
113 | printk(" Page mode: %d word page\n", extp->PageMode << 2); | ||
114 | |||
115 | printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", | ||
116 | extp->VppMin >> 4, extp->VppMin & 0xf); | ||
117 | printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", | ||
118 | extp->VppMax >> 4, extp->VppMax & 0xf); | ||
119 | |||
120 | if (extp->TopBottom < ARRAY_SIZE(top_bottom)) | ||
121 | printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); | ||
122 | else | ||
123 | printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); | ||
124 | } | ||
125 | #endif | ||
126 | |||
127 | #ifdef AMD_BOOTLOC_BUG | ||
128 | /* Wheee. Bring me the head of someone at AMD. */ | ||
129 | static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) | ||
130 | { | ||
131 | struct map_info *map = mtd->priv; | ||
132 | struct cfi_private *cfi = map->fldrv_priv; | ||
133 | struct cfi_pri_amdstd *extp = cfi->cmdset_priv; | ||
134 | __u8 major = extp->MajorVersion; | ||
135 | __u8 minor = extp->MinorVersion; | ||
136 | |||
137 | if (((major << 8) | minor) < 0x3131) { | ||
138 | /* CFI version 1.0 => don't trust bootloc */ | ||
139 | if (cfi->id & 0x80) { | ||
140 | printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); | ||
141 | extp->TopBottom = 3; /* top boot */ | ||
142 | } else { | ||
143 | extp->TopBottom = 2; /* bottom boot */ | ||
144 | } | ||
145 | } | ||
146 | } | ||
147 | #endif | ||
148 | |||
149 | static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) | ||
150 | { | ||
151 | struct map_info *map = mtd->priv; | ||
152 | struct cfi_private *cfi = map->fldrv_priv; | ||
153 | if (cfi->cfiq->BufWriteTimeoutTyp) { | ||
154 | DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); | ||
155 | mtd->write = cfi_amdstd_write_buffers; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static void fixup_use_secsi(struct mtd_info *mtd, void *param) | ||
160 | { | ||
161 | /* Setup for chips with a secsi area */ | ||
162 | mtd->read_user_prot_reg = cfi_amdstd_secsi_read; | ||
163 | mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; | ||
164 | } | ||
165 | |||
166 | static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) | ||
167 | { | ||
168 | struct map_info *map = mtd->priv; | ||
169 | struct cfi_private *cfi = map->fldrv_priv; | ||
170 | if ((cfi->cfiq->NumEraseRegions == 1) && | ||
171 | ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { | ||
172 | mtd->erase = cfi_amdstd_erase_chip; | ||
173 | } | ||
174 | |||
175 | } | ||
176 | |||
177 | static struct cfi_fixup cfi_fixup_table[] = { | ||
178 | #ifdef AMD_BOOTLOC_BUG | ||
179 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, | ||
180 | #endif | ||
181 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, | ||
182 | { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, | ||
183 | { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, }, | ||
184 | { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, }, | ||
185 | { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, }, | ||
186 | { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, }, | ||
187 | #if !FORCE_WORD_WRITE | ||
188 | { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, | ||
189 | #endif | ||
190 | { 0, 0, NULL, NULL } | ||
191 | }; | ||
192 | static struct cfi_fixup jedec_fixup_table[] = { | ||
193 | { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, | ||
194 | { 0, 0, NULL, NULL } | ||
195 | }; | ||
196 | |||
197 | static struct cfi_fixup fixup_table[] = { | ||
198 | /* The CFI vendor IDs and the JEDEC vendor IDs appear | ||
199 | * to be common.  It looks like the device IDs are as | ||
200 | * well.  This table is to pick all cases where | ||
201 | * we know that is the case. | ||
202 | */ | ||
203 | { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL }, | ||
204 | { 0, 0, NULL, NULL } | ||
205 | }; | ||
206 | |||
207 | |||
208 | struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | ||
209 | { | ||
210 | struct cfi_private *cfi = map->fldrv_priv; | ||
211 | struct mtd_info *mtd; | ||
212 | int i; | ||
213 | |||
214 | mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); | ||
215 | if (!mtd) { | ||
216 | printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); | ||
217 | return NULL; | ||
218 | } | ||
219 | memset(mtd, 0, sizeof(*mtd)); | ||
220 | mtd->priv = map; | ||
221 | mtd->type = MTD_NORFLASH; | ||
222 | |||
223 | /* Fill in the default mtd operations */ | ||
224 | mtd->erase = cfi_amdstd_erase_varsize; | ||
225 | mtd->write = cfi_amdstd_write_words; | ||
226 | mtd->read = cfi_amdstd_read; | ||
227 | mtd->sync = cfi_amdstd_sync; | ||
228 | mtd->suspend = cfi_amdstd_suspend; | ||
229 | mtd->resume = cfi_amdstd_resume; | ||
230 | mtd->flags = MTD_CAP_NORFLASH; | ||
231 | mtd->name = map->name; | ||
232 | |||
233 | if (cfi->cfi_mode==CFI_MODE_CFI){ | ||
234 | unsigned char bootloc; | ||
235 | /* | ||
236 | * It's a real CFI chip, not one for which the probe | ||
237 | * routine faked a CFI structure. So we read the feature | ||
238 | * table from it. | ||
239 | */ | ||
240 | __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; | ||
241 | struct cfi_pri_amdstd *extp; | ||
242 | |||
243 | extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); | ||
244 | if (!extp) { | ||
245 | kfree(mtd); | ||
246 | return NULL; | ||
247 | } | ||
248 | |||
249 | /* Install our own private info structure */ | ||
250 | cfi->cmdset_priv = extp; | ||
251 | |||
252 | /* Apply cfi device specific fixups */ | ||
253 | cfi_fixup(mtd, cfi_fixup_table); | ||
254 | |||
255 | #ifdef DEBUG_CFI_FEATURES | ||
256 | /* Tell the user about it in lots of lovely detail */ | ||
257 | cfi_tell_features(extp); | ||
258 | #endif | ||
259 | |||
260 | bootloc = extp->TopBottom; | ||
261 | if ((bootloc != 2) && (bootloc != 3)) { | ||
262 | printk(KERN_WARNING "%s: CFI does not contain boot " | ||
263 | "bank location. Assuming bottom.\n", map->name); | ||
264 | bootloc = 2; | ||
265 | } | ||
266 | |||
267 | if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { | ||
268 | printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); | ||
269 | |||
270 | for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { | ||
271 | int j = (cfi->cfiq->NumEraseRegions-1)-i; | ||
272 | __u32 swap; | ||
273 | |||
274 | swap = cfi->cfiq->EraseRegionInfo[i]; | ||
275 | cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; | ||
276 | cfi->cfiq->EraseRegionInfo[j] = swap; | ||
277 | } | ||
278 | } | ||
279 | /* Set the default CFI lock/unlock addresses */ | ||
280 | cfi->addr_unlock1 = 0x555; | ||
281 | cfi->addr_unlock2 = 0x2aa; | ||
282 | /* Modify the unlock address if we are in compatibility mode */ | ||
283 | if ( /* x16 in x8 mode */ | ||
284 | ((cfi->device_type == CFI_DEVICETYPE_X8) && | ||
285 | (cfi->cfiq->InterfaceDesc == 2)) || | ||
286 | /* x32 in x16 mode */ | ||
287 | ((cfi->device_type == CFI_DEVICETYPE_X16) && | ||
288 | (cfi->cfiq->InterfaceDesc == 4))) | ||
289 | { | ||
290 | cfi->addr_unlock1 = 0xaaa; | ||
291 | cfi->addr_unlock2 = 0x555; | ||
292 | } | ||
293 | |||
294 | } /* CFI mode */ | ||
295 | else if (cfi->cfi_mode == CFI_MODE_JEDEC) { | ||
296 | /* Apply jedec specific fixups */ | ||
297 | cfi_fixup(mtd, jedec_fixup_table); | ||
298 | } | ||
299 | /* Apply generic fixups */ | ||
300 | cfi_fixup(mtd, fixup_table); | ||
301 | |||
302 | for (i=0; i< cfi->numchips; i++) { | ||
303 | cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; | ||
304 | cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; | ||
305 | cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; | ||
306 | } | ||
307 | |||
308 | map->fldrv = &cfi_amdstd_chipdrv; | ||
309 | |||
310 | return cfi_amdstd_setup(mtd); | ||
311 | } | ||
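For context, board map drivers do not normally call cfi_cmdset_0002() directly; they register a map_info and let the generic CFI probe dispatch to this command set when the chip reports primary vendor command set 0x0002. Below is a minimal sketch of that path; the names example_map, example_mtd and example_flash_init, the physical base address and the window size are all illustrative placeholders, not part of this patch.

```c
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <asm/io.h>

static struct map_info example_map = {
	.name      = "example-nor",
	.size      = 0x00800000,	/* 8 MiB window (illustrative) */
	.bankwidth = 2,			/* 16-bit bus */
	.phys      = 0xff800000,	/* board specific (illustrative) */
};

static struct mtd_info *example_mtd;

static int __init example_flash_init(void)
{
	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -EIO;
	simple_map_init(&example_map);

	/* do_map_probe() runs the CFI query; a chip answering with
	 * command set ID 0x0002 is handed to cfi_cmdset_0002(). */
	example_mtd = do_map_probe("cfi_probe", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}
	return add_mtd_device(example_mtd) ? -EAGAIN : 0;
}

module_init(example_flash_init);
```

Once this succeeds, mtd->read, mtd->write and mtd->erase point at the functions installed above.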
312 | |||
313 | |||
314 | static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) | ||
315 | { | ||
316 | struct map_info *map = mtd->priv; | ||
317 | struct cfi_private *cfi = map->fldrv_priv; | ||
318 | unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; | ||
319 | unsigned long offset = 0; | ||
320 | int i,j; | ||
321 | |||
322 | printk(KERN_NOTICE "number of %s chips: %d\n", | ||
323 | (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); | ||
324 | /* Select the correct geometry setup */ | ||
325 | mtd->size = devsize * cfi->numchips; | ||
326 | |||
327 | mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; | ||
328 | mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) | ||
329 | * mtd->numeraseregions, GFP_KERNEL); | ||
330 | if (!mtd->eraseregions) { | ||
331 | printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); | ||
332 | goto setup_err; | ||
333 | } | ||
334 | |||
335 | for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { | ||
336 | unsigned long ernum, ersize; | ||
337 | ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; | ||
338 | ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; | ||
339 | |||
340 | if (mtd->erasesize < ersize) { | ||
341 | mtd->erasesize = ersize; | ||
342 | } | ||
343 | for (j=0; j<cfi->numchips; j++) { | ||
344 | mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; | ||
345 | mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; | ||
346 | mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; | ||
347 | } | ||
348 | offset += (ersize * ernum); | ||
349 | } | ||
350 | if (offset != devsize) { | ||
351 | /* Argh */ | ||
352 | printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); | ||
353 | goto setup_err; | ||
354 | } | ||
355 | #if 0 | ||
356 | // debug | ||
357 | for (i=0; i<mtd->numeraseregions;i++){ | ||
358 | printk("%d: offset=0x%x,size=0x%x,blocks=%d\n", | ||
359 | i,mtd->eraseregions[i].offset, | ||
360 | mtd->eraseregions[i].erasesize, | ||
361 | mtd->eraseregions[i].numblocks); | ||
362 | } | ||
363 | #endif | ||
364 | |||
365 | /* FIXME: erase-suspend-program is broken. See | ||
366 | http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */ | ||
367 | printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n"); | ||
368 | |||
369 | __module_get(THIS_MODULE); | ||
370 | return mtd; | ||
371 | |||
372 | setup_err: | ||
373 | if(mtd) { | ||
374 | if(mtd->eraseregions) | ||
375 | kfree(mtd->eraseregions); | ||
376 | kfree(mtd); | ||
377 | } | ||
378 | kfree(cfi->cmdset_priv); | ||
379 | kfree(cfi->cfiq); | ||
380 | return NULL; | ||
381 | } | ||
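As an aside, the erase-region loop above decodes the packed CFI descriptor: the low 16 bits of EraseRegionInfo hold the block count minus one, and the upper bits hold the block size divided by 256, which is why the size comes out of `(info >> 8) & ~0xff`. A worked example with a made-up descriptor value (not taken from any real chip):

```c
/* Hypothetical descriptor: 64 blocks of 64 KiB, interleave 1.
 *   low 16 bits  = number of blocks - 1 = 0x003f
 *   high 16 bits = block size / 256     = 0x0100
 * => EraseRegionInfo[i] == 0x0100003f
 */
static void erase_region_example(void)
{
	unsigned long info   = 0x0100003f;
	unsigned long ersize = ((info >> 8) & ~0xff) * 1; /* 0x10000 = 64 KiB */
	unsigned long ernum  = (info & 0xffff) + 1;       /* 64 blocks */

	/* The region therefore spans ersize * ernum = 4 MiB of the device. */
	(void)ersize;
	(void)ernum;
}
```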
382 | |||
383 | /* | ||
384 | * Return true if the chip is ready. | ||
385 | * | ||
386 | * Ready is one of: read mode, query mode, erase-suspend-read mode (in any | ||
387 | * non-suspended sector) and is indicated by no toggle bits toggling. | ||
388 | * | ||
389 | * Note that anything more complicated than checking if no bits are toggling | ||
390 | * (including checking DQ5 for an error status) is tricky to get working | ||
391 | * correctly and is therefore not done (particularly with interleaved chips | ||
392 | * as each chip must be checked independently of the others). | ||
393 | */ | ||
394 | static int chip_ready(struct map_info *map, unsigned long addr) | ||
395 | { | ||
396 | map_word d, t; | ||
397 | |||
398 | d = map_read(map, addr); | ||
399 | t = map_read(map, addr); | ||
400 | |||
401 | return map_word_equal(map, d, t); | ||
402 | } | ||
403 | |||
404 | static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) | ||
405 | { | ||
406 | DECLARE_WAITQUEUE(wait, current); | ||
407 | struct cfi_private *cfi = map->fldrv_priv; | ||
408 | unsigned long timeo; | ||
409 | struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; | ||
410 | |||
411 | resettime: | ||
412 | timeo = jiffies + HZ; | ||
413 | retry: | ||
414 | switch (chip->state) { | ||
415 | |||
416 | case FL_STATUS: | ||
417 | for (;;) { | ||
418 | if (chip_ready(map, adr)) | ||
419 | break; | ||
420 | |||
421 | if (time_after(jiffies, timeo)) { | ||
422 | printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); | ||
423 | cfi_spin_unlock(chip->mutex); | ||
424 | return -EIO; | ||
425 | } | ||
426 | cfi_spin_unlock(chip->mutex); | ||
427 | cfi_udelay(1); | ||
428 | cfi_spin_lock(chip->mutex); | ||
429 | /* Someone else might have been playing with it. */ | ||
430 | goto retry; | ||
431 | } | ||
432 | |||
433 | case FL_READY: | ||
434 | case FL_CFI_QUERY: | ||
435 | case FL_JEDEC_QUERY: | ||
436 | return 0; | ||
437 | |||
438 | case FL_ERASING: | ||
439 | if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ | ||
440 | goto sleep; | ||
441 | |||
442 | if (!(mode == FL_READY || mode == FL_POINT | ||
443 | || !cfip | ||
444 | || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)) | ||
445 | || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)))) | ||
446 | goto sleep; | ||
447 | |||
448 | /* We could check to see if we're trying to access the sector | ||
449 | * that is currently being erased. However, no user will try | ||
450 | * anything like that so we just wait for the timeout. */ | ||
451 | |||
452 | /* Erase suspend */ | ||
453 | /* It's harmless to issue the Erase-Suspend and Erase-Resume | ||
454 | * commands when the erase algorithm isn't in progress. */ | ||
455 | map_write(map, CMD(0xB0), chip->in_progress_block_addr); | ||
456 | chip->oldstate = FL_ERASING; | ||
457 | chip->state = FL_ERASE_SUSPENDING; | ||
458 | chip->erase_suspended = 1; | ||
459 | for (;;) { | ||
460 | if (chip_ready(map, adr)) | ||
461 | break; | ||
462 | |||
463 | if (time_after(jiffies, timeo)) { | ||
464 | /* Should have suspended the erase by now. | ||
465 | * Send an Erase-Resume command as either | ||
466 | * there was an error (so leave the erase | ||
467 | * routine to recover from it) or we are trying to | ||
468 | * use the erase-in-progress sector. */ | ||
469 | map_write(map, CMD(0x30), chip->in_progress_block_addr); | ||
470 | chip->state = FL_ERASING; | ||
471 | chip->oldstate = FL_READY; | ||
472 | printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); | ||
473 | return -EIO; | ||
474 | } | ||
475 | |||
476 | cfi_spin_unlock(chip->mutex); | ||
477 | cfi_udelay(1); | ||
478 | cfi_spin_lock(chip->mutex); | ||
479 | /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. | ||
480 | So we can just loop here. */ | ||
481 | } | ||
482 | chip->state = FL_READY; | ||
483 | return 0; | ||
484 | |||
485 | case FL_POINT: | ||
486 | /* Only if there's no operation suspended... */ | ||
487 | if (mode == FL_READY && chip->oldstate == FL_READY) | ||
488 | return 0; | ||
489 | |||
490 | default: | ||
491 | sleep: | ||
492 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
493 | add_wait_queue(&chip->wq, &wait); | ||
494 | cfi_spin_unlock(chip->mutex); | ||
495 | schedule(); | ||
496 | remove_wait_queue(&chip->wq, &wait); | ||
497 | cfi_spin_lock(chip->mutex); | ||
498 | goto resettime; | ||
499 | } | ||
500 | } | ||
501 | |||
502 | |||
503 | static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) | ||
504 | { | ||
505 | struct cfi_private *cfi = map->fldrv_priv; | ||
506 | |||
507 | switch(chip->oldstate) { | ||
508 | case FL_ERASING: | ||
509 | chip->state = chip->oldstate; | ||
510 | map_write(map, CMD(0x30), chip->in_progress_block_addr); | ||
511 | chip->oldstate = FL_READY; | ||
512 | chip->state = FL_ERASING; | ||
513 | break; | ||
514 | |||
515 | case FL_READY: | ||
516 | case FL_STATUS: | ||
517 | /* We should really make set_vpp() count, rather than doing this */ | ||
518 | DISABLE_VPP(map); | ||
519 | break; | ||
520 | default: | ||
521 | printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); | ||
522 | } | ||
523 | wake_up(&chip->wq); | ||
524 | } | ||
525 | |||
526 | |||
527 | static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) | ||
528 | { | ||
529 | unsigned long cmd_addr; | ||
530 | struct cfi_private *cfi = map->fldrv_priv; | ||
531 | int ret; | ||
532 | |||
533 | adr += chip->start; | ||
534 | |||
535 | /* Ensure cmd read/writes are aligned. */ | ||
536 | cmd_addr = adr & ~(map_bankwidth(map)-1); | ||
537 | |||
538 | cfi_spin_lock(chip->mutex); | ||
539 | ret = get_chip(map, chip, cmd_addr, FL_READY); | ||
540 | if (ret) { | ||
541 | cfi_spin_unlock(chip->mutex); | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | if (chip->state != FL_POINT && chip->state != FL_READY) { | ||
546 | map_write(map, CMD(0xf0), cmd_addr); | ||
547 | chip->state = FL_READY; | ||
548 | } | ||
549 | |||
550 | map_copy_from(map, buf, adr, len); | ||
551 | |||
552 | put_chip(map, chip, cmd_addr); | ||
553 | |||
554 | cfi_spin_unlock(chip->mutex); | ||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | |||
559 | static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) | ||
560 | { | ||
561 | struct map_info *map = mtd->priv; | ||
562 | struct cfi_private *cfi = map->fldrv_priv; | ||
563 | unsigned long ofs; | ||
564 | int chipnum; | ||
565 | int ret = 0; | ||
566 | |||
567 | /* ofs: offset within the first chip that the first read should start */ | ||
568 | |||
569 | chipnum = (from >> cfi->chipshift); | ||
570 | ofs = from - (chipnum << cfi->chipshift); | ||
571 | |||
572 | |||
573 | *retlen = 0; | ||
574 | |||
575 | while (len) { | ||
576 | unsigned long thislen; | ||
577 | |||
578 | if (chipnum >= cfi->numchips) | ||
579 | break; | ||
580 | |||
581 | if ((len + ofs -1) >> cfi->chipshift) | ||
582 | thislen = (1<<cfi->chipshift) - ofs; | ||
583 | else | ||
584 | thislen = len; | ||
585 | |||
586 | ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); | ||
587 | if (ret) | ||
588 | break; | ||
589 | |||
590 | *retlen += thislen; | ||
591 | len -= thislen; | ||
592 | buf += thislen; | ||
593 | |||
594 | ofs = 0; | ||
595 | chipnum++; | ||
596 | } | ||
597 | return ret; | ||
598 | } | ||
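In the MTD API of this era the read path is reached through the function pointer installed in cfi_cmdset_0002() (mtd->read == cfi_amdstd_read). A hedged in-kernel usage sketch follows; the offset and buffer size are illustrative only.

```c
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static int example_read(struct mtd_info *mtd)
{
	u_char buf[512];
	size_t retlen = 0;
	int err;

	/* Read 512 bytes starting at device offset 0x10000 (illustrative). */
	err = mtd->read(mtd, 0x10000, sizeof(buf), &retlen, buf);
	if (err || retlen != sizeof(buf)) {
		printk(KERN_WARNING "flash read failed or was short: %d\n", err);
		return err ? err : -EIO;
	}
	return 0;
}
```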
599 | |||
600 | |||
601 | static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) | ||
602 | { | ||
603 | DECLARE_WAITQUEUE(wait, current); | ||
604 | unsigned long timeo = jiffies + HZ; | ||
605 | struct cfi_private *cfi = map->fldrv_priv; | ||
606 | |||
607 | retry: | ||
608 | cfi_spin_lock(chip->mutex); | ||
609 | |||
610 | if (chip->state != FL_READY){ | ||
611 | #if 0 | ||
612 | printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state); | ||
613 | #endif | ||
614 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
615 | add_wait_queue(&chip->wq, &wait); | ||
616 | |||
617 | cfi_spin_unlock(chip->mutex); | ||
618 | |||
619 | schedule(); | ||
620 | remove_wait_queue(&chip->wq, &wait); | ||
621 | #if 0 | ||
622 | if(signal_pending(current)) | ||
623 | return -EINTR; | ||
624 | #endif | ||
625 | timeo = jiffies + HZ; | ||
626 | |||
627 | goto retry; | ||
628 | } | ||
629 | |||
630 | adr += chip->start; | ||
631 | |||
632 | chip->state = FL_READY; | ||
633 | |||
634 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
635 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
636 | cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
637 | |||
638 | map_copy_from(map, buf, adr, len); | ||
639 | |||
640 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
641 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
642 | cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
643 | cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
644 | |||
645 | wake_up(&chip->wq); | ||
646 | cfi_spin_unlock(chip->mutex); | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) | ||
652 | { | ||
653 | struct map_info *map = mtd->priv; | ||
654 | struct cfi_private *cfi = map->fldrv_priv; | ||
655 | unsigned long ofs; | ||
656 | int chipnum; | ||
657 | int ret = 0; | ||
658 | |||
659 | |||
660 | /* ofs: offset within the first chip that the first read should start */ | ||
661 | |||
662 | /* 8 secsi bytes per chip */ | ||
663 | chipnum=from>>3; | ||
664 | ofs=from & 7; | ||
665 | |||
666 | |||
667 | *retlen = 0; | ||
668 | |||
669 | while (len) { | ||
670 | unsigned long thislen; | ||
671 | |||
672 | if (chipnum >= cfi->numchips) | ||
673 | break; | ||
674 | |||
675 | if ((len + ofs -1) >> 3) | ||
676 | thislen = (1<<3) - ofs; | ||
677 | else | ||
678 | thislen = len; | ||
679 | |||
680 | ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); | ||
681 | if (ret) | ||
682 | break; | ||
683 | |||
684 | *retlen += thislen; | ||
685 | len -= thislen; | ||
686 | buf += thislen; | ||
687 | |||
688 | ofs = 0; | ||
689 | chipnum++; | ||
690 | } | ||
691 | return ret; | ||
692 | } | ||
693 | |||
694 | |||
695 | static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) | ||
696 | { | ||
697 | struct cfi_private *cfi = map->fldrv_priv; | ||
698 | unsigned long timeo = jiffies + HZ; | ||
699 | /* | ||
700 | * We use a 1ms + 1 jiffies generic timeout for writes (most devices | ||
701 | * have a max write time of a few hundred usec). However, we should | ||
702 | * use the maximum timeout value given by the chip at probe time | ||
703 | * instead.  Unfortunately, struct flchip does not have a field for the | ||
704 | * maximum timeout, only for the typical one, which can be far too short | ||
705 | * depending on the conditions.  The ' + 1' is to avoid having a | ||
706 | * timeout of 0 jiffies if HZ is smaller than 1000. | ||
707 | */ | ||
708 | unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; | ||
709 | int ret = 0; | ||
710 | map_word oldd; | ||
711 | int retry_cnt = 0; | ||
712 | |||
713 | adr += chip->start; | ||
714 | |||
715 | cfi_spin_lock(chip->mutex); | ||
716 | ret = get_chip(map, chip, adr, FL_WRITING); | ||
717 | if (ret) { | ||
718 | cfi_spin_unlock(chip->mutex); | ||
719 | return ret; | ||
720 | } | ||
721 | |||
722 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", | ||
723 | __func__, adr, datum.x[0] ); | ||
724 | |||
725 | /* | ||
726 | * Check for a NOP for the case when the datum to write is already | ||
727 | * present - it saves time and works around buggy chips that corrupt | ||
728 | * data at other locations when 0xff is written to a location that | ||
729 | * already contains 0xff. | ||
730 | */ | ||
731 | oldd = map_read(map, adr); | ||
732 | if (map_word_equal(map, oldd, datum)) { | ||
733 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", | ||
734 | __func__); | ||
735 | goto op_done; | ||
736 | } | ||
737 | |||
738 | ENABLE_VPP(map); | ||
739 | retry: | ||
740 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
741 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
742 | cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
743 | map_write(map, datum, adr); | ||
744 | chip->state = FL_WRITING; | ||
745 | |||
746 | cfi_spin_unlock(chip->mutex); | ||
747 | cfi_udelay(chip->word_write_time); | ||
748 | cfi_spin_lock(chip->mutex); | ||
749 | |||
750 | /* See comment above for timeout value. */ | ||
751 | timeo = jiffies + uWriteTimeout; | ||
752 | for (;;) { | ||
753 | if (chip->state != FL_WRITING) { | ||
754 | /* Someone's suspended the write. Sleep */ | ||
755 | DECLARE_WAITQUEUE(wait, current); | ||
756 | |||
757 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
758 | add_wait_queue(&chip->wq, &wait); | ||
759 | cfi_spin_unlock(chip->mutex); | ||
760 | schedule(); | ||
761 | remove_wait_queue(&chip->wq, &wait); | ||
762 | timeo = jiffies + (HZ / 2); /* FIXME */ | ||
763 | cfi_spin_lock(chip->mutex); | ||
764 | continue; | ||
765 | } | ||
766 | |||
767 | if (chip_ready(map, adr)) | ||
768 | goto op_done; | ||
769 | |||
770 | if (time_after(jiffies, timeo)) | ||
771 | break; | ||
772 | |||
773 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
774 | cfi_spin_unlock(chip->mutex); | ||
775 | cfi_udelay(1); | ||
776 | cfi_spin_lock(chip->mutex); | ||
777 | } | ||
778 | |||
779 | printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); | ||
780 | |||
781 | /* reset on all failures. */ | ||
782 | map_write( map, CMD(0xF0), chip->start ); | ||
783 | /* FIXME - should have reset delay before continuing */ | ||
784 | if (++retry_cnt <= MAX_WORD_RETRIES) | ||
785 | goto retry; | ||
786 | |||
787 | ret = -EIO; | ||
788 | op_done: | ||
789 | chip->state = FL_READY; | ||
790 | put_chip(map, chip, adr); | ||
791 | cfi_spin_unlock(chip->mutex); | ||
792 | |||
793 | return ret; | ||
794 | } | ||
795 | |||
796 | |||
797 | static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | ||
798 | size_t *retlen, const u_char *buf) | ||
799 | { | ||
800 | struct map_info *map = mtd->priv; | ||
801 | struct cfi_private *cfi = map->fldrv_priv; | ||
802 | int ret = 0; | ||
803 | int chipnum; | ||
804 | unsigned long ofs, chipstart; | ||
805 | DECLARE_WAITQUEUE(wait, current); | ||
806 | |||
807 | *retlen = 0; | ||
808 | if (!len) | ||
809 | return 0; | ||
810 | |||
811 | chipnum = to >> cfi->chipshift; | ||
812 | ofs = to - (chipnum << cfi->chipshift); | ||
813 | chipstart = cfi->chips[chipnum].start; | ||
814 | |||
815 | /* If it's not bus-aligned, do the first byte write */ | ||
816 | if (ofs & (map_bankwidth(map)-1)) { | ||
817 | unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); | ||
818 | int i = ofs - bus_ofs; | ||
819 | int n = 0; | ||
820 | map_word tmp_buf; | ||
821 | |||
822 | retry: | ||
823 | cfi_spin_lock(cfi->chips[chipnum].mutex); | ||
824 | |||
825 | if (cfi->chips[chipnum].state != FL_READY) { | ||
826 | #if 0 | ||
827 | printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); | ||
828 | #endif | ||
829 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
830 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); | ||
831 | |||
832 | cfi_spin_unlock(cfi->chips[chipnum].mutex); | ||
833 | |||
834 | schedule(); | ||
835 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); | ||
836 | #if 0 | ||
837 | if(signal_pending(current)) | ||
838 | return -EINTR; | ||
839 | #endif | ||
840 | goto retry; | ||
841 | } | ||
842 | |||
843 | /* Load 'tmp_buf' with old contents of flash */ | ||
844 | tmp_buf = map_read(map, bus_ofs+chipstart); | ||
845 | |||
846 | cfi_spin_unlock(cfi->chips[chipnum].mutex); | ||
847 | |||
848 | /* Number of bytes to copy from buffer */ | ||
849 | n = min_t(int, len, map_bankwidth(map)-i); | ||
850 | |||
851 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); | ||
852 | |||
853 | ret = do_write_oneword(map, &cfi->chips[chipnum], | ||
854 | bus_ofs, tmp_buf); | ||
855 | if (ret) | ||
856 | return ret; | ||
857 | |||
858 | ofs += n; | ||
859 | buf += n; | ||
860 | (*retlen) += n; | ||
861 | len -= n; | ||
862 | |||
863 | if (ofs >> cfi->chipshift) { | ||
864 | chipnum ++; | ||
865 | ofs = 0; | ||
866 | if (chipnum == cfi->numchips) | ||
867 | return 0; | ||
868 | } | ||
869 | } | ||
870 | |||
871 | /* We are now aligned, write as much as possible */ | ||
872 | while(len >= map_bankwidth(map)) { | ||
873 | map_word datum; | ||
874 | |||
875 | datum = map_word_load(map, buf); | ||
876 | |||
877 | ret = do_write_oneword(map, &cfi->chips[chipnum], | ||
878 | ofs, datum); | ||
879 | if (ret) | ||
880 | return ret; | ||
881 | |||
882 | ofs += map_bankwidth(map); | ||
883 | buf += map_bankwidth(map); | ||
884 | (*retlen) += map_bankwidth(map); | ||
885 | len -= map_bankwidth(map); | ||
886 | |||
887 | if (ofs >> cfi->chipshift) { | ||
888 | chipnum ++; | ||
889 | ofs = 0; | ||
890 | if (chipnum == cfi->numchips) | ||
891 | return 0; | ||
892 | chipstart = cfi->chips[chipnum].start; | ||
893 | } | ||
894 | } | ||
895 | |||
896 | /* Write the trailing bytes if any */ | ||
897 | if (len & (map_bankwidth(map)-1)) { | ||
898 | map_word tmp_buf; | ||
899 | |||
900 | retry1: | ||
901 | cfi_spin_lock(cfi->chips[chipnum].mutex); | ||
902 | |||
903 | if (cfi->chips[chipnum].state != FL_READY) { | ||
904 | #if 0 | ||
905 | printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); | ||
906 | #endif | ||
907 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
908 | add_wait_queue(&cfi->chips[chipnum].wq, &wait); | ||
909 | |||
910 | cfi_spin_unlock(cfi->chips[chipnum].mutex); | ||
911 | |||
912 | schedule(); | ||
913 | remove_wait_queue(&cfi->chips[chipnum].wq, &wait); | ||
914 | #if 0 | ||
915 | if(signal_pending(current)) | ||
916 | return -EINTR; | ||
917 | #endif | ||
918 | goto retry1; | ||
919 | } | ||
920 | |||
921 | tmp_buf = map_read(map, ofs + chipstart); | ||
922 | |||
923 | cfi_spin_unlock(cfi->chips[chipnum].mutex); | ||
924 | |||
925 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); | ||
926 | |||
927 | ret = do_write_oneword(map, &cfi->chips[chipnum], | ||
928 | ofs, tmp_buf); | ||
929 | if (ret) | ||
930 | return ret; | ||
931 | |||
932 | (*retlen) += len; | ||
933 | } | ||
934 | |||
935 | return 0; | ||
936 | } | ||
937 | |||
938 | |||
939 | /* | ||
940 | * FIXME: interleaved mode not tested, and probably not supported! | ||
941 | */ | ||
942 | static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | ||
943 | unsigned long adr, const u_char *buf, int len) | ||
944 | { | ||
945 | struct cfi_private *cfi = map->fldrv_priv; | ||
946 | unsigned long timeo = jiffies + HZ; | ||
947 | /* see comments in do_write_oneword() regarding uWriteTimeout. */ | ||
948 | unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; | ||
949 | int ret = -EIO; | ||
950 | unsigned long cmd_adr; | ||
951 | int z, words; | ||
952 | map_word datum; | ||
953 | |||
954 | adr += chip->start; | ||
955 | cmd_adr = adr; | ||
956 | |||
957 | cfi_spin_lock(chip->mutex); | ||
958 | ret = get_chip(map, chip, adr, FL_WRITING); | ||
959 | if (ret) { | ||
960 | cfi_spin_unlock(chip->mutex); | ||
961 | return ret; | ||
962 | } | ||
963 | |||
964 | datum = map_word_load(map, buf); | ||
965 | |||
966 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", | ||
967 | __func__, adr, datum.x[0] ); | ||
968 | |||
969 | ENABLE_VPP(map); | ||
970 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
971 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
972 | //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
973 | |||
974 | /* Write Buffer Load */ | ||
975 | map_write(map, CMD(0x25), cmd_adr); | ||
976 | |||
977 | chip->state = FL_WRITING_TO_BUFFER; | ||
978 | |||
979 | /* Write length of data to come */ | ||
980 | words = len / map_bankwidth(map); | ||
981 | map_write(map, CMD(words - 1), cmd_adr); | ||
982 | /* Write data */ | ||
983 | z = 0; | ||
984 | while(z < words * map_bankwidth(map)) { | ||
985 | datum = map_word_load(map, buf); | ||
986 | map_write(map, datum, adr + z); | ||
987 | |||
988 | z += map_bankwidth(map); | ||
989 | buf += map_bankwidth(map); | ||
990 | } | ||
991 | z -= map_bankwidth(map); | ||
992 | |||
993 | adr += z; | ||
994 | |||
995 | /* Write Buffer Program Confirm: GO GO GO */ | ||
996 | map_write(map, CMD(0x29), cmd_adr); | ||
997 | chip->state = FL_WRITING; | ||
998 | |||
999 | cfi_spin_unlock(chip->mutex); | ||
1000 | cfi_udelay(chip->buffer_write_time); | ||
1001 | cfi_spin_lock(chip->mutex); | ||
1002 | |||
1003 | timeo = jiffies + uWriteTimeout; | ||
1004 | |||
1005 | for (;;) { | ||
1006 | if (chip->state != FL_WRITING) { | ||
1007 | /* Someone's suspended the write. Sleep */ | ||
1008 | DECLARE_WAITQUEUE(wait, current); | ||
1009 | |||
1010 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1011 | add_wait_queue(&chip->wq, &wait); | ||
1012 | cfi_spin_unlock(chip->mutex); | ||
1013 | schedule(); | ||
1014 | remove_wait_queue(&chip->wq, &wait); | ||
1015 | timeo = jiffies + (HZ / 2); /* FIXME */ | ||
1016 | cfi_spin_lock(chip->mutex); | ||
1017 | continue; | ||
1018 | } | ||
1019 | |||
1020 | if (chip_ready(map, adr)) | ||
1021 | goto op_done; | ||
1022 | |||
1023 | if( time_after(jiffies, timeo)) | ||
1024 | break; | ||
1025 | |||
1026 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1027 | cfi_spin_unlock(chip->mutex); | ||
1028 | cfi_udelay(1); | ||
1029 | cfi_spin_lock(chip->mutex); | ||
1030 | } | ||
1031 | |||
1032 | printk(KERN_WARNING "MTD %s(): software timeout\n", | ||
1033 | __func__ ); | ||
1034 | |||
1035 | /* reset on all failures. */ | ||
1036 | map_write( map, CMD(0xF0), chip->start ); | ||
1037 | /* FIXME - should have reset delay before continuing */ | ||
1038 | |||
1039 | ret = -EIO; | ||
1040 | op_done: | ||
1041 | chip->state = FL_READY; | ||
1042 | put_chip(map, chip, adr); | ||
1043 | cfi_spin_unlock(chip->mutex); | ||
1044 | |||
1045 | return ret; | ||
1046 | } | ||
1047 | |||
1048 | |||
1049 | static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | ||
1050 | size_t *retlen, const u_char *buf) | ||
1051 | { | ||
1052 | struct map_info *map = mtd->priv; | ||
1053 | struct cfi_private *cfi = map->fldrv_priv; | ||
1054 | int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; | ||
1055 | int ret = 0; | ||
1056 | int chipnum; | ||
1057 | unsigned long ofs; | ||
1058 | |||
1059 | *retlen = 0; | ||
1060 | if (!len) | ||
1061 | return 0; | ||
1062 | |||
1063 | chipnum = to >> cfi->chipshift; | ||
1064 | ofs = to - (chipnum << cfi->chipshift); | ||
1065 | |||
1066 | /* If it's not bus-aligned, do the first word write */ | ||
1067 | if (ofs & (map_bankwidth(map)-1)) { | ||
1068 | size_t local_len = (-ofs)&(map_bankwidth(map)-1); | ||
1069 | if (local_len > len) | ||
1070 | local_len = len; | ||
1071 | ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), | ||
1072 | local_len, retlen, buf); | ||
1073 | if (ret) | ||
1074 | return ret; | ||
1075 | ofs += local_len; | ||
1076 | buf += local_len; | ||
1077 | len -= local_len; | ||
1078 | |||
1079 | if (ofs >> cfi->chipshift) { | ||
1080 | chipnum ++; | ||
1081 | ofs = 0; | ||
1082 | if (chipnum == cfi->numchips) | ||
1083 | return 0; | ||
1084 | } | ||
1085 | } | ||
1086 | |||
1087 | /* Write buffer is worth it only if more than one word to write... */ | ||
1088 | while (len >= map_bankwidth(map) * 2) { | ||
1089 | /* We must not cross write block boundaries */ | ||
1090 | int size = wbufsize - (ofs & (wbufsize-1)); | ||
1091 | |||
1092 | if (size > len) | ||
1093 | size = len; | ||
1094 | if (size % map_bankwidth(map)) | ||
1095 | size -= size % map_bankwidth(map); | ||
1096 | |||
1097 | ret = do_write_buffer(map, &cfi->chips[chipnum], | ||
1098 | ofs, buf, size); | ||
1099 | if (ret) | ||
1100 | return ret; | ||
1101 | |||
1102 | ofs += size; | ||
1103 | buf += size; | ||
1104 | (*retlen) += size; | ||
1105 | len -= size; | ||
1106 | |||
1107 | if (ofs >> cfi->chipshift) { | ||
1108 | chipnum ++; | ||
1109 | ofs = 0; | ||
1110 | if (chipnum == cfi->numchips) | ||
1111 | return 0; | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | if (len) { | ||
1116 | size_t retlen_dregs = 0; | ||
1117 | |||
1118 | ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), | ||
1119 | len, &retlen_dregs, buf); | ||
1120 | |||
1121 | *retlen += retlen_dregs; | ||
1122 | return ret; | ||
1123 | } | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
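The write entry point has the same calling convention as the read path; whether mtd->write resolves to cfi_amdstd_write_words() or cfi_amdstd_write_buffers() depends on the fixup applied at probe time. A short usage sketch, with an illustrative offset and payload; note the target range must already be erased, since programming can only clear bits from 1 to 0.

```c
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static int example_write(struct mtd_info *mtd)
{
	static const u_char pattern[] = "hello, flash";
	size_t retlen = 0;
	int err;

	/* Program sizeof(pattern) bytes at device offset 0x20000 (illustrative). */
	err = mtd->write(mtd, 0x20000, sizeof(pattern), &retlen, pattern);
	if (err)
		printk(KERN_WARNING "flash write failed: %d\n", err);
	return err;
}
```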
1127 | |||
1128 | |||
1129 | /* | ||
1130 | * Handle devices with one erase region that only implement | ||
1131 | * the chip erase command. | ||
1132 | */ | ||
1133 | static inline int do_erase_chip(struct map_info *map, struct flchip *chip) | ||
1134 | { | ||
1135 | struct cfi_private *cfi = map->fldrv_priv; | ||
1136 | unsigned long timeo = jiffies + HZ; | ||
1137 | unsigned long int adr; | ||
1138 | DECLARE_WAITQUEUE(wait, current); | ||
1139 | int ret = 0; | ||
1140 | |||
1141 | adr = cfi->addr_unlock1; | ||
1142 | |||
1143 | cfi_spin_lock(chip->mutex); | ||
1144 | ret = get_chip(map, chip, adr, FL_WRITING); | ||
1145 | if (ret) { | ||
1146 | cfi_spin_unlock(chip->mutex); | ||
1147 | return ret; | ||
1148 | } | ||
1149 | |||
1150 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", | ||
1151 | __func__, chip->start ); | ||
1152 | |||
1153 | ENABLE_VPP(map); | ||
1154 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1155 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
1156 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1157 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1158 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
1159 | cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1160 | |||
1161 | chip->state = FL_ERASING; | ||
1162 | chip->erase_suspended = 0; | ||
1163 | chip->in_progress_block_addr = adr; | ||
1164 | |||
1165 | cfi_spin_unlock(chip->mutex); | ||
1166 | msleep(chip->erase_time/2); | ||
1167 | cfi_spin_lock(chip->mutex); | ||
1168 | |||
1169 | timeo = jiffies + (HZ*20); | ||
1170 | |||
1171 | for (;;) { | ||
1172 | if (chip->state != FL_ERASING) { | ||
1173 | /* Someone's suspended the erase. Sleep */ | ||
1174 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1175 | add_wait_queue(&chip->wq, &wait); | ||
1176 | cfi_spin_unlock(chip->mutex); | ||
1177 | schedule(); | ||
1178 | remove_wait_queue(&chip->wq, &wait); | ||
1179 | cfi_spin_lock(chip->mutex); | ||
1180 | continue; | ||
1181 | } | ||
1182 | if (chip->erase_suspended) { | ||
1183 | /* This erase was suspended and resumed. | ||
1184 | Adjust the timeout */ | ||
1185 | timeo = jiffies + (HZ*20); /* FIXME */ | ||
1186 | chip->erase_suspended = 0; | ||
1187 | } | ||
1188 | |||
1189 | if (chip_ready(map, adr)) | ||
1190 | goto op_done; | ||
1191 | |||
1192 | if (time_after(jiffies, timeo)) | ||
1193 | break; | ||
1194 | |||
1195 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1196 | cfi_spin_unlock(chip->mutex); | ||
1197 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1198 | schedule_timeout(1); | ||
1199 | cfi_spin_lock(chip->mutex); | ||
1200 | } | ||
1201 | |||
1202 | printk(KERN_WARNING "MTD %s(): software timeout\n", | ||
1203 | __func__ ); | ||
1204 | |||
1205 | /* reset on all failures. */ | ||
1206 | map_write( map, CMD(0xF0), chip->start ); | ||
1207 | /* FIXME - should have reset delay before continuing */ | ||
1208 | |||
1209 | ret = -EIO; | ||
1210 | op_done: | ||
1211 | chip->state = FL_READY; | ||
1212 | put_chip(map, chip, adr); | ||
1213 | cfi_spin_unlock(chip->mutex); | ||
1214 | |||
1215 | return ret; | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) | ||
1220 | { | ||
1221 | struct cfi_private *cfi = map->fldrv_priv; | ||
1222 | unsigned long timeo = jiffies + HZ; | ||
1223 | DECLARE_WAITQUEUE(wait, current); | ||
1224 | int ret = 0; | ||
1225 | |||
1226 | adr += chip->start; | ||
1227 | |||
1228 | cfi_spin_lock(chip->mutex); | ||
1229 | ret = get_chip(map, chip, adr, FL_ERASING); | ||
1230 | if (ret) { | ||
1231 | cfi_spin_unlock(chip->mutex); | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
1235 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", | ||
1236 | __func__, adr ); | ||
1237 | |||
1238 | ENABLE_VPP(map); | ||
1239 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1240 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
1241 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1242 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1243 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
1244 | map_write(map, CMD(0x30), adr); | ||
1245 | |||
1246 | chip->state = FL_ERASING; | ||
1247 | chip->erase_suspended = 0; | ||
1248 | chip->in_progress_block_addr = adr; | ||
1249 | |||
1250 | cfi_spin_unlock(chip->mutex); | ||
1251 | msleep(chip->erase_time/2); | ||
1252 | cfi_spin_lock(chip->mutex); | ||
1253 | |||
1254 | timeo = jiffies + (HZ*20); | ||
1255 | |||
1256 | for (;;) { | ||
1257 | if (chip->state != FL_ERASING) { | ||
1258 | /* Someone's suspended the erase. Sleep */ | ||
1259 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1260 | add_wait_queue(&chip->wq, &wait); | ||
1261 | cfi_spin_unlock(chip->mutex); | ||
1262 | schedule(); | ||
1263 | remove_wait_queue(&chip->wq, &wait); | ||
1264 | cfi_spin_lock(chip->mutex); | ||
1265 | continue; | ||
1266 | } | ||
1267 | if (chip->erase_suspended) { | ||
1268 | /* This erase was suspended and resumed. | ||
1269 | Adjust the timeout */ | ||
1270 | timeo = jiffies + (HZ*20); /* FIXME */ | ||
1271 | chip->erase_suspended = 0; | ||
1272 | } | ||
1273 | |||
1274 | if (chip_ready(map, adr)) | ||
1275 | goto op_done; | ||
1276 | |||
1277 | if (time_after(jiffies, timeo)) | ||
1278 | break; | ||
1279 | |||
1280 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1281 | cfi_spin_unlock(chip->mutex); | ||
1282 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1283 | schedule_timeout(1); | ||
1284 | cfi_spin_lock(chip->mutex); | ||
1285 | } | ||
1286 | |||
1287 | printk(KERN_WARNING "MTD %s(): software timeout\n", | ||
1288 | __func__ ); | ||
1289 | |||
1290 | /* reset on all failures. */ | ||
1291 | map_write( map, CMD(0xF0), chip->start ); | ||
1292 | /* FIXME - should have reset delay before continuing */ | ||
1293 | |||
1294 | ret = -EIO; | ||
1295 | op_done: | ||
1296 | chip->state = FL_READY; | ||
1297 | put_chip(map, chip, adr); | ||
1298 | cfi_spin_unlock(chip->mutex); | ||
1299 | return ret; | ||
1300 | } | ||
1301 | |||
1302 | |||
1303 | int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) | ||
1304 | { | ||
1305 | unsigned long ofs, len; | ||
1306 | int ret; | ||
1307 | |||
1308 | ofs = instr->addr; | ||
1309 | len = instr->len; | ||
1310 | |||
1311 | ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); | ||
1312 | if (ret) | ||
1313 | return ret; | ||
1314 | |||
1315 | instr->state = MTD_ERASE_DONE; | ||
1316 | mtd_erase_callback(instr); | ||
1317 | |||
1318 | return 0; | ||
1319 | } | ||
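Erase requests go through struct erase_info, and completion is reported via instr->callback (that is what mtd_erase_callback() above ends up invoking). A common synchronous wrapper is sketched below; example_erase and example_erase_done are hypothetical helper names, and the wait-queue pattern mirrors what in-kernel MTD users of this era typically do.

```c
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>

static void example_erase_done(struct erase_info *instr)
{
	/* priv carries the wait queue head supplied by the caller below. */
	wake_up((wait_queue_head_t *)instr->priv);
}

static int example_erase(struct mtd_info *mtd, u_int32_t ofs, u_int32_t len)
{
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t waitq;
	struct erase_info instr;
	int err;

	init_waitqueue_head(&waitq);
	memset(&instr, 0, sizeof(instr));
	instr.mtd      = mtd;
	instr.addr     = ofs;
	instr.len      = len;
	instr.callback = example_erase_done;
	instr.priv     = (u_long)&waitq;

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&waitq, &wait);

	err = mtd->erase(mtd, &instr);
	if (!err)
		schedule();		/* woken from example_erase_done() */
	else
		set_current_state(TASK_RUNNING);

	remove_wait_queue(&waitq, &wait);
	if (err)
		return err;
	return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}
```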
1320 | |||
1321 | |||
1322 | static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) | ||
1323 | { | ||
1324 | struct map_info *map = mtd->priv; | ||
1325 | struct cfi_private *cfi = map->fldrv_priv; | ||
1326 | int ret = 0; | ||
1327 | |||
1328 | if (instr->addr != 0) | ||
1329 | return -EINVAL; | ||
1330 | |||
1331 | if (instr->len != mtd->size) | ||
1332 | return -EINVAL; | ||
1333 | |||
1334 | ret = do_erase_chip(map, &cfi->chips[0]); | ||
1335 | if (ret) | ||
1336 | return ret; | ||
1337 | |||
1338 | instr->state = MTD_ERASE_DONE; | ||
1339 | mtd_erase_callback(instr); | ||
1340 | |||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | |||
1345 | static void cfi_amdstd_sync (struct mtd_info *mtd) | ||
1346 | { | ||
1347 | struct map_info *map = mtd->priv; | ||
1348 | struct cfi_private *cfi = map->fldrv_priv; | ||
1349 | int i; | ||
1350 | struct flchip *chip; | ||
1351 | int ret = 0; | ||
1352 | DECLARE_WAITQUEUE(wait, current); | ||
1353 | |||
1354 | for (i=0; !ret && i<cfi->numchips; i++) { | ||
1355 | chip = &cfi->chips[i]; | ||
1356 | |||
1357 | retry: | ||
1358 | cfi_spin_lock(chip->mutex); | ||
1359 | |||
1360 | switch(chip->state) { | ||
1361 | case FL_READY: | ||
1362 | case FL_STATUS: | ||
1363 | case FL_CFI_QUERY: | ||
1364 | case FL_JEDEC_QUERY: | ||
1365 | chip->oldstate = chip->state; | ||
1366 | chip->state = FL_SYNCING; | ||
1367 | /* No need to wake_up() on this state change - | ||
1368 | * as the whole point is that nobody can do anything | ||
1369 | * with the chip now anyway. | ||
1370 | */ | ||
1371 | case FL_SYNCING: | ||
1372 | cfi_spin_unlock(chip->mutex); | ||
1373 | break; | ||
1374 | |||
1375 | default: | ||
1376 | /* Not an idle state */ | ||
1377 | add_wait_queue(&chip->wq, &wait); | ||
1378 | |||
1379 | cfi_spin_unlock(chip->mutex); | ||
1380 | |||
1381 | schedule(); | ||
1382 | |||
1383 | remove_wait_queue(&chip->wq, &wait); | ||
1384 | |||
1385 | goto retry; | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | /* Unlock the chips again */ | ||
1390 | |||
1391 | for (i--; i >=0; i--) { | ||
1392 | chip = &cfi->chips[i]; | ||
1393 | |||
1394 | cfi_spin_lock(chip->mutex); | ||
1395 | |||
1396 | if (chip->state == FL_SYNCING) { | ||
1397 | chip->state = chip->oldstate; | ||
1398 | wake_up(&chip->wq); | ||
1399 | } | ||
1400 | cfi_spin_unlock(chip->mutex); | ||
1401 | } | ||
1402 | } | ||
1403 | |||
1404 | |||
1405 | static int cfi_amdstd_suspend(struct mtd_info *mtd) | ||
1406 | { | ||
1407 | struct map_info *map = mtd->priv; | ||
1408 | struct cfi_private *cfi = map->fldrv_priv; | ||
1409 | int i; | ||
1410 | struct flchip *chip; | ||
1411 | int ret = 0; | ||
1412 | |||
1413 | for (i=0; !ret && i<cfi->numchips; i++) { | ||
1414 | chip = &cfi->chips[i]; | ||
1415 | |||
1416 | cfi_spin_lock(chip->mutex); | ||
1417 | |||
1418 | switch(chip->state) { | ||
1419 | case FL_READY: | ||
1420 | case FL_STATUS: | ||
1421 | case FL_CFI_QUERY: | ||
1422 | case FL_JEDEC_QUERY: | ||
1423 | chip->oldstate = chip->state; | ||
1424 | chip->state = FL_PM_SUSPENDED; | ||
1425 | /* No need to wake_up() on this state change - | ||
1426 | * as the whole point is that nobody can do anything | ||
1427 | * with the chip now anyway. | ||
1428 | */ | ||
1429 | case FL_PM_SUSPENDED: | ||
1430 | break; | ||
1431 | |||
1432 | default: | ||
1433 | ret = -EAGAIN; | ||
1434 | break; | ||
1435 | } | ||
1436 | cfi_spin_unlock(chip->mutex); | ||
1437 | } | ||
1438 | |||
1439 | /* Unlock the chips again */ | ||
1440 | |||
1441 | if (ret) { | ||
1442 | for (i--; i >=0; i--) { | ||
1443 | chip = &cfi->chips[i]; | ||
1444 | |||
1445 | cfi_spin_lock(chip->mutex); | ||
1446 | |||
1447 | if (chip->state == FL_PM_SUSPENDED) { | ||
1448 | chip->state = chip->oldstate; | ||
1449 | wake_up(&chip->wq); | ||
1450 | } | ||
1451 | cfi_spin_unlock(chip->mutex); | ||
1452 | } | ||
1453 | } | ||
1454 | |||
1455 | return ret; | ||
1456 | } | ||
1457 | |||
1458 | |||
1459 | static void cfi_amdstd_resume(struct mtd_info *mtd) | ||
1460 | { | ||
1461 | struct map_info *map = mtd->priv; | ||
1462 | struct cfi_private *cfi = map->fldrv_priv; | ||
1463 | int i; | ||
1464 | struct flchip *chip; | ||
1465 | |||
1466 | for (i=0; i<cfi->numchips; i++) { | ||
1467 | |||
1468 | chip = &cfi->chips[i]; | ||
1469 | |||
1470 | cfi_spin_lock(chip->mutex); | ||
1471 | |||
1472 | if (chip->state == FL_PM_SUSPENDED) { | ||
1473 | chip->state = FL_READY; | ||
1474 | map_write(map, CMD(0xF0), chip->start); | ||
1475 | wake_up(&chip->wq); | ||
1476 | } | ||
1477 | else | ||
1478 | printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); | ||
1479 | |||
1480 | cfi_spin_unlock(chip->mutex); | ||
1481 | } | ||
1482 | } | ||
1483 | |||
1484 | static void cfi_amdstd_destroy(struct mtd_info *mtd) | ||
1485 | { | ||
1486 | struct map_info *map = mtd->priv; | ||
1487 | struct cfi_private *cfi = map->fldrv_priv; | ||
1488 | kfree(cfi->cmdset_priv); | ||
1489 | kfree(cfi->cfiq); | ||
1490 | kfree(cfi); | ||
1491 | kfree(mtd->eraseregions); | ||
1492 | } | ||
1493 | |||
1494 | static char im_name[]="cfi_cmdset_0002"; | ||
1495 | |||
1496 | |||
1497 | static int __init cfi_amdstd_init(void) | ||
1498 | { | ||
1499 | inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002); | ||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | |||
1504 | static void __exit cfi_amdstd_exit(void) | ||
1505 | { | ||
1506 | inter_module_unregister(im_name); | ||
1507 | } | ||
1508 | |||
1509 | |||
1510 | module_init(cfi_amdstd_init); | ||
1511 | module_exit(cfi_amdstd_exit); | ||
1512 | |||
1513 | MODULE_LICENSE("GPL"); | ||
1514 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); | ||
1515 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); | ||