author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd/chips/amd_flash.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/mtd/chips/amd_flash.c')
-rw-r--r--	drivers/mtd/chips/amd_flash.c	1415
1 files changed, 1415 insertions, 0 deletions
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
new file mode 100644
index 000000000000..41e2e3e31603
--- /dev/null
+++ b/drivers/mtd/chips/amd_flash.c
@@ -0,0 +1,1415 @@
/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.26 2004/11/20 12:49:04 dwmw2 Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>

/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

#define DEVICE_TYPE_X8	(8 / 8)
#define DEVICE_TYPE_X16	(16 / 8)
#define DEVICE_TYPE_X32	(32 / 8)

/* Addresses */
#define ADDR_MANUFACTURER		0x0000
#define ADDR_DEVICE_ID			0x0001
#define ADDR_SECTOR_LOCK		0x0002
#define ADDR_HANDSHAKE			0x0003
#define ADDR_UNLOCK_1			0x0555
#define ADDR_UNLOCK_2			0x02AA

/* Commands */
#define CMD_UNLOCK_DATA_1		0x00AA
#define CMD_UNLOCK_DATA_2		0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA	0x0090
#define CMD_UNLOCK_BYPASS_MODE		0x0020
#define CMD_PROGRAM_UNLOCK_DATA		0x00A0
#define CMD_RESET_DATA			0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA	0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2	0x0030

#define CMD_UNLOCK_SECTOR		0x0060

/* Manufacturers */
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_FUJITSU	0x0004
#define MANUFACTURER_ST		0x0020
#define MANUFACTURER_SST	0x00BF
#define MANUFACTURER_TOSHIBA	0x0098

/* AMD */
#define AM29F800BB	0x2258
#define AM29F800BT	0x22D6
#define AM29LV800BB	0x225B
#define AM29LV800BT	0x22DA
#define AM29LV160DT	0x22C4
#define AM29LV160DB	0x2249
#define AM29BDS323D	0x22D1
#define AM29BDS643D	0x227E

/* Atmel */
#define AT49xV16x	0x00C0
#define AT49xV16xT	0x00C2

/* Fujitsu */
#define MBM29LV160TE	0x22C4
#define MBM29LV160BE	0x2249
#define MBM29LV800BB	0x225B

/* ST - www.st.com */
#define M29W800T	0x00D7
#define M29W160DT	0x22C4
#define M29W160DB	0x2249

/* SST */
#define SST39LF800	0x2781
#define SST39LF160	0x2782

/* Toshiba */
#define TC58FVT160	0x00C2
#define TC58FVB160	0x0043

#define D6_MASK	0x40

struct amd_flash_private {
	int device_type;
	int interleave;
	int numchips;
	unsigned long chipshift;
//	const char *im_name;
	struct flchip chips[0];
};
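
/*
 * chips[0] is the old zero-length-array idiom for a flexible array member:
 * amd_flash_probe() allocates sizeof(*private) plus one struct flchip per
 * detected chip, so the per-chip state follows the fixed fields directly.
 */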

struct amd_flash_info {
	const __u16 mfr_id;
	const __u16 dev_id;
	const char *name;
	const u_long size;
	const int numeraseregions;
	const struct mtd_erase_region_info regions[4];
};



static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
			  u_char *);
static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
			   const u_char *);
static int amd_flash_erase(struct mtd_info *, struct erase_info *);
static void amd_flash_sync(struct mtd_info *);
static int amd_flash_suspend(struct mtd_info *);
static void amd_flash_resume(struct mtd_info *);
static void amd_flash_destroy(struct mtd_info *);
static struct mtd_info *amd_flash_probe(struct map_info *map);


static struct mtd_chip_driver amd_flash_chipdrv = {
	.probe = amd_flash_probe,
	.destroy = amd_flash_destroy,
	.name = "amd_flash",
	.module = THIS_MODULE
};
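
/*
 * This driver hooks into the generic MTD chip-probe layer through the
 * mtd_chip_driver above.  A board map driver does not call
 * amd_flash_probe() directly; it fills in a struct map_info and asks the
 * chip layer to run the probe by name.  A minimal sketch (the field values
 * are illustrative only, not taken from a real board file):
 *
 *	static struct map_info my_map = {
 *		.name     = "my-board flash",
 *		.size     = 0x00200000,
 *		.buswidth = 2,
 *		// plus the physical/virtual mapping set up by the board code
 *	};
 *
 *	struct mtd_info *mtd = do_map_probe("amd_flash", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */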


static const char im_name[] = "amd_flash";



static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
	if (map->buswidth == 1) {
		return map_read8(map, addr);
	} else if (map->buswidth == 2) {
		return map_read16(map, addr);
	} else if (map->buswidth == 4) {
		return map_read32(map, addr);
	}

	return 0;
}

static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
	if (map->buswidth == 1) {
		map_write8(map, val, addr);
	} else if (map->buswidth == 2) {
		map_write16(map, val, addr);
	} else if (map->buswidth == 4) {
		map_write32(map, val, addr);
	}
}

static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
{
	const struct amd_flash_private *private = map->fldrv_priv;
	if ((private->interleave == 2) &&
	    (private->device_type == DEVICE_TYPE_X16)) {
		cmd |= (cmd << 16);
	}

	return cmd;
}
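
/*
 * With two x16 chips interleaved on a 32-bit bus every command byte has to
 * reach both devices at once, so make_cmd() mirrors the command value into
 * the upper 16 bits; for a single chip it is passed through unchanged.
 */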

static inline void send_unlock(struct map_info *map, unsigned long base)
{
	wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
		   base + (map->buswidth * ADDR_UNLOCK_1));
	wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
		   base + (map->buswidth * ADDR_UNLOCK_2));
}

static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd),
		   base + (map->buswidth * ADDR_UNLOCK_1));
}

static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
				    __u32 cmd, unsigned long addr)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd), addr);
}
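
/*
 * send_cmd()/send_cmd_to_addr() implement the standard AMD/JEDEC command
 * protocol: write 0xAA to address 0x555 and 0x55 to 0x2AA (both scaled by
 * the bus width so they hit the right byte lanes), then write the actual
 * command code, either to 0x555 again or to a specific target address for
 * commands such as sector erase.
 */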

static inline int flash_is_busy(struct map_info *map, unsigned long addr,
				int interleave)
{

	if ((interleave == 2) && (map->buswidth == 4)) {
		__u32 read1, read2;

		read1 = wide_read(map, addr);
		read2 = wide_read(map, addr);

		return (((read1 >> 16) & D6_MASK) !=
			((read2 >> 16) & D6_MASK)) ||
		       (((read1 & 0xffff) & D6_MASK) !=
			((read2 & 0xffff) & D6_MASK));
	}

	return ((wide_read(map, addr) & D6_MASK) !=
		(wide_read(map, addr) & D6_MASK));
}
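
/*
 * Busy detection uses the DQ6 "toggle bit": while a program or erase
 * operation is in progress the chip flips DQ6 on every read, so two
 * back-to-back reads that differ in bit 6 mean the chip is still busy.
 * With two interleaved chips on a 32-bit bus each 16-bit half is checked
 * separately.
 */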

static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
				 int unlock)
{
	/* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
	int SLA = unlock ?
		(sect_addr |  (0x40 * map->buswidth)) :
		(sect_addr & ~(0x40 * map->buswidth)) ;

	__u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);

	wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
	wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
}

static inline int is_sector_locked(struct map_info *map,
				   unsigned long sect_addr)
{
	int status;

	wide_write(map, CMD_RESET_DATA, 0);
	send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);

	/* status is 0x0000 for unlocked and 0x0001 for locked */
	status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
	wide_write(map, CMD_RESET_DATA, 0);
	return status;
}

static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
			       int is_unlock)
{
	struct map_info *map;
	struct mtd_erase_region_info *merip;
	int eraseoffset, erasesize, eraseblocks;
	int i;
	int retval = 0;
	int lock_status;

	map = mtd->priv;

	/* Pass the whole chip through sector by sector and check for each
	   sector if the sector and the given interval overlap */
	for(i = 0; i < mtd->numeraseregions; i++) {
		merip = &mtd->eraseregions[i];

		eraseoffset = merip->offset;
		erasesize = merip->erasesize;
		eraseblocks = merip->numblocks;

		if (ofs > eraseoffset + erasesize)
			continue;

		while (eraseblocks > 0) {
			if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
				unlock_sector(map, eraseoffset, is_unlock);

				lock_status = is_sector_locked(map, eraseoffset);

				if (is_unlock && lock_status) {
					printk("Cannot unlock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				} else if (!is_unlock && !lock_status) {
					printk("Cannot lock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				}
			}
			eraseoffset += erasesize;
			eraseblocks--;
		}
	}
	return retval;
}

static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 1);
}

static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 0);
}


/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
			  struct flchip *chips,
			  struct amd_flash_private *private,
			  const struct amd_flash_info *table, int table_size)
{
	__u32 mfr_id;
	__u32 dev_id;
	struct map_info *map = mtd->priv;
	struct amd_flash_private temp;
	int i;

	temp.device_type = DEVICE_TYPE_X16;	// Assume X16 (FIXME)
	temp.interleave = 2;
	map->fldrv_priv = &temp;

	/* Enter autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);
	send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

	mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
	dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

	if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
	    ((dev_id >> 16) == (dev_id & 0xffff))) {
		mfr_id &= 0xffff;
		dev_id &= 0xffff;
	} else {
		temp.interleave = 1;
	}

	for (i = 0; i < table_size; i++) {
		if ((mfr_id == table[i].mfr_id) &&
		    (dev_id == table[i].dev_id)) {
			if (chips) {
				int j;

				/* Is this an alias for an already found chip?
				 * In that case that chip should be in
				 * autoselect mode now.
				 */
				for (j = 0; j < private->numchips; j++) {
					__u32 mfr_id_other;
					__u32 dev_id_other;

					mfr_id_other =
						wide_read(map, chips[j].start +
							(map->buswidth *
							 ADDR_MANUFACTURER
							));
					dev_id_other =
						wide_read(map, chips[j].start +
							(map->buswidth *
							 ADDR_DEVICE_ID));
					if (temp.interleave == 2) {
						mfr_id_other &= 0xffff;
						dev_id_other &= 0xffff;
					}
					if ((mfr_id_other == mfr_id) &&
					    (dev_id_other == dev_id)) {

						/* Exit autoselect mode. */
						send_cmd(map, base,
							 CMD_RESET_DATA);

						return -1;
					}
				}

				if (private->numchips == MAX_AMD_CHIPS) {
					printk(KERN_WARNING
					       "%s: Too many flash chips "
					       "detected. Increase "
					       "MAX_AMD_CHIPS from %d.\n",
					       map->name, MAX_AMD_CHIPS);

					return -1;
				}

				chips[private->numchips].start = base;
				chips[private->numchips].state = FL_READY;
				chips[private->numchips].mutex =
					&chips[private->numchips]._spinlock;
				private->numchips++;
			}

			printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
			       temp.interleave, (table[i].size)/(1024*1024),
			       table[i].name, base);

			mtd->size += table[i].size * temp.interleave;
			mtd->numeraseregions += table[i].numeraseregions;

			break;
		}
	}

	/* Exit autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);

	if (i == table_size) {
		printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
		       "mfr id 0x%x, dev id 0x%x\n", map->name,
		       base, mfr_id, dev_id);
		map->fldrv_priv = NULL;

		return -1;
	}

	private->device_type = temp.device_type;
	private->interleave = temp.interleave;

	return i;
}



static struct mtd_info *amd_flash_probe(struct map_info *map)
{
	static const struct amd_flash_info table[] = {
	{
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DT,
		.name = "AMD AM29LV160DT",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DB,
		.name = "AMD AM29LV160DB",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVT160,
		.name = "Toshiba TC58FVT160",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160TE,
		.name = "Fujitsu MBM29LV160TE",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVB160,
		.name = "Toshiba TC58FVB160",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160BE,
		.name = "Fujitsu MBM29LV160BE",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BB,
		.name = "AMD AM29F800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BT,
		.name = "AMD AM29LV800BT",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BT,
		.name = "AMD AM29F800BT",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV800BB,
		.name = "Fujitsu MBM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W800T,
		.name = "ST M29W800T",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W160DT,
		.name = "ST M29W160DT",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W160DB,
		.name = "ST M29W160DB",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29BDS323D,
		.name = "AMD AM29BDS323D",
		.size = 0x00400000,
		.numeraseregions = 3,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
			{ .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29BDS643D,
		.name = "AMD AM29BDS643D",
		.size = 0x00800000,
		.numeraseregions = 3,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 96 },
			{ .offset = 0x600000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x7f0000, .erasesize = 0x02000, .numblocks = 8 },
		}
	}, {
		.mfr_id = MANUFACTURER_ATMEL,
		.dev_id = AT49xV16x,
		.name = "Atmel AT49xV16x",
		.size = 0x00200000,
		.numeraseregions = 2,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_ATMEL,
		.dev_id = AT49xV16xT,
		.name = "Atmel AT49xV16xT",
		.size = 0x00200000,
		.numeraseregions = 2,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
		}
	}
	};

	struct mtd_info *mtd;
	struct flchip chips[MAX_AMD_CHIPS];
	int table_pos[MAX_AMD_CHIPS];
	struct amd_flash_private temp;
	struct amd_flash_private *private;
	u_long size;
	unsigned long base;
	int i;
	int reg_idx;
	int offset;

	mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for info structure\n", map->name);
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;

	memset(&temp, 0, sizeof(temp));

	printk("%s: Probing for AMD compatible flash...\n", map->name);

	if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
					   sizeof(table)/sizeof(table[0])))
	    == -1) {
		printk(KERN_WARNING
		       "%s: Found no AMD compatible device at location zero\n",
		       map->name);
		kfree(mtd);

		return NULL;
	}

	chips[0].start = 0;
	chips[0].state = FL_READY;
	chips[0].mutex = &chips[0]._spinlock;
	temp.numchips = 1;
	for (size = mtd->size; size > 1; size >>= 1) {
		temp.chipshift++;
	}
	switch (temp.interleave) {
	case 2:
		temp.chipshift += 1;
		break;
	case 4:
		temp.chipshift += 2;
		break;
	}
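
	/*
	 * chipshift now approximates log2 of the address space taken by one
	 * probed device, so the read/write/erase paths below can split an
	 * absolute flash offset into a chip index (offset >> chipshift) and
	 * an offset within that chip.
	 */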

	/* Find out if there are any more chips in the map. */
	for (base = (1 << temp.chipshift);
	     base < map->size;
	     base += (1 << temp.chipshift)) {
		int numchips = temp.numchips;
		table_pos[numchips] = probe_new_chip(mtd, base, chips,
			&temp, table, sizeof(table)/sizeof(table[0]));
	}

	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
				    mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "%s: Failed to allocate "
		       "memory for MTD erase region info\n", map->name);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}

	reg_idx = 0;
	offset = 0;
	for (i = 0; i < temp.numchips; i++) {
		int dev_size;
		int j;

		dev_size = 0;
		for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
			mtd->eraseregions[reg_idx].offset = offset +
				(table[table_pos[i]].regions[j].offset *
				 temp.interleave);
			mtd->eraseregions[reg_idx].erasesize =
				table[table_pos[i]].regions[j].erasesize *
				temp.interleave;
			mtd->eraseregions[reg_idx].numblocks =
				table[table_pos[i]].regions[j].numblocks;
			if (mtd->erasesize <
			    mtd->eraseregions[reg_idx].erasesize) {
				mtd->erasesize =
					mtd->eraseregions[reg_idx].erasesize;
			}
			dev_size += mtd->eraseregions[reg_idx].erasesize *
				    mtd->eraseregions[reg_idx].numblocks;
			reg_idx++;
		}
		offset += dev_size;
	}
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->erase = amd_flash_erase;
	mtd->read = amd_flash_read;
	mtd->write = amd_flash_write;
	mtd->sync = amd_flash_sync;
	mtd->suspend = amd_flash_suspend;
	mtd->resume = amd_flash_resume;
	mtd->lock = amd_flash_lock;
	mtd->unlock = amd_flash_unlock;

	private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
					      temp.numchips), GFP_KERNEL);
	if (!private) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for private structure\n", map->name);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}
	memcpy(private, &temp, sizeof(temp));
	memcpy(private->chips, chips,
	       sizeof(struct flchip) * private->numchips);
	for (i = 0; i < private->numchips; i++) {
		init_waitqueue_head(&private->chips[i].wq);
		spin_lock_init(&private->chips[i]._spinlock);
	}

	map->fldrv_priv = private;

	map->fldrv = &amd_flash_chipdrv;

	__module_get(THIS_MODULE);
	return mtd;
}



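/*
 * Each chip carries a small state machine (struct flchip): callers take
 * chip->mutex, and if the chip is not FL_READY they sleep on chip->wq until
 * the operation in progress wakes them.  read_one_chip(), write_one_word()
 * and erase_one_block() below all follow this pattern.
 */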
static inline int read_one_chip(struct map_info *map, struct flchip *chip,
				loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if(signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map_copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}



static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
			  size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if ((from + len) > mtd->size) {
		printk(KERN_WARNING "%s: read request past end of device "
		       "(0x%lx)\n", map->name, (unsigned long)from + len);

		return -EINVAL;
	}

	/* Offset within the first chip that the first read should start. */
	chipnum = (from >> private->chipshift);
	ofs = from - (chipnum << private->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long this_len;

		if (chipnum >= private->numchips) {
			break;
		}

		if ((len + ofs - 1) >> private->chipshift) {
			this_len = (1 << private->chipshift) - ofs;
		} else {
			this_len = len;
		}

		ret = read_one_chip(map, &private->chips[chipnum], ofs,
				    this_len, buf);
		if (ret) {
			break;
		}

		*retlen += this_len;
		len -= this_len;
		buf += this_len;

		ofs = 0;
		chipnum++;
	}

	return ret;
}



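/*
 * Programming a single bus word: send the program command sequence (unlock
 * cycles followed by 0xA0), write the datum to its address, then poll the
 * DQ6 toggle bit with a bounded spin (rescheduling when needed) and finally
 * read the location back to verify it.
 */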
static int write_one_word(struct map_info *map, struct flchip *chip,
			  unsigned long adr, __u32 datum)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int times_left;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk("%s: waiting for chip to write, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk(KERN_INFO "%s: woke up to write\n", map->name);
		if(signal_pending(current))
			return -EINTR;

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
	wide_write(map, datum, adr);

	times_left = 500000;
	while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
		if (need_resched()) {
			spin_unlock_bh(chip->mutex);
			schedule();
			spin_lock_bh(chip->mutex);
		}
	}

	if (!times_left) {
		printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
		       map->name, adr);
		ret = -EIO;
	} else {
		__u32 verify;
		if ((verify = wide_read(map, adr)) != datum) {
			printk(KERN_WARNING "%s: write to 0x%lx failed. "
			       "datum = %x, verify = %x\n",
			       map->name, adr, datum, verify);
			ret = -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return ret;
}



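/*
 * The write path only programs whole, bus-aligned words, so an unaligned
 * head or tail is handled by reading the surrounding word from flash,
 * merging in the caller's bytes and programming it back; the aligned middle
 * is programmed word by word, moving to the next chip whenever the offset
 * crosses a chip boundary.
 */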
static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
			   size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;
	unsigned long chipstart;

	*retlen = 0;
	if (!len) {
		return 0;
	}

	chipnum = to >> private->chipshift;
	ofs = to - (chipnum << private->chipshift);
	chipstart = private->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write. */
	if (ofs & (map->buswidth - 1)) {
		unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map_copy_from(map, tmp_buf,
			      bus_ofs + private->chips[chipnum].start,
			      map->buswidth);
		while (len && i < map->buswidth)
			tmp_buf[i++] = buf[n++], len--;

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
				     datum);
		if (ret) {
			return ret;
		}

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
		}
	}

	/* We are now aligned, write as much as possible. */
	while(len >= map->buswidth) {
		__u32 datum;

		if (map->buswidth == 1) {
			datum = *(__u8*)buf;
		} else if (map->buswidth == 2) {
			datum = *(__u16*)buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		ofs += map->buswidth;
		buf += map->buswidth;
		(*retlen) += map->buswidth;
		len -= map->buswidth;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
			chipstart = private->chips[chipnum].start;
		}
	}

	if (len & (map->buswidth - 1)) {
		int i = 0, n = 0;
		u_char tmp_buf[2];
		__u32 datum;

		map_copy_from(map, tmp_buf,
			      ofs + private->chips[chipnum].start,
			      map->buswidth);
		while (len--) {
			tmp_buf[i++] = buf[n++];
		}

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		(*retlen) += n;
	}

	return 0;
}



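/*
 * Sector erase: send the erase setup command (0x80) followed by the
 * sector-erase confirm (0x30) written to the sector address, then sleep and
 * poll the DQ6 toggle bit until the chip finishes (or the 20 s timeout
 * expires), and finally verify that every byte of the block reads back 0xFF.
 */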
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",
			       map->name);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {
				return -EINTR;
			}

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);
			DISABLE_VPP(map);

			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		if (need_resched())
			schedule();
		else
			udelay(1);

		spin_lock_bh(chip->mutex);
	}

	/* Verify every single word */
	{
		int address;
		int error = 0;
		__u8 verify;

		for (address = adr; address < (adr + size); address++) {
			if ((verify = map_read8(map, address)) != 0xFF) {
				error = 1;
				break;
			}
		}
		if (error) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING
			       "%s: verify error at 0x%x, size %ld.\n",
			       map->name, address, size);
			DISABLE_VPP(map);

			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}



static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum;
	int ret = 0;
	int i;
	int first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {
		return -EINVAL;
	}

	if ((instr->len + instr->addr) > mtd->size) {
		return -EINVAL;
	}

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {
		i++;
	}
	i--;

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 * effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Remember the erase region we start on. */

	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {
		i++;
	}

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		if (ret) {
			return ret;
		}

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {
			i++;
		}

		if (adr >> private->chipshift) {
			adr = 0;
			chipnum++;
			if (chipnum >= private->numchips) {
				break;
			}
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}



static void amd_flash_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}



static int amd_flash_suspend(struct mtd_info *mtd)
{
	printk("amd_flash_suspend(): not implemented!\n");
	return -EINVAL;
}



static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}



static void amd_flash_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	kfree(private);
}

int __init amd_flash_init(void)
{
	register_mtd_chip_driver(&amd_flash_chipdrv);
	return 0;
}

void __exit amd_flash_exit(void)
{
	unregister_mtd_chip_driver(&amd_flash_chipdrv);
}

module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");