author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd/chips
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--  drivers/mtd/chips/Kconfig            |  286
-rw-r--r--  drivers/mtd/chips/Makefile           |   26
-rw-r--r--  drivers/mtd/chips/amd_flash.c        | 1415
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  | 2160
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  | 1515
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  | 1418
-rw-r--r--  drivers/mtd/chips/cfi_probe.c        |  445
-rw-r--r--  drivers/mtd/chips/cfi_util.c         |  196
-rw-r--r--  drivers/mtd/chips/chipreg.c          |  111
-rw-r--r--  drivers/mtd/chips/fwh_lock.h         |  107
-rw-r--r--  drivers/mtd/chips/gen_probe.c        |  255
-rw-r--r--  drivers/mtd/chips/jedec.c            |  934
-rw-r--r--  drivers/mtd/chips/jedec_probe.c      | 2127
-rw-r--r--  drivers/mtd/chips/map_absent.c       |  117
-rw-r--r--  drivers/mtd/chips/map_ram.c          |  143
-rw-r--r--  drivers/mtd/chips/map_rom.c          |   94
-rw-r--r--  drivers/mtd/chips/sharp.c            |  596
17 files changed, 11945 insertions, 0 deletions
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
new file mode 100644
index 000000000000..d682dbc8157e
--- /dev/null
+++ b/drivers/mtd/chips/Kconfig
@@ -0,0 +1,286 @@
1# drivers/mtd/chips/Kconfig
2# $Id: Kconfig,v 1.13 2004/12/01 15:49:10 nico Exp $
3
4menu "RAM/ROM/Flash chip drivers"
5 depends on MTD!=n
6
7config MTD_CFI
8 tristate "Detect flash chips by Common Flash Interface (CFI) probe"
9 depends on MTD
10 select MTD_GEN_PROBE
11 help
12	  The Common Flash Interface specification was developed by Intel,
13	  AMD and other flash manufacturers to provide a universal method
14	  for probing the capabilities of flash devices. If you wish to
15	  support any device that is CFI-compliant, you need to enable this
16	  option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
17	  for more information on CFI.
18
19config MTD_JEDECPROBE
20 tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
21 depends on MTD
22 select MTD_GEN_PROBE
23 help
24	  This option enables JEDEC-style probing of flash chips which are not
25	  compatible with the Common Flash Interface, but which can still use
26	  the common CFI-targeted flash drivers once identified, since they are
27	  in fact compatible in everything but the probe method. This covers
28	  most AMD/Fujitsu-compatible chips and will shortly also cover non-CFI
29	  Intel chips (that code is in MTD CVS and should shortly be sent for
30	  inclusion in Linus' tree).
31
32config MTD_GEN_PROBE
33 tristate
34
35config MTD_CFI_ADV_OPTIONS
36 bool "Flash chip driver advanced configuration options"
37 depends on MTD_GEN_PROBE
38 help
39 If you need to specify a specific endianness for access to flash
40 chips, or if you wish to reduce the size of the kernel by including
41 support for only specific arrangements of flash chips, say 'Y'. This
42 option does not directly affect the code, but will enable other
43 configuration options which allow you to do so.
44
45 If unsure, say 'N'.
46
47choice
48 prompt "Flash cmd/query data swapping"
49 depends on MTD_CFI_ADV_OPTIONS
50 default MTD_CFI_NOSWAP
51
52config MTD_CFI_NOSWAP
53 bool "NO"
54 ---help---
55 This option defines the way in which the CPU attempts to arrange
56 data bits when writing the 'magic' commands to the chips. Saying
57 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
58 enabled, means that the CPU will not do any swapping; the chips
59 are expected to be wired to the CPU in 'host-endian' form.
60	  Specific arrangements are possible with the BIG_ENDIAN_BYTE and
61	  LITTLE_ENDIAN_BYTE options, if the bytes are reversed.
62
63 If you have a LART, on which the data (and address) lines were
64 connected in a fashion which ensured that the nets were as short
65 as possible, resulting in a bit-shuffling which seems utterly
66 random to the untrained eye, you need the LART_ENDIAN_BYTE option.
67
68 Yes, there really exists something sicker than PDP-endian :)
69
70config MTD_CFI_BE_BYTE_SWAP
71 bool "BIG_ENDIAN_BYTE"
72
73config MTD_CFI_LE_BYTE_SWAP
74 bool "LITTLE_ENDIAN_BYTE"
75
76endchoice
77
78config MTD_CFI_GEOMETRY
79 bool "Specific CFI Flash geometry selection"
80 depends on MTD_CFI_ADV_OPTIONS
81 help
82 This option does not affect the code directly, but will enable
83 some other configuration options which would allow you to reduce
84 the size of the kernel by including support for only certain
85 arrangements of CFI chips. If unsure, say 'N' and all options
86 which are supported by the current code will be enabled.
87
88config MTD_MAP_BANK_WIDTH_1
89 bool "Support 8-bit buswidth" if MTD_CFI_GEOMETRY
90 default y
91 help
92 If you wish to support CFI devices on a physical bus which is
93 8 bits wide, say 'Y'.
94
95config MTD_MAP_BANK_WIDTH_2
96 bool "Support 16-bit buswidth" if MTD_CFI_GEOMETRY
97 default y
98 help
99 If you wish to support CFI devices on a physical bus which is
100 16 bits wide, say 'Y'.
101
102config MTD_MAP_BANK_WIDTH_4
103 bool "Support 32-bit buswidth" if MTD_CFI_GEOMETRY
104 default y
105 help
106 If you wish to support CFI devices on a physical bus which is
107 32 bits wide, say 'Y'.
108
109config MTD_MAP_BANK_WIDTH_8
110 bool "Support 64-bit buswidth" if MTD_CFI_GEOMETRY
111 default n
112 help
113 If you wish to support CFI devices on a physical bus which is
114 64 bits wide, say 'Y'.
115
116config MTD_MAP_BANK_WIDTH_16
117 bool "Support 128-bit buswidth" if MTD_CFI_GEOMETRY
118 default n
119 help
120 If you wish to support CFI devices on a physical bus which is
121 128 bits wide, say 'Y'.
122
123config MTD_MAP_BANK_WIDTH_32
124 bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
125 default n
126 help
127 If you wish to support CFI devices on a physical bus which is
128 256 bits wide, say 'Y'.
129
130config MTD_CFI_I1
131 bool "Support 1-chip flash interleave" if MTD_CFI_GEOMETRY
132 default y
133 help
134 If your flash chips are not interleaved - i.e. you only have one
135 flash chip addressed by each bus cycle, then say 'Y'.
136
137config MTD_CFI_I2
138 bool "Support 2-chip flash interleave" if MTD_CFI_GEOMETRY
139 default y
140 help
141 If your flash chips are interleaved in pairs - i.e. you have two
142 flash chips addressed by each bus cycle, then say 'Y'.
143
144config MTD_CFI_I4
145 bool "Support 4-chip flash interleave" if MTD_CFI_GEOMETRY
146 default n
147 help
148 If your flash chips are interleaved in fours - i.e. you have four
149 flash chips addressed by each bus cycle, then say 'Y'.
150
151config MTD_CFI_I8
152 bool "Support 8-chip flash interleave" if MTD_CFI_GEOMETRY
153 default n
154 help
155 If your flash chips are interleaved in eights - i.e. you have eight
156 flash chips addressed by each bus cycle, then say 'Y'.
157
158config MTD_CFI_INTELEXT
159 tristate "Support for Intel/Sharp flash chips"
160 depends on MTD_GEN_PROBE
161 select MTD_CFI_UTIL
162 help
163 The Common Flash Interface defines a number of different command
164 sets which a CFI-compliant chip may claim to implement. This code
165 provides support for one of those command sets, used on Intel
166 StrataFlash and other parts.
167
168config MTD_CFI_AMDSTD
169 tristate "Support for AMD/Fujitsu flash chips"
170 depends on MTD_GEN_PROBE
171 select MTD_CFI_UTIL
172 help
173 The Common Flash Interface defines a number of different command
174 sets which a CFI-compliant chip may claim to implement. This code
175 provides support for one of those command sets, used on chips
176 including the AMD Am29LV320.
177
178config MTD_CFI_AMDSTD_RETRY
179 int "Retry failed commands (erase/program)"
180 depends on MTD_CFI_AMDSTD
181 default "0"
182 help
183	  Some chips, when attached to a shared bus, don't properly filter
184	  bus traffic that is destined for other devices. This broken
185	  behavior causes erase and program sequences to be aborted when
186	  the sequences are mixed with traffic for other devices.
187
188	  SST49LF040 (and related) chips are known to be broken.
189
190config MTD_CFI_AMDSTD_RETRY_MAX
191 int "Max retries of failed commands (erase/program)"
192 depends on MTD_CFI_AMDSTD_RETRY
193 default "0"
194 help
195 If you have an SST49LF040 (or related chip) then this value should
196 be set to at least 1. This can also be adjusted at driver load
197 time with the retry_cmd_max module parameter.
198
199config MTD_CFI_STAA
200 tristate "Support for ST (Advanced Architecture) flash chips"
201 depends on MTD_GEN_PROBE
202 select MTD_CFI_UTIL
203 help
204 The Common Flash Interface defines a number of different command
205 sets which a CFI-compliant chip may claim to implement. This code
206 provides support for one of those command sets.
207
208config MTD_CFI_UTIL
209 tristate
210
211config MTD_RAM
212 tristate "Support for RAM chips in bus mapping"
213 depends on MTD
214 help
215 This option enables basic support for RAM chips accessed through
216 a bus mapping driver.
217
218config MTD_ROM
219 tristate "Support for ROM chips in bus mapping"
220 depends on MTD
221 help
222 This option enables basic support for ROM chips accessed through
223 a bus mapping driver.
224
225config MTD_ABSENT
226 tristate "Support for absent chips in bus mapping"
227 depends on MTD
228 help
229 This option enables support for a dummy probing driver used to
230	  allocate placeholder MTD devices on systems that have socketed
231 or removable media. Use of this driver as a fallback chip probe
232 preserves the expected registration order of MTD device nodes on
233 the system regardless of media presence. Device nodes created
234 with this driver will return -ENODEV upon access.
235
236config MTD_OBSOLETE_CHIPS
237 depends on MTD && BROKEN
238 bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
239 help
240 This option does not enable any code directly, but will allow you to
241 select some other chip drivers which are now considered obsolete,
242	  because the generic CONFIG_MTD_JEDECPROBE code above should now detect
243	  the chips which are supported by these drivers, and allow the generic
244	  CFI-compatible drivers to drive the chips. Say 'N' here unless you have
245	  already tried the CONFIG_MTD_JEDECPROBE method and reported its failure
246	  to the MTD mailing list at <linux-mtd@lists.infradead.org>.
247
248config MTD_AMDSTD
249 tristate "AMD compatible flash chip support (non-CFI)"
250 depends on MTD && MTD_OBSOLETE_CHIPS
251 help
252 This option enables support for flash chips using AMD-compatible
253 commands, including some which are not CFI-compatible and hence
254 cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
255
256 It also works on AMD compatible chips that do conform to CFI.
257
258config MTD_SHARP
259 tristate "pre-CFI Sharp chip support"
260 depends on MTD && MTD_OBSOLETE_CHIPS
261 help
262 This option enables support for flash chips using Sharp-compatible
263 commands, including some which are not CFI-compatible and hence
264 cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
265
266config MTD_JEDEC
267 tristate "JEDEC device support"
268 depends on MTD && MTD_OBSOLETE_CHIPS
269 help
270	  Enable older JEDEC flash interface devices for self-programming
271	  flash. It is commonly used in older AMD chips. It is
272 only called JEDEC because the JEDEC association
273 <http://www.jedec.org/> distributes the identification codes for the
274 chips.
275
276config MTD_XIP
277 bool "XIP aware MTD support"
278 depends on !SMP && MTD_CFI_INTELEXT && EXPERIMENTAL
279 default y if XIP_KERNEL
280 help
281 This allows MTD support to work with flash memory which is also
282 used for XIP purposes. If you're not sure what this is all about
283 then say N.
284
285endmenu
286
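The endianness and interleave options above interact at the command level: a 16-bit CFI command word may have to be replicated once per interleaved chip and byte-swapped to match the board wiring. A minimal illustrative C sketch, assuming two x16 chips on a 32-bit bus (the helper name is hypothetical; compare make_cmd() in amd_flash.c below):

#include <stdint.h>

/* Build the 32-bit value actually driven onto the bus for one CFI
 * command word.  The replication mirrors make_cmd() in amd_flash.c;
 * the optional swap corresponds to the BIG_ENDIAN_BYTE style options
 * above, where the chip's bytes are wired reversed relative to the
 * CPU. */
static uint32_t build_bus_cmd(uint32_t cmd, int interleave, int swap_bytes)
{
	if (interleave == 2)	/* same command for both interleaved chips */
		cmd |= cmd << 16;
	if (swap_bytes)		/* swap bytes within each 16-bit lane */
		cmd = ((cmd & 0x00ff00ffU) << 8) | ((cmd & 0xff00ff00U) >> 8);
	return cmd;
}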
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
new file mode 100644
index 000000000000..6830489828c6
--- /dev/null
+++ b/drivers/mtd/chips/Makefile
@@ -0,0 +1,26 @@
1#
2# linux/drivers/mtd/chips/Makefile
3#
4# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
5
6# *** BIG UGLY NOTE ***
7#
8# The removal of get_module_symbol() and replacement with
9# inter_module_register() et al has introduced a link order dependency
10# here where previously there was none. We now have to ensure that
11# the CFI command set drivers are linked before gen_probe.o
12
13obj-$(CONFIG_MTD) += chipreg.o
14obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
15obj-$(CONFIG_MTD_CFI) += cfi_probe.o
16obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
17obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
18obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
19obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
20obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
21obj-$(CONFIG_MTD_JEDEC) += jedec.o
22obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
23obj-$(CONFIG_MTD_RAM) += map_ram.o
24obj-$(CONFIG_MTD_ROM) += map_rom.o
25obj-$(CONFIG_MTD_SHARP) += sharp.o
26obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
new file mode 100644
index 000000000000..41e2e3e31603
--- /dev/null
+++ b/drivers/mtd/chips/amd_flash.c
@@ -0,0 +1,1415 @@
1/*
2 * MTD map driver for AMD compatible flash chips (non-CFI)
3 *
4 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5 *
6 * $Id: amd_flash.c,v 1.26 2004/11/20 12:49:04 dwmw2 Exp $
7 *
8 * Copyright (c) 2001 Axis Communications AB
9 *
10 * This file is under GPL.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/flashchip.h>
26
27/* There's no limit. It exists only to avoid realloc. */
28#define MAX_AMD_CHIPS 8
29
30#define DEVICE_TYPE_X8 (8 / 8)
31#define DEVICE_TYPE_X16 (16 / 8)
32#define DEVICE_TYPE_X32 (32 / 8)
33
34/* Addresses */
35#define ADDR_MANUFACTURER 0x0000
36#define ADDR_DEVICE_ID 0x0001
37#define ADDR_SECTOR_LOCK 0x0002
38#define ADDR_HANDSHAKE 0x0003
39#define ADDR_UNLOCK_1 0x0555
40#define ADDR_UNLOCK_2 0x02AA
41
42/* Commands */
43#define CMD_UNLOCK_DATA_1 0x00AA
44#define CMD_UNLOCK_DATA_2 0x0055
45#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
46#define CMD_UNLOCK_BYPASS_MODE 0x0020
47#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
48#define CMD_RESET_DATA 0x00F0
49#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
50#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
51
52#define CMD_UNLOCK_SECTOR 0x0060
53
54/* Manufacturers */
55#define MANUFACTURER_AMD 0x0001
56#define MANUFACTURER_ATMEL 0x001F
57#define MANUFACTURER_FUJITSU 0x0004
58#define MANUFACTURER_ST 0x0020
59#define MANUFACTURER_SST 0x00BF
60#define MANUFACTURER_TOSHIBA 0x0098
61
62/* AMD */
63#define AM29F800BB 0x2258
64#define AM29F800BT 0x22D6
65#define AM29LV800BB 0x225B
66#define AM29LV800BT 0x22DA
67#define AM29LV160DT 0x22C4
68#define AM29LV160DB 0x2249
69#define AM29BDS323D 0x22D1
70#define AM29BDS643D 0x227E
71
72/* Atmel */
73#define AT49xV16x 0x00C0
74#define AT49xV16xT 0x00C2
75
76/* Fujitsu */
77#define MBM29LV160TE 0x22C4
78#define MBM29LV160BE 0x2249
79#define MBM29LV800BB 0x225B
80
81/* ST - www.st.com */
82#define M29W800T 0x00D7
83#define M29W160DT 0x22C4
84#define M29W160DB 0x2249
85
86/* SST */
87#define SST39LF800 0x2781
88#define SST39LF160 0x2782
89
90/* Toshiba */
91#define TC58FVT160 0x00C2
92#define TC58FVB160 0x0043
93
94#define D6_MASK 0x40
95
96struct amd_flash_private {
97 int device_type;
98 int interleave;
99 int numchips;
100 unsigned long chipshift;
101// const char *im_name;
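	/* zero-length array idiom: the flchip entries are allocated
	 * immediately after this struct (see the kmalloc of
	 * sizeof(*private) + numchips flchips in amd_flash_probe()) */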
102 struct flchip chips[0];
103};
104
105struct amd_flash_info {
106 const __u16 mfr_id;
107 const __u16 dev_id;
108 const char *name;
109 const u_long size;
110 const int numeraseregions;
111 const struct mtd_erase_region_info regions[4];
112};
113
114
115
116static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
117 u_char *);
118static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
119 const u_char *);
120static int amd_flash_erase(struct mtd_info *, struct erase_info *);
121static void amd_flash_sync(struct mtd_info *);
122static int amd_flash_suspend(struct mtd_info *);
123static void amd_flash_resume(struct mtd_info *);
124static void amd_flash_destroy(struct mtd_info *);
125static struct mtd_info *amd_flash_probe(struct map_info *map);
126
127
128static struct mtd_chip_driver amd_flash_chipdrv = {
129 .probe = amd_flash_probe,
130 .destroy = amd_flash_destroy,
131 .name = "amd_flash",
132 .module = THIS_MODULE
133};
134
135
136
137static const char im_name[] = "amd_flash";
138
139
140
141static inline __u32 wide_read(struct map_info *map, __u32 addr)
142{
143 if (map->buswidth == 1) {
144 return map_read8(map, addr);
145 } else if (map->buswidth == 2) {
146 return map_read16(map, addr);
147 } else if (map->buswidth == 4) {
148 return map_read32(map, addr);
149 }
150
151 return 0;
152}
153
154static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
155{
156 if (map->buswidth == 1) {
157 map_write8(map, val, addr);
158 } else if (map->buswidth == 2) {
159 map_write16(map, val, addr);
160 } else if (map->buswidth == 4) {
161 map_write32(map, val, addr);
162 }
163}
164
165static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
166{
167 const struct amd_flash_private *private = map->fldrv_priv;
168 if ((private->interleave == 2) &&
169 (private->device_type == DEVICE_TYPE_X16)) {
170 cmd |= (cmd << 16);
171 }
172
173 return cmd;
174}
175
176static inline void send_unlock(struct map_info *map, unsigned long base)
177{
178 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
179 base + (map->buswidth * ADDR_UNLOCK_1));
180 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
181 base + (map->buswidth * ADDR_UNLOCK_2));
182}
183
184static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
185{
186 send_unlock(map, base);
187 wide_write(map, make_cmd(map, cmd),
188 base + (map->buswidth * ADDR_UNLOCK_1));
189}
190
191static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
192 __u32 cmd, unsigned long addr)
193{
194 send_unlock(map, base);
195 wide_write(map, make_cmd(map, cmd), addr);
196}
197
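/* flash_is_busy() below implements the standard AMD DQ6 "toggle bit"
 * test: while an embedded program/erase operation is in progress the
 * chip toggles data bit 6 on every read, so two successive reads that
 * differ in D6 mean the operation is still running; a stable D6 means
 * the chip is ready. */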
198static inline int flash_is_busy(struct map_info *map, unsigned long addr,
199 int interleave)
200{
201
202 if ((interleave == 2) && (map->buswidth == 4)) {
203 __u32 read1, read2;
204
205 read1 = wide_read(map, addr);
206 read2 = wide_read(map, addr);
207
208 return (((read1 >> 16) & D6_MASK) !=
209 ((read2 >> 16) & D6_MASK)) ||
210 (((read1 & 0xffff) & D6_MASK) !=
211 ((read2 & 0xffff) & D6_MASK));
212 }
213
214 return ((wide_read(map, addr) & D6_MASK) !=
215 (wide_read(map, addr) & D6_MASK));
216}
217
218static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
219 int unlock)
220{
221 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
222 int SLA = unlock ?
223 (sect_addr | (0x40 * map->buswidth)) :
224 (sect_addr & ~(0x40 * map->buswidth)) ;
225
226 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
227
228 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
229 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
230 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
231 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
232}
233
234static inline int is_sector_locked(struct map_info *map,
235 unsigned long sect_addr)
236{
237 int status;
238
239 wide_write(map, CMD_RESET_DATA, 0);
240 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
241
242 /* status is 0x0000 for unlocked and 0x0001 for locked */
243 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
244 wide_write(map, CMD_RESET_DATA, 0);
245 return status;
246}
247
248static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
249 int is_unlock)
250{
251 struct map_info *map;
252 struct mtd_erase_region_info *merip;
253 int eraseoffset, erasesize, eraseblocks;
254 int i;
255 int retval = 0;
256 int lock_status;
257
258 map = mtd->priv;
259
260 /* Pass the whole chip through sector by sector and check for each
261 sector if the sector and the given interval overlap */
262 for(i = 0; i < mtd->numeraseregions; i++) {
263 merip = &mtd->eraseregions[i];
264
265 eraseoffset = merip->offset;
266 erasesize = merip->erasesize;
267 eraseblocks = merip->numblocks;
268
269 if (ofs > eraseoffset + erasesize)
270 continue;
271
272 while (eraseblocks > 0) {
273 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
274 unlock_sector(map, eraseoffset, is_unlock);
275
276 lock_status = is_sector_locked(map, eraseoffset);
277
278 if (is_unlock && lock_status) {
279 printk("Cannot unlock sector at address %x length %xx\n",
280 eraseoffset, merip->erasesize);
281 retval = -1;
282 } else if (!is_unlock && !lock_status) {
283 printk("Cannot lock sector at address %x length %x\n",
284 eraseoffset, merip->erasesize);
285 retval = -1;
286 }
287 }
288 eraseoffset += erasesize;
289 eraseblocks --;
290 }
291 }
292 return retval;
293}
294
295static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
296{
297 return amd_flash_do_unlock(mtd, ofs, len, 1);
298}
299
300static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
301{
302 return amd_flash_do_unlock(mtd, ofs, len, 0);
303}
304
305
306/*
307 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
308 * matching table entry (-1 if not found or alias for already found chip).
309 */
310static int probe_new_chip(struct mtd_info *mtd, __u32 base,
311 struct flchip *chips,
312 struct amd_flash_private *private,
313 const struct amd_flash_info *table, int table_size)
314{
315 __u32 mfr_id;
316 __u32 dev_id;
317 struct map_info *map = mtd->priv;
318 struct amd_flash_private temp;
319 int i;
320
321 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
322 temp.interleave = 2;
323 map->fldrv_priv = &temp;
324
325 /* Enter autoselect mode. */
326 send_cmd(map, base, CMD_RESET_DATA);
327 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
328
329 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
330 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
331
332 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
333 ((dev_id >> 16) == (dev_id & 0xffff))) {
334 mfr_id &= 0xffff;
335 dev_id &= 0xffff;
336 } else {
337 temp.interleave = 1;
338 }
339
340 for (i = 0; i < table_size; i++) {
341 if ((mfr_id == table[i].mfr_id) &&
342 (dev_id == table[i].dev_id)) {
343 if (chips) {
344 int j;
345
346 /* Is this an alias for an already found chip?
347 * In that case that chip should be in
348 * autoselect mode now.
349 */
350 for (j = 0; j < private->numchips; j++) {
351 __u32 mfr_id_other;
352 __u32 dev_id_other;
353
354 mfr_id_other =
355 wide_read(map, chips[j].start +
356 (map->buswidth *
357 ADDR_MANUFACTURER
358 ));
359 dev_id_other =
360 wide_read(map, chips[j].start +
361 (map->buswidth *
362 ADDR_DEVICE_ID));
363 if (temp.interleave == 2) {
364 mfr_id_other &= 0xffff;
365 dev_id_other &= 0xffff;
366 }
367 if ((mfr_id_other == mfr_id) &&
368 (dev_id_other == dev_id)) {
369
370 /* Exit autoselect mode. */
371 send_cmd(map, base,
372 CMD_RESET_DATA);
373
374 return -1;
375 }
376 }
377
378 if (private->numchips == MAX_AMD_CHIPS) {
379 printk(KERN_WARNING
380 "%s: Too many flash chips "
381 "detected. Increase "
382 "MAX_AMD_CHIPS from %d.\n",
383 map->name, MAX_AMD_CHIPS);
384
385 return -1;
386 }
387
388 chips[private->numchips].start = base;
389 chips[private->numchips].state = FL_READY;
390 chips[private->numchips].mutex =
391 &chips[private->numchips]._spinlock;
392 private->numchips++;
393 }
394
395 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
396 temp.interleave, (table[i].size)/(1024*1024),
397 table[i].name, base);
398
399 mtd->size += table[i].size * temp.interleave;
400 mtd->numeraseregions += table[i].numeraseregions;
401
402 break;
403 }
404 }
405
406 /* Exit autoselect mode. */
407 send_cmd(map, base, CMD_RESET_DATA);
408
409 if (i == table_size) {
410 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
411 "mfr id 0x%x, dev id 0x%x\n", map->name,
412 base, mfr_id, dev_id);
413 map->fldrv_priv = NULL;
414
415 return -1;
416 }
417
418 private->device_type = temp.device_type;
419 private->interleave = temp.interleave;
420
421 return i;
422}
423
424
425
426static struct mtd_info *amd_flash_probe(struct map_info *map)
427{
428 static const struct amd_flash_info table[] = {
429 {
430 .mfr_id = MANUFACTURER_AMD,
431 .dev_id = AM29LV160DT,
432 .name = "AMD AM29LV160DT",
433 .size = 0x00200000,
434 .numeraseregions = 4,
435 .regions = {
436 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
437 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
438 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
439 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
440 }
441 }, {
442 .mfr_id = MANUFACTURER_AMD,
443 .dev_id = AM29LV160DB,
444 .name = "AMD AM29LV160DB",
445 .size = 0x00200000,
446 .numeraseregions = 4,
447 .regions = {
448 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
449 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
450 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
451 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
452 }
453 }, {
454 .mfr_id = MANUFACTURER_TOSHIBA,
455 .dev_id = TC58FVT160,
456 .name = "Toshiba TC58FVT160",
457 .size = 0x00200000,
458 .numeraseregions = 4,
459 .regions = {
460 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
461 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
462 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
463 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
464 }
465 }, {
466 .mfr_id = MANUFACTURER_FUJITSU,
467 .dev_id = MBM29LV160TE,
468 .name = "Fujitsu MBM29LV160TE",
469 .size = 0x00200000,
470 .numeraseregions = 4,
471 .regions = {
472 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
473 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
474 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
475 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
476 }
477 }, {
478 .mfr_id = MANUFACTURER_TOSHIBA,
479 .dev_id = TC58FVB160,
480 .name = "Toshiba TC58FVB160",
481 .size = 0x00200000,
482 .numeraseregions = 4,
483 .regions = {
484 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
485 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
486 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
487 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
488 }
489 }, {
490 .mfr_id = MANUFACTURER_FUJITSU,
491 .dev_id = MBM29LV160BE,
492 .name = "Fujitsu MBM29LV160BE",
493 .size = 0x00200000,
494 .numeraseregions = 4,
495 .regions = {
496 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
497 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
498 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
499 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
500 }
501 }, {
502 .mfr_id = MANUFACTURER_AMD,
503 .dev_id = AM29LV800BB,
504 .name = "AMD AM29LV800BB",
505 .size = 0x00100000,
506 .numeraseregions = 4,
507 .regions = {
508 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
509 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
510 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
511 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
512 }
513 }, {
514 .mfr_id = MANUFACTURER_AMD,
515 .dev_id = AM29F800BB,
516 .name = "AMD AM29F800BB",
517 .size = 0x00100000,
518 .numeraseregions = 4,
519 .regions = {
520 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
521 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
522 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
523 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
524 }
525 }, {
526 .mfr_id = MANUFACTURER_AMD,
527 .dev_id = AM29LV800BT,
528 .name = "AMD AM29LV800BT",
529 .size = 0x00100000,
530 .numeraseregions = 4,
531 .regions = {
532 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
533 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
534 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
535 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
536 }
537 }, {
538 .mfr_id = MANUFACTURER_AMD,
539 .dev_id = AM29F800BT,
540 .name = "AMD AM29F800BT",
541 .size = 0x00100000,
542 .numeraseregions = 4,
543 .regions = {
544 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
545 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
546 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
547 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
548 }
549 }, {
550 .mfr_id = MANUFACTURER_AMD,
551 .dev_id = AM29LV800BB,
552 .name = "AMD AM29LV800BB",
553 .size = 0x00100000,
554 .numeraseregions = 4,
555 .regions = {
556 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
557 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
558 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
559 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
560 }
561 }, {
562 .mfr_id = MANUFACTURER_FUJITSU,
563 .dev_id = MBM29LV800BB,
564 .name = "Fujitsu MBM29LV800BB",
565 .size = 0x00100000,
566 .numeraseregions = 4,
567 .regions = {
568 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
569 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
570 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
571 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
572 }
573 }, {
574 .mfr_id = MANUFACTURER_ST,
575 .dev_id = M29W800T,
576 .name = "ST M29W800T",
577 .size = 0x00100000,
578 .numeraseregions = 4,
579 .regions = {
580 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
581 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
582 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
583 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
584 }
585 }, {
586 .mfr_id = MANUFACTURER_ST,
587 .dev_id = M29W160DT,
588 .name = "ST M29W160DT",
589 .size = 0x00200000,
590 .numeraseregions = 4,
591 .regions = {
592 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
593 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
594 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
595 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
596 }
597 }, {
598 .mfr_id = MANUFACTURER_ST,
599 .dev_id = M29W160DB,
600 .name = "ST M29W160DB",
601 .size = 0x00200000,
602 .numeraseregions = 4,
603 .regions = {
604 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
605 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
606 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
607 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
608 }
609 }, {
610 .mfr_id = MANUFACTURER_AMD,
611 .dev_id = AM29BDS323D,
612 .name = "AMD AM29BDS323D",
613 .size = 0x00400000,
614 .numeraseregions = 3,
615 .regions = {
616 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
617 { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
618 { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
619 }
620 }, {
621 .mfr_id = MANUFACTURER_AMD,
622 .dev_id = AM29BDS643D,
623 .name = "AMD AM29BDS643D",
624 .size = 0x00800000,
625 .numeraseregions = 3,
626 .regions = {
627 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 96 },
628 { .offset = 0x600000, .erasesize = 0x10000, .numblocks = 31 },
629 { .offset = 0x7f0000, .erasesize = 0x02000, .numblocks = 8 },
630 }
631 }, {
632 .mfr_id = MANUFACTURER_ATMEL,
633 .dev_id = AT49xV16x,
634 .name = "Atmel AT49xV16x",
635 .size = 0x00200000,
636 .numeraseregions = 2,
637 .regions = {
638 { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
639 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
640 }
641 }, {
642 .mfr_id = MANUFACTURER_ATMEL,
643 .dev_id = AT49xV16xT,
644 .name = "Atmel AT49xV16xT",
645 .size = 0x00200000,
646 .numeraseregions = 2,
647 .regions = {
648 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
649 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
650 }
651 }
652 };
653
654 struct mtd_info *mtd;
655 struct flchip chips[MAX_AMD_CHIPS];
656 int table_pos[MAX_AMD_CHIPS];
657 struct amd_flash_private temp;
658 struct amd_flash_private *private;
659 u_long size;
660 unsigned long base;
661 int i;
662 int reg_idx;
663 int offset;
664
665 mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
666 if (!mtd) {
667 printk(KERN_WARNING
668 "%s: kmalloc failed for info structure\n", map->name);
669 return NULL;
670 }
671 memset(mtd, 0, sizeof(*mtd));
672 mtd->priv = map;
673
674 memset(&temp, 0, sizeof(temp));
675
676 printk("%s: Probing for AMD compatible flash...\n", map->name);
677
678 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
679 sizeof(table)/sizeof(table[0])))
680 == -1) {
681 printk(KERN_WARNING
682 "%s: Found no AMD compatible device at location zero\n",
683 map->name);
684 kfree(mtd);
685
686 return NULL;
687 }
688
689 chips[0].start = 0;
690 chips[0].state = FL_READY;
691 chips[0].mutex = &chips[0]._spinlock;
692 temp.numchips = 1;
693 for (size = mtd->size; size > 1; size >>= 1) {
694 temp.chipshift++;
695 }
696 switch (temp.interleave) {
697 case 2:
698 temp.chipshift += 1;
699 break;
700 case 4:
701 temp.chipshift += 2;
702 break;
703 }
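	/* Example: one non-interleaved 2 MiB (0x200000) chip gives
	 * chipshift = 21, so each chip occupies a 1 << chipshift byte
	 * slice of the map and addr >> chipshift selects a chip. */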
704
705 /* Find out if there are any more chips in the map. */
706 for (base = (1 << temp.chipshift);
707 base < map->size;
708 base += (1 << temp.chipshift)) {
709 int numchips = temp.numchips;
710 table_pos[numchips] = probe_new_chip(mtd, base, chips,
711 &temp, table, sizeof(table)/sizeof(table[0]));
712 }
713
714 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
715 mtd->numeraseregions, GFP_KERNEL);
716 if (!mtd->eraseregions) {
717 printk(KERN_WARNING "%s: Failed to allocate "
718 "memory for MTD erase region info\n", map->name);
719 kfree(mtd);
720 map->fldrv_priv = NULL;
721 return NULL;
722 }
723
724 reg_idx = 0;
725 offset = 0;
726 for (i = 0; i < temp.numchips; i++) {
727 int dev_size;
728 int j;
729
730 dev_size = 0;
731 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
732 mtd->eraseregions[reg_idx].offset = offset +
733 (table[table_pos[i]].regions[j].offset *
734 temp.interleave);
735 mtd->eraseregions[reg_idx].erasesize =
736 table[table_pos[i]].regions[j].erasesize *
737 temp.interleave;
738 mtd->eraseregions[reg_idx].numblocks =
739 table[table_pos[i]].regions[j].numblocks;
740 if (mtd->erasesize <
741 mtd->eraseregions[reg_idx].erasesize) {
742 mtd->erasesize =
743 mtd->eraseregions[reg_idx].erasesize;
744 }
745 dev_size += mtd->eraseregions[reg_idx].erasesize *
746 mtd->eraseregions[reg_idx].numblocks;
747 reg_idx++;
748 }
749 offset += dev_size;
750 }
751 mtd->type = MTD_NORFLASH;
752 mtd->flags = MTD_CAP_NORFLASH;
753 mtd->name = map->name;
754 mtd->erase = amd_flash_erase;
755 mtd->read = amd_flash_read;
756 mtd->write = amd_flash_write;
757 mtd->sync = amd_flash_sync;
758 mtd->suspend = amd_flash_suspend;
759 mtd->resume = amd_flash_resume;
760 mtd->lock = amd_flash_lock;
761 mtd->unlock = amd_flash_unlock;
762
763 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
764 temp.numchips), GFP_KERNEL);
765 if (!private) {
766 printk(KERN_WARNING
767 "%s: kmalloc failed for private structure\n", map->name);
768 kfree(mtd);
769 map->fldrv_priv = NULL;
770 return NULL;
771 }
772 memcpy(private, &temp, sizeof(temp));
773 memcpy(private->chips, chips,
774 sizeof(struct flchip) * private->numchips);
775 for (i = 0; i < private->numchips; i++) {
776 init_waitqueue_head(&private->chips[i].wq);
777 spin_lock_init(&private->chips[i]._spinlock);
778 }
779
780 map->fldrv_priv = private;
781
782 map->fldrv = &amd_flash_chipdrv;
783
784 __module_get(THIS_MODULE);
785 return mtd;
786}
787
788
789
790static inline int read_one_chip(struct map_info *map, struct flchip *chip,
791 loff_t adr, size_t len, u_char *buf)
792{
793 DECLARE_WAITQUEUE(wait, current);
794 unsigned long timeo = jiffies + HZ;
795
796retry:
797 spin_lock_bh(chip->mutex);
798
799 if (chip->state != FL_READY){
800 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
801 map->name, chip->state);
802 set_current_state(TASK_UNINTERRUPTIBLE);
803 add_wait_queue(&chip->wq, &wait);
804
805 spin_unlock_bh(chip->mutex);
806
807 schedule();
808 remove_wait_queue(&chip->wq, &wait);
809
810 if(signal_pending(current)) {
811 return -EINTR;
812 }
813
814 timeo = jiffies + HZ;
815
816 goto retry;
817 }
818
819 adr += chip->start;
820
821 chip->state = FL_READY;
822
823 map_copy_from(map, buf, adr, len);
824
825 wake_up(&chip->wq);
826 spin_unlock_bh(chip->mutex);
827
828 return 0;
829}
830
831
832
833static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
834 size_t *retlen, u_char *buf)
835{
836 struct map_info *map = mtd->priv;
837 struct amd_flash_private *private = map->fldrv_priv;
838 unsigned long ofs;
839 int chipnum;
840 int ret = 0;
841
842 if ((from + len) > mtd->size) {
843 printk(KERN_WARNING "%s: read request past end of device "
844 "(0x%lx)\n", map->name, (unsigned long)from + len);
845
846 return -EINVAL;
847 }
848
849 /* Offset within the first chip that the first read should start. */
850 chipnum = (from >> private->chipshift);
851 ofs = from - (chipnum << private->chipshift);
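	/* e.g. with chipshift = 21 a read at from = 0x250000 selects
	 * chip 1 at offset 0x50000 within that chip */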
852
853 *retlen = 0;
854
855 while (len) {
856 unsigned long this_len;
857
858 if (chipnum >= private->numchips) {
859 break;
860 }
861
862 if ((len + ofs - 1) >> private->chipshift) {
863 this_len = (1 << private->chipshift) - ofs;
864 } else {
865 this_len = len;
866 }
867
868 ret = read_one_chip(map, &private->chips[chipnum], ofs,
869 this_len, buf);
870 if (ret) {
871 break;
872 }
873
874 *retlen += this_len;
875 len -= this_len;
876 buf += this_len;
877
878 ofs = 0;
879 chipnum++;
880 }
881
882 return ret;
883}
884
885
886
887static int write_one_word(struct map_info *map, struct flchip *chip,
888 unsigned long adr, __u32 datum)
889{
890 unsigned long timeo = jiffies + HZ;
891 struct amd_flash_private *private = map->fldrv_priv;
892 DECLARE_WAITQUEUE(wait, current);
893 int ret = 0;
894 int times_left;
895
896retry:
897 spin_lock_bh(chip->mutex);
898
899 if (chip->state != FL_READY){
900 printk("%s: waiting for chip to write, state = %d\n",
901 map->name, chip->state);
902 set_current_state(TASK_UNINTERRUPTIBLE);
903 add_wait_queue(&chip->wq, &wait);
904
905 spin_unlock_bh(chip->mutex);
906
907 schedule();
908 remove_wait_queue(&chip->wq, &wait);
909 printk(KERN_INFO "%s: woke up to write\n", map->name);
910 if(signal_pending(current))
911 return -EINTR;
912
913 timeo = jiffies + HZ;
914
915 goto retry;
916 }
917
918 chip->state = FL_WRITING;
919
920 adr += chip->start;
921 ENABLE_VPP(map);
922 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
923 wide_write(map, datum, adr);
924
925 times_left = 500000;
926 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
927 if (need_resched()) {
928 spin_unlock_bh(chip->mutex);
929 schedule();
930 spin_lock_bh(chip->mutex);
931 }
932 }
933
934	if (times_left < 0) {
935 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
936 map->name, adr);
937 ret = -EIO;
938 } else {
939 __u32 verify;
940 if ((verify = wide_read(map, adr)) != datum) {
941 printk(KERN_WARNING "%s: write to 0x%lx failed. "
942 "datum = %x, verify = %x\n",
943 map->name, adr, datum, verify);
944 ret = -EIO;
945 }
946 }
947
948 DISABLE_VPP(map);
949 chip->state = FL_READY;
950 wake_up(&chip->wq);
951 spin_unlock_bh(chip->mutex);
952
953 return ret;
954}
955
956
957
958static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
959 size_t *retlen, const u_char *buf)
960{
961 struct map_info *map = mtd->priv;
962 struct amd_flash_private *private = map->fldrv_priv;
963 int ret = 0;
964 int chipnum;
965 unsigned long ofs;
966 unsigned long chipstart;
967
968 *retlen = 0;
969 if (!len) {
970 return 0;
971 }
972
973 chipnum = to >> private->chipshift;
974 ofs = to - (chipnum << private->chipshift);
975 chipstart = private->chips[chipnum].start;
976
977 /* If it's not bus-aligned, do the first byte write. */
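	/* e.g. buswidth 2, ofs 0x101: read back the 16-bit word at
	 * 0x100, merge buf[0] into byte 1 of the copy, and program the
	 * whole word; the flash is only programmable a full bus word
	 * at a time */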
978 if (ofs & (map->buswidth - 1)) {
979 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
980 int i = ofs - bus_ofs;
981 int n = 0;
982 u_char tmp_buf[4];
983 __u32 datum;
984
985 map_copy_from(map, tmp_buf,
986 bus_ofs + private->chips[chipnum].start,
987 map->buswidth);
988 while (len && i < map->buswidth)
989 tmp_buf[i++] = buf[n++], len--;
990
991 if (map->buswidth == 2) {
992 datum = *(__u16*)tmp_buf;
993 } else if (map->buswidth == 4) {
994 datum = *(__u32*)tmp_buf;
995 } else {
996 return -EINVAL; /* should never happen, but be safe */
997 }
998
999 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
1000 datum);
1001 if (ret) {
1002 return ret;
1003 }
1004
1005 ofs += n;
1006 buf += n;
1007 (*retlen) += n;
1008
1009 if (ofs >> private->chipshift) {
1010 chipnum++;
1011 ofs = 0;
1012 if (chipnum == private->numchips) {
1013 return 0;
1014 }
1015 }
1016 }
1017
1018 /* We are now aligned, write as much as possible. */
1019 while(len >= map->buswidth) {
1020 __u32 datum;
1021
1022 if (map->buswidth == 1) {
1023 datum = *(__u8*)buf;
1024 } else if (map->buswidth == 2) {
1025 datum = *(__u16*)buf;
1026 } else if (map->buswidth == 4) {
1027 datum = *(__u32*)buf;
1028 } else {
1029 return -EINVAL;
1030 }
1031
1032 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1033
1034 if (ret) {
1035 return ret;
1036 }
1037
1038 ofs += map->buswidth;
1039 buf += map->buswidth;
1040 (*retlen) += map->buswidth;
1041 len -= map->buswidth;
1042
1043 if (ofs >> private->chipshift) {
1044 chipnum++;
1045 ofs = 0;
1046 if (chipnum == private->numchips) {
1047 return 0;
1048 }
1049 chipstart = private->chips[chipnum].start;
1050 }
1051 }
1052
1053 if (len & (map->buswidth - 1)) {
1054 int i = 0, n = 0;
1055		u_char tmp_buf[4];
1056 __u32 datum;
1057
1058 map_copy_from(map, tmp_buf,
1059 ofs + private->chips[chipnum].start,
1060 map->buswidth);
1061 while (len--) {
1062 tmp_buf[i++] = buf[n++];
1063 }
1064
1065 if (map->buswidth == 2) {
1066 datum = *(__u16*)tmp_buf;
1067 } else if (map->buswidth == 4) {
1068 datum = *(__u32*)tmp_buf;
1069 } else {
1070 return -EINVAL; /* should never happen, but be safe */
1071 }
1072
1073 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1074
1075 if (ret) {
1076 return ret;
1077 }
1078
1079 (*retlen) += n;
1080 }
1081
1082 return 0;
1083}
1084
1085
1086
1087static inline int erase_one_block(struct map_info *map, struct flchip *chip,
1088 unsigned long adr, u_long size)
1089{
1090 unsigned long timeo = jiffies + HZ;
1091 struct amd_flash_private *private = map->fldrv_priv;
1092 DECLARE_WAITQUEUE(wait, current);
1093
1094retry:
1095 spin_lock_bh(chip->mutex);
1096
1097 if (chip->state != FL_READY){
1098 set_current_state(TASK_UNINTERRUPTIBLE);
1099 add_wait_queue(&chip->wq, &wait);
1100
1101 spin_unlock_bh(chip->mutex);
1102
1103 schedule();
1104 remove_wait_queue(&chip->wq, &wait);
1105
1106 if (signal_pending(current)) {
1107 return -EINTR;
1108 }
1109
1110 timeo = jiffies + HZ;
1111
1112 goto retry;
1113 }
1114
1115 chip->state = FL_ERASING;
1116
1117 adr += chip->start;
1118 ENABLE_VPP(map);
1119 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1120 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1121
1122 timeo = jiffies + (HZ * 20);
1123
1124 spin_unlock_bh(chip->mutex);
1125 msleep(1000);
1126 spin_lock_bh(chip->mutex);
1127
1128 while (flash_is_busy(map, adr, private->interleave)) {
1129
1130 if (chip->state != FL_ERASING) {
1131 /* Someone's suspended the erase. Sleep */
1132 set_current_state(TASK_UNINTERRUPTIBLE);
1133 add_wait_queue(&chip->wq, &wait);
1134
1135 spin_unlock_bh(chip->mutex);
1136 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1137 map->name);
1138 schedule();
1139 remove_wait_queue(&chip->wq, &wait);
1140
1141 if (signal_pending(current)) {
1142 return -EINTR;
1143 }
1144
1145 timeo = jiffies + (HZ*2); /* FIXME */
1146 spin_lock_bh(chip->mutex);
1147 continue;
1148 }
1149
1150 /* OK Still waiting */
1151 if (time_after(jiffies, timeo)) {
1152 chip->state = FL_READY;
1153 spin_unlock_bh(chip->mutex);
1154 printk(KERN_WARNING "%s: waiting for erase to complete "
1155 "timed out.\n", map->name);
1156 DISABLE_VPP(map);
1157
1158 return -EIO;
1159 }
1160
1161 /* Latency issues. Drop the lock, wait a while and retry */
1162 spin_unlock_bh(chip->mutex);
1163
1164 if (need_resched())
1165 schedule();
1166 else
1167 udelay(1);
1168
1169 spin_lock_bh(chip->mutex);
1170 }
1171
1172 /* Verify every single word */
1173 {
1174		unsigned long address;
1175 int error = 0;
1176 __u8 verify;
1177
1178 for (address = adr; address < (adr + size); address++) {
1179 if ((verify = map_read8(map, address)) != 0xFF) {
1180 error = 1;
1181 break;
1182 }
1183 }
1184 if (error) {
1185 chip->state = FL_READY;
1186 spin_unlock_bh(chip->mutex);
1187 printk(KERN_WARNING
1188 "%s: verify error at 0x%x, size %ld.\n",
1189 map->name, address, size);
1190 DISABLE_VPP(map);
1191
1192 return -EIO;
1193 }
1194 }
1195
1196 DISABLE_VPP(map);
1197 chip->state = FL_READY;
1198 wake_up(&chip->wq);
1199 spin_unlock_bh(chip->mutex);
1200
1201 return 0;
1202}
1203
1204
1205
1206static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1207{
1208 struct map_info *map = mtd->priv;
1209 struct amd_flash_private *private = map->fldrv_priv;
1210 unsigned long adr, len;
1211 int chipnum;
1212 int ret = 0;
1213 int i;
1214 int first;
1215 struct mtd_erase_region_info *regions = mtd->eraseregions;
1216
1217 if (instr->addr > mtd->size) {
1218 return -EINVAL;
1219 }
1220
1221 if ((instr->len + instr->addr) > mtd->size) {
1222 return -EINVAL;
1223 }
1224
1225 /* Check that both start and end of the requested erase are
1226 * aligned with the erasesize at the appropriate addresses.
1227 */
1228
1229 i = 0;
1230
1231 /* Skip all erase regions which are ended before the start of
1232 the requested erase. Actually, to save on the calculations,
1233 we skip to the first erase region which starts after the
1234 start of the requested erase, and then go back one.
1235 */
1236
1237 while ((i < mtd->numeraseregions) &&
1238 (instr->addr >= regions[i].offset)) {
1239 i++;
1240 }
1241 i--;
1242
1243 /* OK, now i is pointing at the erase region in which this
1244 * erase request starts. Check the start of the requested
1245 * erase range is aligned with the erase size which is in
1246 * effect here.
1247 */
1248
1249 if (instr->addr & (regions[i].erasesize-1)) {
1250 return -EINVAL;
1251 }
1252
1253 /* Remember the erase region we start on. */
1254
1255 first = i;
1256
1257 /* Next, check that the end of the requested erase is aligned
1258 * with the erase region at that address.
1259 */
1260
1261 while ((i < mtd->numeraseregions) &&
1262 ((instr->addr + instr->len) >= regions[i].offset)) {
1263 i++;
1264 }
1265
1266 /* As before, drop back one to point at the region in which
1267 * the address actually falls.
1268 */
1269
1270 i--;
1271
1272 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
1273 return -EINVAL;
1274 }
1275
1276 chipnum = instr->addr >> private->chipshift;
1277 adr = instr->addr - (chipnum << private->chipshift);
1278 len = instr->len;
1279
1280 i = first;
1281
1282 while (len) {
1283 ret = erase_one_block(map, &private->chips[chipnum], adr,
1284 regions[i].erasesize);
1285
1286 if (ret) {
1287 return ret;
1288 }
1289
1290 adr += regions[i].erasesize;
1291 len -= regions[i].erasesize;
1292
1293 if ((adr % (1 << private->chipshift)) ==
1294 ((regions[i].offset + (regions[i].erasesize *
1295 regions[i].numblocks))
1296 % (1 << private->chipshift))) {
1297 i++;
1298 }
1299
1300 if (adr >> private->chipshift) {
1301 adr = 0;
1302 chipnum++;
1303 if (chipnum >= private->numchips) {
1304 break;
1305 }
1306 }
1307 }
1308
1309 instr->state = MTD_ERASE_DONE;
1310 mtd_erase_callback(instr);
1311
1312 return 0;
1313}
1314
1315
1316
1317static void amd_flash_sync(struct mtd_info *mtd)
1318{
1319 struct map_info *map = mtd->priv;
1320 struct amd_flash_private *private = map->fldrv_priv;
1321 int i;
1322 struct flchip *chip;
1323 int ret = 0;
1324 DECLARE_WAITQUEUE(wait, current);
1325
1326 for (i = 0; !ret && (i < private->numchips); i++) {
1327 chip = &private->chips[i];
1328
1329 retry:
1330 spin_lock_bh(chip->mutex);
1331
1332 switch(chip->state) {
1333 case FL_READY:
1334 case FL_STATUS:
1335 case FL_CFI_QUERY:
1336 case FL_JEDEC_QUERY:
1337 chip->oldstate = chip->state;
1338 chip->state = FL_SYNCING;
1339 /* No need to wake_up() on this state change -
1340 * as the whole point is that nobody can do anything
1341 * with the chip now anyway.
1342 */
1343 case FL_SYNCING:
1344 spin_unlock_bh(chip->mutex);
1345 break;
1346
1347 default:
1348 /* Not an idle state */
1349 add_wait_queue(&chip->wq, &wait);
1350
1351 spin_unlock_bh(chip->mutex);
1352
1353 schedule();
1354
1355 remove_wait_queue(&chip->wq, &wait);
1356
1357 goto retry;
1358 }
1359 }
1360
1361 /* Unlock the chips again */
1362 for (i--; i >= 0; i--) {
1363 chip = &private->chips[i];
1364
1365 spin_lock_bh(chip->mutex);
1366
1367 if (chip->state == FL_SYNCING) {
1368 chip->state = chip->oldstate;
1369 wake_up(&chip->wq);
1370 }
1371 spin_unlock_bh(chip->mutex);
1372 }
1373}
1374
1375
1376
1377static int amd_flash_suspend(struct mtd_info *mtd)
1378{
1379printk("amd_flash_suspend(): not implemented!\n");
1380 return -EINVAL;
1381}
1382
1383
1384
1385static void amd_flash_resume(struct mtd_info *mtd)
1386{
1387printk("amd_flash_resume(): not implemented!\n");
1388}
1389
1390
1391
1392static void amd_flash_destroy(struct mtd_info *mtd)
1393{
1394 struct map_info *map = mtd->priv;
1395 struct amd_flash_private *private = map->fldrv_priv;
1396 kfree(private);
1397}
1398
1399int __init amd_flash_init(void)
1400{
1401 register_mtd_chip_driver(&amd_flash_chipdrv);
1402 return 0;
1403}
1404
1405void __exit amd_flash_exit(void)
1406{
1407 unregister_mtd_chip_driver(&amd_flash_chipdrv);
1408}
1409
1410module_init(amd_flash_init);
1411module_exit(amd_flash_exit);
1412
1413MODULE_LICENSE("GPL");
1414MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1415MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
new file mode 100644
index 000000000000..c268bcd71720
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -0,0 +1,2160 @@
1/*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
7 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
8 *
9 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <asm/io.h>
26#include <asm/byteorder.h>
27
28#include <linux/errno.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/interrupt.h>
32#include <linux/mtd/xip.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/compatmac.h>
36#include <linux/mtd/cfi.h>
37
38/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
40
41// debugging, turns off buffer write mode if set to 1
42#define FORCE_WORD_WRITE 0
43
44#define MANUFACTURER_INTEL 0x0089
45#define I82802AB 0x00ad
46#define I82802AC 0x00ac
47#define MANUFACTURER_ST 0x0020
48#define M50LPW080 0x002F
49
50static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
51//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
56static void cfi_intelext_sync (struct mtd_info *);
57static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
58static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
59static int cfi_intelext_suspend (struct mtd_info *);
60static void cfi_intelext_resume (struct mtd_info *);
61
62static void cfi_intelext_destroy(struct mtd_info *);
63
64struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
65
66static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
67static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
68
69static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
70 size_t *retlen, u_char **mtdbuf);
71static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
72 size_t len);
73
74static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
75static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
76#include "fwh_lock.h"
77
78
79
80/*
81 * *********** SETUP AND PROBE BITS ***********
82 */
83
84static struct mtd_chip_driver cfi_intelext_chipdrv = {
85 .probe = NULL, /* Not usable directly */
86 .destroy = cfi_intelext_destroy,
87 .name = "cfi_cmdset_0001",
88 .module = THIS_MODULE
89};
90
91/* #define DEBUG_LOCK_BITS */
92/* #define DEBUG_CFI_FEATURES */
93
94#ifdef DEBUG_CFI_FEATURES
95static void cfi_tell_features(struct cfi_pri_intelext *extp)
96{
97 int i;
98 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
99 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
100 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
101 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
102 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
103 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
104 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
105 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
106 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
107 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
108 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
109 for (i=10; i<32; i++) {
110 if (extp->FeatureSupport & (1<<i))
111 printk(" - Unknown Bit %X: supported\n", i);
112 }
113
114 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
115 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
116 for (i=1; i<8; i++) {
117 if (extp->SuspendCmdSupport & (1<<i))
118 printk(" - Unknown Bit %X: supported\n", i);
119 }
120
121 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
122 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
123 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
124 for (i=2; i<16; i++) {
125 if (extp->BlkStatusRegMask & (1<<i))
126 printk(" - Unknown Bit %X Active: yes\n",i);
127 }
128
129 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
130 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
131 if (extp->VppOptimal)
132 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
133 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
134}
135#endif
136
137#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
138/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
139static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
140{
141	struct map_info *map = mtd->priv;
142	struct cfi_private *cfi = map->fldrv_priv;
143	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
144
145	printk(KERN_WARNING "cfi_cmdset_0001: erase suspend "
146	       "on write disabled.\n");
147 extp->SuspendCmdSupport &= ~1;
148}
149#endif
150
151#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
152static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
153{
154 struct map_info *map = mtd->priv;
155 struct cfi_private *cfi = map->fldrv_priv;
156 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
157
158 if (cfip && (cfip->FeatureSupport&4)) {
159 cfip->FeatureSupport &= ~4;
160 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
161 }
162}
163#endif
164
165static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
166{
167 struct map_info *map = mtd->priv;
168 struct cfi_private *cfi = map->fldrv_priv;
169
170 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
171 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
172}
173
174static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
175{
176 struct map_info *map = mtd->priv;
177 struct cfi_private *cfi = map->fldrv_priv;
178
179 /* Note this is done after the region info is endian swapped */
180 cfi->cfiq->EraseRegionInfo[1] =
181 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
182}
183
184static void fixup_use_point(struct mtd_info *mtd, void *param)
185{
186 struct map_info *map = mtd->priv;
187 if (!mtd->point && map_is_linear(map)) {
188 mtd->point = cfi_intelext_point;
189 mtd->unpoint = cfi_intelext_unpoint;
190 }
191}
192
193static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
194{
195 struct map_info *map = mtd->priv;
196 struct cfi_private *cfi = map->fldrv_priv;
197 if (cfi->cfiq->BufWriteTimeoutTyp) {
198 printk(KERN_INFO "Using buffer write method\n" );
199 mtd->write = cfi_intelext_write_buffers;
200 }
201}
202
203static struct cfi_fixup cfi_fixup_table[] = {
204#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
205 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
206#endif
207#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
208 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
209#endif
210#if !FORCE_WORD_WRITE
211 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
212#endif
213 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
214 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
215 { 0, 0, NULL, NULL }
216};
217
218static struct cfi_fixup jedec_fixup_table[] = {
219 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
220 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
221 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
222 { 0, 0, NULL, NULL }
223};
224static struct cfi_fixup fixup_table[] = {
225	/* The CFI vendor IDs and the JEDEC vendor IDs appear
226	 * to be common, and it looks like the device IDs are
227	 * as well.  This table picks up all cases where we
228	 * know that to be true.
229	 */
230 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
231 { 0, 0, NULL, NULL }
232};
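
/*
 * Illustrative sketch only: roughly how a fixup table like the ones above
 * is walked (the real cfi_fixup() helper lives in cfi_util.c).  The
 * wildcard entries CFI_MFR_ANY/CFI_ID_ANY match every chip; more specific
 * manufacturer/device pairs only fire on an exact match.
 */
static void example_run_fixups(struct mtd_info *mtd, struct cfi_fixup *table)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = table; f->fixup; f++) {
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
			f->fixup(mtd, f->param);
	}
}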
233
234static inline struct cfi_pri_intelext *
235read_pri_intelext(struct map_info *map, __u16 adr)
236{
237 struct cfi_pri_intelext *extp;
238 unsigned int extp_size = sizeof(*extp);
239
240 again:
241 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
242 if (!extp)
243 return NULL;
244
245 /* Do some byteswapping if necessary */
246 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
247 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
248 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
249
250 if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
251 unsigned int extra_size = 0;
252 int nb_parts, i;
253
254 /* Protection Register info */
255 extra_size += (extp->NumProtectionFields - 1) * (4 + 6);
256
257 /* Burst Read info */
258 extra_size += 6;
259
260 /* Number of hardware-partitions */
261 extra_size += 1;
262 if (extp_size < sizeof(*extp) + extra_size)
263 goto need_more;
264 nb_parts = extp->extra[extra_size - 1];
265
266 for (i = 0; i < nb_parts; i++) {
267 struct cfi_intelext_regioninfo *rinfo;
268 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
269 extra_size += sizeof(*rinfo);
270 if (extp_size < sizeof(*extp) + extra_size)
271 goto need_more;
272 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
273 extra_size += (rinfo->NumBlockTypes - 1)
274 * sizeof(struct cfi_intelext_blockinfo);
275 }
276
277 if (extp_size < sizeof(*extp) + extra_size) {
278 need_more:
279 extp_size = sizeof(*extp) + extra_size;
280 kfree(extp);
281 if (extp_size > 4096) {
282 printk(KERN_ERR
283 "%s: cfi_pri_intelext is too fat\n",
284 __FUNCTION__);
285 return NULL;
286 }
287 goto again;
288 }
289 }
290
291 return extp;
292}
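
/*
 * A minimal user-space analogue (sketch only) of the grow-and-retry
 * pattern used by read_pri_intelext() above: read with a guessed size,
 * compute the size actually needed from the contents, then retry with
 * the bigger buffer.  read_blob() and measure_needed() are hypothetical
 * helpers; the 4096-byte sanity cap mirrors the one above.
 */
#include <stdlib.h>

extern void *read_blob(size_t size);		/* hypothetical reader */
extern size_t measure_needed(const void *p);	/* hypothetical size probe */

static void *read_grow_retry(void)
{
	size_t size = 64;			/* initial guess */

	for (;;) {
		void *p = read_blob(size);
		size_t needed;

		if (!p)
			return NULL;
		needed = measure_needed(p);
		if (needed <= size)
			return p;		/* buffer was big enough */
		free(p);
		if (needed > 4096)		/* give up, as above */
			return NULL;
		size = needed;
	}
}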
293
294/* This routine is made available to other mtd code via
295 * inter_module_register. It must only be accessed through
296 * inter_module_get which will bump the use count of this module. The
297 * addresses passed back in cfi are valid as long as the use count of
298 * this module is non-zero, i.e. between inter_module_get and
299 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
300 */
301struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
302{
303 struct cfi_private *cfi = map->fldrv_priv;
304 struct mtd_info *mtd;
305 int i;
306
307 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
308 if (!mtd) {
309 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
310 return NULL;
311 }
312 memset(mtd, 0, sizeof(*mtd));
313 mtd->priv = map;
314 mtd->type = MTD_NORFLASH;
315
316 /* Fill in the default mtd operations */
317 mtd->erase = cfi_intelext_erase_varsize;
318 mtd->read = cfi_intelext_read;
319 mtd->write = cfi_intelext_write_words;
320 mtd->sync = cfi_intelext_sync;
321 mtd->lock = cfi_intelext_lock;
322 mtd->unlock = cfi_intelext_unlock;
323 mtd->suspend = cfi_intelext_suspend;
324 mtd->resume = cfi_intelext_resume;
325 mtd->flags = MTD_CAP_NORFLASH;
326 mtd->name = map->name;
327
328 if (cfi->cfi_mode == CFI_MODE_CFI) {
329 /*
330 * It's a real CFI chip, not one for which the probe
331 * routine faked a CFI structure. So we read the feature
332 * table from it.
333 */
334 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
335 struct cfi_pri_intelext *extp;
336
337 extp = read_pri_intelext(map, adr);
338 if (!extp) {
339 kfree(mtd);
340 return NULL;
341 }
342
343 /* Install our own private info structure */
344 cfi->cmdset_priv = extp;
345
346 cfi_fixup(mtd, cfi_fixup_table);
347
348#ifdef DEBUG_CFI_FEATURES
349 /* Tell the user about it in lots of lovely detail */
350 cfi_tell_features(extp);
351#endif
352
353 if(extp->SuspendCmdSupport & 1) {
354 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
355 }
356 }
357 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
358 /* Apply jedec specific fixups */
359 cfi_fixup(mtd, jedec_fixup_table);
360 }
361 /* Apply generic fixups */
362 cfi_fixup(mtd, fixup_table);
363
364 for (i=0; i< cfi->numchips; i++) {
365 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
366 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
367 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
368 cfi->chips[i].ref_point_counter = 0;
369 }
370
371 map->fldrv = &cfi_intelext_chipdrv;
372
373 return cfi_intelext_setup(mtd);
374}
375
376static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
377{
378 struct map_info *map = mtd->priv;
379 struct cfi_private *cfi = map->fldrv_priv;
380 unsigned long offset = 0;
381 int i,j;
382 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
383
384 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
385
386 mtd->size = devsize * cfi->numchips;
387
388 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
389 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
390 * mtd->numeraseregions, GFP_KERNEL);
391 if (!mtd->eraseregions) {
392 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
393 goto setup_err;
394 }
395
396 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
397 unsigned long ernum, ersize;
398 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
399 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
400
401 if (mtd->erasesize < ersize) {
402 mtd->erasesize = ersize;
403 }
404 for (j=0; j<cfi->numchips; j++) {
405 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
406 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
407 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
408 }
409 offset += (ersize * ernum);
410 }
411
412 if (offset != devsize) {
413 /* Argh */
414 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
415 goto setup_err;
416 }
417
418 for (i=0; i<mtd->numeraseregions;i++){
419 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
420 i,mtd->eraseregions[i].offset,
421 mtd->eraseregions[i].erasesize,
422 mtd->eraseregions[i].numblocks);
423 }
424
425#if 0
426 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
427 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
428#endif
429
430 /* This function has the potential to distort the reality
431 a bit and therefore should be called last. */
432 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
433 goto setup_err;
434
435 __module_get(THIS_MODULE);
436 return mtd;
437
438 setup_err:
439 if(mtd) {
440 if(mtd->eraseregions)
441 kfree(mtd->eraseregions);
442 kfree(mtd);
443 }
444 kfree(cfi->cmdset_priv);
445 return NULL;
446}
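
/*
 * Worked example (hypothetical descriptor value) of the EraseRegionInfo
 * decoding in the loop above: the low 16 bits hold the number of erase
 * blocks minus one, the upper bits hold the block size divided by 256,
 * and the driver then scales the size by cfi->interleave.
 */
static void example_decode_region(void)
{
	unsigned long info   = 0x0100003e;		/* hypothetical */
	unsigned long ersize = (info >> 8) & ~0xff;	/* 0x10000 = 64 KiB */
	unsigned long ernum  = (info & 0xffff) + 1;	/* 0x3f = 63 blocks */
	(void)ersize; (void)ernum;
}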
447
448static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
449 struct cfi_private **pcfi)
450{
451 struct map_info *map = mtd->priv;
452 struct cfi_private *cfi = *pcfi;
453 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
454
455	/*
456	 * Probing of multi-partition flash chips.
457	 *
458	 * To support multiple partitions when available, we simply arrange
459	 * for each of them to have their own flchip structure even if they
460	 * are on the same physical chip.  This means completely recreating
461	 * a new cfi_private structure right here which is a blatant code
462	 * layering violation, but this is still the least intrusive
463	 * arrangement at this point. This can be rearranged in the future
464	 * if someone feels motivated enough.  --nico
465	 */
466 if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
467 && extp->FeatureSupport & (1 << 9)) {
468 struct cfi_private *newcfi;
469 struct flchip *chip;
470 struct flchip_shared *shared;
471 int offs, numregions, numparts, partshift, numvirtchips, i, j;
472
473 /* Protection Register info */
474 offs = (extp->NumProtectionFields - 1) * (4 + 6);
475
476 /* Burst Read info */
477 offs += 6;
478
479 /* Number of partition regions */
480 numregions = extp->extra[offs];
481 offs += 1;
482
483 /* Number of hardware partitions */
484 numparts = 0;
485 for (i = 0; i < numregions; i++) {
486 struct cfi_intelext_regioninfo *rinfo;
487 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
488 numparts += rinfo->NumIdentPartitions;
489 offs += sizeof(*rinfo)
490 + (rinfo->NumBlockTypes - 1) *
491 sizeof(struct cfi_intelext_blockinfo);
492 }
493
494 /*
495 * All functions below currently rely on all chips having
496 * the same geometry so we'll just assume that all hardware
497 * partitions are of the same size too.
498 */
499 partshift = cfi->chipshift - __ffs(numparts);
500
501 if ((1 << partshift) < mtd->erasesize) {
502 printk( KERN_ERR
503 "%s: bad number of hw partitions (%d)\n",
504 __FUNCTION__, numparts);
505 return -EINVAL;
506 }
507
508 numvirtchips = cfi->numchips * numparts;
509 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
510 if (!newcfi)
511 return -ENOMEM;
512 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
513 if (!shared) {
514 kfree(newcfi);
515 return -ENOMEM;
516 }
517 memcpy(newcfi, cfi, sizeof(struct cfi_private));
518 newcfi->numchips = numvirtchips;
519 newcfi->chipshift = partshift;
520
521 chip = &newcfi->chips[0];
522 for (i = 0; i < cfi->numchips; i++) {
523 shared[i].writing = shared[i].erasing = NULL;
524 spin_lock_init(&shared[i].lock);
525 for (j = 0; j < numparts; j++) {
526 *chip = cfi->chips[i];
527 chip->start += j << partshift;
528 chip->priv = &shared[i];
529 /* those should be reset too since
530 they create memory references. */
531 init_waitqueue_head(&chip->wq);
532 spin_lock_init(&chip->_spinlock);
533 chip->mutex = &chip->_spinlock;
534 chip++;
535 }
536 }
537
538 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
539 "--> %d partitions of %d KiB\n",
540 map->name, cfi->numchips, cfi->interleave,
541 newcfi->numchips, 1<<(newcfi->chipshift-10));
542
543 map->fldrv_priv = newcfi;
544 *pcfi = newcfi;
545 kfree(cfi);
546 }
547
548 return 0;
549}
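
/*
 * Worked example for the geometry above (values hypothetical): a chip
 * with chipshift = 24 (16 MiB) advertising numparts = 4 identical
 * hardware partitions gives partshift = 24 - __ffs(4) = 22, i.e. each
 * virtual chip covers 4 MiB, and a board with two physical chips ends
 * up with numvirtchips = 2 * 4 = 8 flchip structures.
 */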
550
551/*
552 * *********** CHIP ACCESS FUNCTIONS ***********
553 */
554
555static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
556{
557 DECLARE_WAITQUEUE(wait, current);
558 struct cfi_private *cfi = map->fldrv_priv;
559 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
560 unsigned long timeo;
561 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
562
563 resettime:
564 timeo = jiffies + HZ;
565 retry:
566 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
567		/*
568		 * OK. We have the possibility of contention on the write/erase
569		 * operations which are global to the real chip and not per
570		 * partition.  So let's fight it over in the partition which
571		 * currently has authority on the operation.
572		 *
573		 * The rules are as follows:
574		 *
575		 * - any write operation must own shared->writing.
576		 *
577		 * - any erase operation must own _both_ shared->writing and
578		 *   shared->erasing.
579		 *
580		 * - contention arbitration is handled in the owner's context.
581		 *
582		 * The 'shared' struct can be read when its lock is taken.
583		 * However any writes to it can only be made when the current
584		 * owner's lock is also held.
585		 */
586 struct flchip_shared *shared = chip->priv;
587 struct flchip *contender;
588 spin_lock(&shared->lock);
589 contender = shared->writing;
590 if (contender && contender != chip) {
591 /*
592 * The engine to perform desired operation on this
593 * partition is already in use by someone else.
594 * Let's fight over it in the context of the chip
595 * currently using it. If it is possible to suspend,
596 * that other partition will do just that, otherwise
597 * it'll happily send us to sleep. In any case, when
598 * get_chip returns success we're clear to go ahead.
599 */
600 int ret = spin_trylock(contender->mutex);
601 spin_unlock(&shared->lock);
602 if (!ret)
603 goto retry;
604 spin_unlock(chip->mutex);
605 ret = get_chip(map, contender, contender->start, mode);
606 spin_lock(chip->mutex);
607 if (ret) {
608 spin_unlock(contender->mutex);
609 return ret;
610 }
611 timeo = jiffies + HZ;
612 spin_lock(&shared->lock);
613 }
614
615 /* We now own it */
616 shared->writing = chip;
617 if (mode == FL_ERASING)
618 shared->erasing = chip;
619 if (contender && contender != chip)
620 spin_unlock(contender->mutex);
621 spin_unlock(&shared->lock);
622 }
623
624 switch (chip->state) {
625
626 case FL_STATUS:
627 for (;;) {
628 status = map_read(map, adr);
629 if (map_word_andequal(map, status, status_OK, status_OK))
630 break;
631
632 /* At this point we're fine with write operations
633 in other partitions as they don't conflict. */
634 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
635 break;
636
637 if (time_after(jiffies, timeo)) {
638 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
639 status.x[0]);
640 return -EIO;
641 }
642 spin_unlock(chip->mutex);
643 cfi_udelay(1);
644 spin_lock(chip->mutex);
645 /* Someone else might have been playing with it. */
646 goto retry;
647 }
648
649 case FL_READY:
650 case FL_CFI_QUERY:
651 case FL_JEDEC_QUERY:
652 return 0;
653
654 case FL_ERASING:
655 if (!cfip ||
656 !(cfip->FeatureSupport & 2) ||
657 !(mode == FL_READY || mode == FL_POINT ||
658 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
659 goto sleep;
660
661
662 /* Erase suspend */
663 map_write(map, CMD(0xB0), adr);
664
665 /* If the flash has finished erasing, then 'erase suspend'
666 * appears to make some (28F320) flash devices switch to
667 * 'read' mode. Make sure that we switch to 'read status'
668 * mode so we get the right data. --rmk
669 */
670 map_write(map, CMD(0x70), adr);
671 chip->oldstate = FL_ERASING;
672 chip->state = FL_ERASE_SUSPENDING;
673 chip->erase_suspended = 1;
674 for (;;) {
675 status = map_read(map, adr);
676 if (map_word_andequal(map, status, status_OK, status_OK))
677 break;
678
679 if (time_after(jiffies, timeo)) {
680 /* Urgh. Resume and pretend we weren't here. */
681 map_write(map, CMD(0xd0), adr);
682 /* Make sure we're in 'read status' mode if it had finished */
683 map_write(map, CMD(0x70), adr);
684 chip->state = FL_ERASING;
685 chip->oldstate = FL_READY;
686 printk(KERN_ERR "Chip not ready after erase "
687 "suspended: status = 0x%lx\n", status.x[0]);
688 return -EIO;
689 }
690
691 spin_unlock(chip->mutex);
692 cfi_udelay(1);
693 spin_lock(chip->mutex);
694 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
695 So we can just loop here. */
696 }
697 chip->state = FL_STATUS;
698 return 0;
699
700 case FL_XIP_WHILE_ERASING:
701 if (mode != FL_READY && mode != FL_POINT &&
702 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
703 goto sleep;
704 chip->oldstate = chip->state;
705 chip->state = FL_READY;
706 return 0;
707
708 case FL_POINT:
709 /* Only if there's no operation suspended... */
710 if (mode == FL_READY && chip->oldstate == FL_READY)
711 return 0;
712
713 default:
714 sleep:
715 set_current_state(TASK_UNINTERRUPTIBLE);
716 add_wait_queue(&chip->wq, &wait);
717 spin_unlock(chip->mutex);
718 schedule();
719 remove_wait_queue(&chip->wq, &wait);
720 spin_lock(chip->mutex);
721 goto resettime;
722 }
723}
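
/*
 * Stand-alone sketch (illustrative only, all names made up) of the
 * partition arbitration rules enforced by get_chip() above: a writer
 * must hold the "writing" token, an eraser must hold both tokens.
 * Locking of the token structure is omitted for brevity.
 */
struct example_tokens {
	void *writing;	/* partition owning the program engine */
	void *erasing;	/* partition owning the erase engine */
};

static int example_try_acquire(struct example_tokens *t, void *me, int erase)
{
	if (t->writing && t->writing != me)
		return 0;	/* engine busy: arbitrate with the owner */
	t->writing = me;
	if (erase)
		t->erasing = me;
	return 1;
}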
724
725static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
726{
727 struct cfi_private *cfi = map->fldrv_priv;
728
729 if (chip->priv) {
730 struct flchip_shared *shared = chip->priv;
731 spin_lock(&shared->lock);
732 if (shared->writing == chip && chip->oldstate == FL_READY) {
733 /* We own the ability to write, but we're done */
734 shared->writing = shared->erasing;
735 if (shared->writing && shared->writing != chip) {
736 /* give back ownership to who we loaned it from */
737 struct flchip *loaner = shared->writing;
738 spin_lock(loaner->mutex);
739 spin_unlock(&shared->lock);
740 spin_unlock(chip->mutex);
741 put_chip(map, loaner, loaner->start);
742 spin_lock(chip->mutex);
743 spin_unlock(loaner->mutex);
744 wake_up(&chip->wq);
745 return;
746 }
747 shared->erasing = NULL;
748 shared->writing = NULL;
749 } else if (shared->erasing == chip && shared->writing != chip) {
750 /*
751 * We own the ability to erase without the ability
752 * to write, which means the erase was suspended
753 * and some other partition is currently writing.
754 * Don't let the switch below mess things up since
755 * we don't have ownership to resume anything.
756 */
757 spin_unlock(&shared->lock);
758 wake_up(&chip->wq);
759 return;
760 }
761 spin_unlock(&shared->lock);
762 }
763
764 switch(chip->oldstate) {
765 case FL_ERASING:
766 chip->state = chip->oldstate;
767 /* What if one interleaved chip has finished and the
768 other hasn't? The old code would leave the finished
769 one in READY mode. That's bad, and caused -EROFS
770 errors to be returned from do_erase_oneblock because
771 that's the only bit it checked for at the time.
772 As the state machine appears to explicitly allow
773 sending the 0x70 (Read Status) command to an erasing
774 chip and expecting it to be ignored, that's what we
775 do. */
776 map_write(map, CMD(0xd0), adr);
777 map_write(map, CMD(0x70), adr);
778 chip->oldstate = FL_READY;
779 chip->state = FL_ERASING;
780 break;
781
782 case FL_XIP_WHILE_ERASING:
783 chip->state = chip->oldstate;
784 chip->oldstate = FL_READY;
785 break;
786
787 case FL_READY:
788 case FL_STATUS:
789 case FL_JEDEC_QUERY:
790 /* We should really make set_vpp() count, rather than doing this */
791 DISABLE_VPP(map);
792 break;
793 default:
794 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
795 }
796 wake_up(&chip->wq);
797}
798
799#ifdef CONFIG_MTD_XIP
800
801/*
802 * No interrupt whatsoever can be serviced while the flash isn't in array
803 * mode.  This is ensured by the xip_disable() and xip_enable() functions
804 * enclosing any code path where the flash is known not to be in array mode.
805 * Within an XIP-disabled code path, only functions marked with __xipram
806 * may be called and nothing else (it's a good idea to inspect the generated
807 * assembly to make sure inline functions were actually inlined and that gcc
808 * didn't emit calls to its own support functions).  Configuring the MTD CFI
809 * support for a single buswidth and a single interleave is also recommended.
810 * Note that not only are IRQs disabled but the preemption count is also
811 * increased to prevent other locking primitives (namely spin_unlock) from
812 * decrementing the preempt count to zero and scheduling the CPU away while
813 * the flash is not in array mode.
814 */
815
816static void xip_disable(struct map_info *map, struct flchip *chip,
817 unsigned long adr)
818{
819 /* TODO: chips with no XIP use should ignore and return */
820 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
821 preempt_disable();
822 local_irq_disable();
823}
824
825static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
826 unsigned long adr)
827{
828 struct cfi_private *cfi = map->fldrv_priv;
829 if (chip->state != FL_POINT && chip->state != FL_READY) {
830 map_write(map, CMD(0xff), adr);
831 chip->state = FL_READY;
832 }
833 (void) map_read(map, adr);
834 asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
835 local_irq_enable();
836 preempt_enable();
837}
838
839/*
840 * When a delay is required for the flash operation to complete, the
841 * xip_udelay() function polls for both the given timeout and pending
842 * (but still masked) hardware interrupts.  Whenever there is an interrupt
843 * pending, the flash erase or write operation is suspended, array mode is
844 * restored and interrupts are unmasked.  Task scheduling might also happen
845 * at that point.  The CPU eventually returns from the interrupt or the call
846 * to schedule() and the suspended flash operation is resumed for the
847 * remainder of the delay period.
848 *
849 * Warning: this function _will_ fool interrupt latency tracing tools.
850 */
851
852static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
853 unsigned long adr, int usec)
854{
855 struct cfi_private *cfi = map->fldrv_priv;
856 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
857 map_word status, OK = CMD(0x80);
858 unsigned long suspended, start = xip_currtime();
859 flstate_t oldstate, newstate;
860
861 do {
862 cpu_relax();
863 if (xip_irqpending() && cfip &&
864 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
865 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
866 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
867 /*
868 * Let's suspend the erase or write operation when
869 * supported. Note that we currently don't try to
870 * suspend interleaved chips if there is already
871 * another operation suspended (imagine what happens
872 * when one chip was already done with the current
873 * operation while another chip suspended it, then
874 * we resume the whole thing at once). Yes, it
875 * can happen!
876 */
877 map_write(map, CMD(0xb0), adr);
878 map_write(map, CMD(0x70), adr);
879 usec -= xip_elapsed_since(start);
880 suspended = xip_currtime();
881 do {
882 if (xip_elapsed_since(suspended) > 100000) {
883 /*
884 * The chip doesn't want to suspend
885 * after waiting for 100 msecs.
886 * This is a critical error but there
887 * is not much we can do here.
888 */
889 return;
890 }
891 status = map_read(map, adr);
892 } while (!map_word_andequal(map, status, OK, OK));
893
894 /* Suspend succeeded */
895 oldstate = chip->state;
896 if (oldstate == FL_ERASING) {
897 if (!map_word_bitsset(map, status, CMD(0x40)))
898 break;
899 newstate = FL_XIP_WHILE_ERASING;
900 chip->erase_suspended = 1;
901 } else {
902 if (!map_word_bitsset(map, status, CMD(0x04)))
903 break;
904 newstate = FL_XIP_WHILE_WRITING;
905 chip->write_suspended = 1;
906 }
907 chip->state = newstate;
908 map_write(map, CMD(0xff), adr);
909 (void) map_read(map, adr);
910 asm volatile (".rep 8; nop; .endr");
911 local_irq_enable();
912 preempt_enable();
913 asm volatile (".rep 8; nop; .endr");
914 cond_resched();
915
916 /*
917 * We're back. However someone else might have
918 * decided to go write to the chip if we are in
919 * a suspended erase state. If so let's wait
920 * until it's done.
921 */
922 preempt_disable();
923 while (chip->state != newstate) {
924 DECLARE_WAITQUEUE(wait, current);
925 set_current_state(TASK_UNINTERRUPTIBLE);
926 add_wait_queue(&chip->wq, &wait);
927 preempt_enable();
928 schedule();
929 remove_wait_queue(&chip->wq, &wait);
930 preempt_disable();
931 }
932 /* Disallow XIP again */
933 local_irq_disable();
934
935 /* Resume the write or erase operation */
936 map_write(map, CMD(0xd0), adr);
937 map_write(map, CMD(0x70), adr);
938 chip->state = oldstate;
939 start = xip_currtime();
940 } else if (usec >= 1000000/HZ) {
941 /*
942 * Try to save on CPU power when waiting delay
943 * is at least a system timer tick period.
944 * No need to be extremely accurate here.
945 */
946 xip_cpu_idle();
947 }
948 status = map_read(map, adr);
949 } while (!map_word_andequal(map, status, OK, OK)
950 && xip_elapsed_since(start) < usec);
951}
952
953#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
954
955/*
956 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
957 * the flash is actively programming or erasing since we have to poll for
958 * the operation to complete anyway. We can't do that in a generic way with
959 * a XIP setup so do it before the actual flash operation in this case.
960 */
961#undef INVALIDATE_CACHED_RANGE
962#define INVALIDATE_CACHED_RANGE(x...)
963#define XIP_INVAL_CACHED_RANGE(map, from, size) \
964 do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
965
966/*
967 * Extra notes:
968 *
969 * Activating this XIP support changes the way the code works a bit.  For
970 * example the code to suspend the current process when concurrent access
971 * happens is never executed because xip_udelay() will always return with
972 * the same chip state as it was entered with.  This is why no special care
973 * is taken over the add_wait_queue() or schedule() calls from within a
974 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
975 * example.  The queueing and scheduling always happen within xip_udelay().
976 *
977 * Similarly, get_chip() and put_chip() just happen to always be executed
978 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
979 * in array mode, therefore never executing many cases therein and not
980 * causing any problem with XIP.
981 */
982
983#else
984
985#define xip_disable(map, chip, adr)
986#define xip_enable(map, chip, adr)
987
988#define UDELAY(map, chip, adr, usec) cfi_udelay(usec)
989
990#define XIP_INVAL_CACHED_RANGE(x...)
991
992#endif
993
994static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
995{
996 unsigned long cmd_addr;
997 struct cfi_private *cfi = map->fldrv_priv;
998 int ret = 0;
999
1000 adr += chip->start;
1001
1002 /* Ensure cmd read/writes are aligned. */
1003 cmd_addr = adr & ~(map_bankwidth(map)-1);
1004
1005 spin_lock(chip->mutex);
1006
1007 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1008
1009 if (!ret) {
1010 if (chip->state != FL_POINT && chip->state != FL_READY)
1011 map_write(map, CMD(0xff), cmd_addr);
1012
1013 chip->state = FL_POINT;
1014 chip->ref_point_counter++;
1015 }
1016 spin_unlock(chip->mutex);
1017
1018 return ret;
1019}
1020
1021static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1022{
1023 struct map_info *map = mtd->priv;
1024 struct cfi_private *cfi = map->fldrv_priv;
1025 unsigned long ofs;
1026 int chipnum;
1027 int ret = 0;
1028
1029 if (!map->virt || (from + len > mtd->size))
1030 return -EINVAL;
1031
1032 *mtdbuf = (void *)map->virt + from;
1033 *retlen = 0;
1034
1035 /* Now lock the chip(s) to POINT state */
1036
1037 /* ofs: offset within the first chip that the first read should start */
1038 chipnum = (from >> cfi->chipshift);
1039 ofs = from - (chipnum << cfi->chipshift);
1040
1041 while (len) {
1042 unsigned long thislen;
1043
1044 if (chipnum >= cfi->numchips)
1045 break;
1046
1047 if ((len + ofs -1) >> cfi->chipshift)
1048 thislen = (1<<cfi->chipshift) - ofs;
1049 else
1050 thislen = len;
1051
1052 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1053 if (ret)
1054 break;
1055
1056 *retlen += thislen;
1057 len -= thislen;
1058
1059 ofs = 0;
1060 chipnum++;
1061 }
1062 return 0;
1063}
1064
1065static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1066{
1067 struct map_info *map = mtd->priv;
1068 struct cfi_private *cfi = map->fldrv_priv;
1069 unsigned long ofs;
1070 int chipnum;
1071
1072 /* Now unlock the chip(s) POINT state */
1073
1074 /* ofs: offset within the first chip that the first read should start */
1075 chipnum = (from >> cfi->chipshift);
1076 ofs = from - (chipnum << cfi->chipshift);
1077
1078 while (len) {
1079 unsigned long thislen;
1080 struct flchip *chip;
1081
1082		if (chipnum >= cfi->numchips)
1083			break;
1084		chip = &cfi->chips[chipnum];
1085
1086 if ((len + ofs -1) >> cfi->chipshift)
1087 thislen = (1<<cfi->chipshift) - ofs;
1088 else
1089 thislen = len;
1090
1091 spin_lock(chip->mutex);
1092 if (chip->state == FL_POINT) {
1093 chip->ref_point_counter--;
1094 if(chip->ref_point_counter == 0)
1095 chip->state = FL_READY;
1096 } else
1097			printk(KERN_WARNING "unpoint called on non-pointed region\n"); /* Should this give an error? */
1098
1099 put_chip(map, chip, chip->start);
1100 spin_unlock(chip->mutex);
1101
1102 len -= thislen;
1103 ofs = 0;
1104 chipnum++;
1105 }
1106}
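
/*
 * Hypothetical caller-side usage of the point/unpoint pair implemented
 * above; error handling and the consumer of the mapped data are elided.
 */
static void example_point_usage(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	size_t retlen;
	u_char *va;

	if (mtd->point && mtd->point(mtd, ofs, len, &retlen, &va) == 0) {
		/* 'va' now points straight into the flash mapping for
		   'retlen' bytes: zero-copy, read-only access. */
		mtd->unpoint(mtd, va, ofs, retlen);
	}
}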
1107
1108static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1109{
1110 unsigned long cmd_addr;
1111 struct cfi_private *cfi = map->fldrv_priv;
1112 int ret;
1113
1114 adr += chip->start;
1115
1116 /* Ensure cmd read/writes are aligned. */
1117 cmd_addr = adr & ~(map_bankwidth(map)-1);
1118
1119 spin_lock(chip->mutex);
1120 ret = get_chip(map, chip, cmd_addr, FL_READY);
1121 if (ret) {
1122 spin_unlock(chip->mutex);
1123 return ret;
1124 }
1125
1126 if (chip->state != FL_POINT && chip->state != FL_READY) {
1127 map_write(map, CMD(0xff), cmd_addr);
1128
1129 chip->state = FL_READY;
1130 }
1131
1132 map_copy_from(map, buf, adr, len);
1133
1134 put_chip(map, chip, cmd_addr);
1135
1136 spin_unlock(chip->mutex);
1137 return 0;
1138}
1139
1140static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1141{
1142 struct map_info *map = mtd->priv;
1143 struct cfi_private *cfi = map->fldrv_priv;
1144 unsigned long ofs;
1145 int chipnum;
1146 int ret = 0;
1147
1148 /* ofs: offset within the first chip that the first read should start */
1149 chipnum = (from >> cfi->chipshift);
1150 ofs = from - (chipnum << cfi->chipshift);
1151
1152 *retlen = 0;
1153
1154 while (len) {
1155 unsigned long thislen;
1156
1157 if (chipnum >= cfi->numchips)
1158 break;
1159
1160 if ((len + ofs -1) >> cfi->chipshift)
1161 thislen = (1<<cfi->chipshift) - ofs;
1162 else
1163 thislen = len;
1164
1165 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1166 if (ret)
1167 break;
1168
1169 *retlen += thislen;
1170 len -= thislen;
1171 buf += thislen;
1172
1173 ofs = 0;
1174 chipnum++;
1175 }
1176 return ret;
1177}
1178
1179#if 0
1180static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
1181 loff_t from, size_t len,
1182 size_t *retlen,
1183 u_char *buf,
1184 int base_offst, int reg_sz)
1185{
1186 struct map_info *map = mtd->priv;
1187 struct cfi_private *cfi = map->fldrv_priv;
1188 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1189 struct flchip *chip;
1190 int ofs_factor = cfi->interleave * cfi->device_type;
1191 int count = len;
1192 int chip_num, offst;
1193 int ret;
1194
1195 chip_num = ((unsigned int)from/reg_sz);
1196 offst = from - (reg_sz*chip_num)+base_offst;
1197
1198 while (count) {
1199 /* Calculate which chip & protection register offset we need */
1200
1201 if (chip_num >= cfi->numchips)
1202 goto out;
1203
1204 chip = &cfi->chips[chip_num];
1205
1206 spin_lock(chip->mutex);
1207 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1208 if (ret) {
1209 spin_unlock(chip->mutex);
1210 return (len-count)?:ret;
1211 }
1212
1213 xip_disable(map, chip, chip->start);
1214
1215 if (chip->state != FL_JEDEC_QUERY) {
1216 map_write(map, CMD(0x90), chip->start);
1217 chip->state = FL_JEDEC_QUERY;
1218 }
1219
1220 while (count && ((offst-base_offst) < reg_sz)) {
1221 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
1222 buf++;
1223 offst++;
1224 count--;
1225 }
1226
1227 xip_enable(map, chip, chip->start);
1228 put_chip(map, chip, chip->start);
1229 spin_unlock(chip->mutex);
1230
1231 /* Move on to the next chip */
1232 chip_num++;
1233 offst = base_offst;
1234 }
1235
1236 out:
1237 return len-count;
1238}
1239
1240static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1241{
1242 struct map_info *map = mtd->priv;
1243 struct cfi_private *cfi = map->fldrv_priv;
1244 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
1245 int base_offst,reg_sz;
1246
1247 /* Check that we actually have some protection registers */
1248 if(!extp || !(extp->FeatureSupport&64)){
1249 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
1250 return 0;
1251 }
1252
1253 base_offst=(1<<extp->FactProtRegSize);
1254 reg_sz=(1<<extp->UserProtRegSize);
1255
1256 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
1257}
1258
1259static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1260{
1261 struct map_info *map = mtd->priv;
1262 struct cfi_private *cfi = map->fldrv_priv;
1263 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
1264 int base_offst,reg_sz;
1265
1266 /* Check that we actually have some protection registers */
1267 if(!extp || !(extp->FeatureSupport&64)){
1268 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
1269 return 0;
1270 }
1271
1272 base_offst=0;
1273 reg_sz=(1<<extp->FactProtRegSize);
1274
1275 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
1276}
1277#endif
1278
1279static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1280 unsigned long adr, map_word datum)
1281{
1282 struct cfi_private *cfi = map->fldrv_priv;
1283 map_word status, status_OK;
1284 unsigned long timeo;
1285 int z, ret=0;
1286
1287 adr += chip->start;
1288
1289 /* Let's determine this according to the interleave only once */
1290 status_OK = CMD(0x80);
1291
1292 spin_lock(chip->mutex);
1293 ret = get_chip(map, chip, adr, FL_WRITING);
1294 if (ret) {
1295 spin_unlock(chip->mutex);
1296 return ret;
1297 }
1298
1299 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1300 ENABLE_VPP(map);
1301 xip_disable(map, chip, adr);
1302 map_write(map, CMD(0x40), adr);
1303 map_write(map, datum, adr);
1304 chip->state = FL_WRITING;
1305
1306 spin_unlock(chip->mutex);
1307 INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
1308 UDELAY(map, chip, adr, chip->word_write_time);
1309 spin_lock(chip->mutex);
1310
1311 timeo = jiffies + (HZ/2);
1312 z = 0;
1313 for (;;) {
1314 if (chip->state != FL_WRITING) {
1315 /* Someone's suspended the write. Sleep */
1316 DECLARE_WAITQUEUE(wait, current);
1317
1318 set_current_state(TASK_UNINTERRUPTIBLE);
1319 add_wait_queue(&chip->wq, &wait);
1320 spin_unlock(chip->mutex);
1321 schedule();
1322 remove_wait_queue(&chip->wq, &wait);
1323 timeo = jiffies + (HZ / 2); /* FIXME */
1324 spin_lock(chip->mutex);
1325 continue;
1326 }
1327
1328 status = map_read(map, adr);
1329 if (map_word_andequal(map, status, status_OK, status_OK))
1330 break;
1331
1332 /* OK Still waiting */
1333 if (time_after(jiffies, timeo)) {
1334 chip->state = FL_STATUS;
1335 xip_enable(map, chip, adr);
1336 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
1337 ret = -EIO;
1338 goto out;
1339 }
1340
1341 /* Latency issues. Drop the lock, wait a while and retry */
1342 spin_unlock(chip->mutex);
1343 z++;
1344 UDELAY(map, chip, adr, 1);
1345 spin_lock(chip->mutex);
1346 }
1347 if (!z) {
1348 chip->word_write_time--;
1349 if (!chip->word_write_time)
1350 chip->word_write_time++;
1351 }
1352 if (z > 1)
1353 chip->word_write_time++;
1354
1355 /* Done and happy. */
1356 chip->state = FL_STATUS;
1357
1358 /* check for lock bit */
1359 if (map_word_bitsset(map, status, CMD(0x02))) {
1360 /* clear status */
1361 map_write(map, CMD(0x50), adr);
1362 /* put back into read status register mode */
1363 map_write(map, CMD(0x70), adr);
1364 ret = -EROFS;
1365 }
1366
1367 xip_enable(map, chip, adr);
1368 out: put_chip(map, chip, adr);
1369 spin_unlock(chip->mutex);
1370
1371 return ret;
1372}
1373
1374
1375static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1376{
1377 struct map_info *map = mtd->priv;
1378 struct cfi_private *cfi = map->fldrv_priv;
1379 int ret = 0;
1380 int chipnum;
1381 unsigned long ofs;
1382
1383 *retlen = 0;
1384 if (!len)
1385 return 0;
1386
1387 chipnum = to >> cfi->chipshift;
1388 ofs = to - (chipnum << cfi->chipshift);
1389
1390 /* If it's not bus-aligned, do the first byte write */
1391 if (ofs & (map_bankwidth(map)-1)) {
1392 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1393 int gap = ofs - bus_ofs;
1394 int n;
1395 map_word datum;
1396
1397 n = min_t(int, len, map_bankwidth(map)-gap);
1398 datum = map_word_ff(map);
1399 datum = map_word_load_partial(map, datum, buf, gap, n);
1400
1401 ret = do_write_oneword(map, &cfi->chips[chipnum],
1402 bus_ofs, datum);
1403 if (ret)
1404 return ret;
1405
1406 len -= n;
1407 ofs += n;
1408 buf += n;
1409 (*retlen) += n;
1410
1411 if (ofs >> cfi->chipshift) {
1412 chipnum ++;
1413 ofs = 0;
1414 if (chipnum == cfi->numchips)
1415 return 0;
1416 }
1417 }
1418
1419 while(len >= map_bankwidth(map)) {
1420 map_word datum = map_word_load(map, buf);
1421
1422 ret = do_write_oneword(map, &cfi->chips[chipnum],
1423 ofs, datum);
1424 if (ret)
1425 return ret;
1426
1427 ofs += map_bankwidth(map);
1428 buf += map_bankwidth(map);
1429 (*retlen) += map_bankwidth(map);
1430 len -= map_bankwidth(map);
1431
1432 if (ofs >> cfi->chipshift) {
1433 chipnum ++;
1434 ofs = 0;
1435 if (chipnum == cfi->numchips)
1436 return 0;
1437 }
1438 }
1439
1440 if (len & (map_bankwidth(map)-1)) {
1441 map_word datum;
1442
1443 datum = map_word_ff(map);
1444 datum = map_word_load_partial(map, datum, buf, 0, len);
1445
1446 ret = do_write_oneword(map, &cfi->chips[chipnum],
1447 ofs, datum);
1448 if (ret)
1449 return ret;
1450
1451 (*retlen) += len;
1452 }
1453
1454 return 0;
1455}
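
/*
 * Self-contained sketch of the head/body/tail split performed by
 * cfi_intelext_write_words() above; the bankwidth and sample values
 * are hypothetical.
 */
static void example_split(unsigned long to, size_t len, unsigned int bw)
{
	size_t head = (bw - (to & (bw - 1))) & (bw - 1);  /* bytes to align */
	size_t body, tail;

	if (head > len)
		head = len;
	body = (len - head) & ~(size_t)(bw - 1);	/* whole words */
	tail = len - head - body;			/* trailing bytes */
	/* e.g. to = 6, len = 11, bw = 4  ->  head = 2, body = 8, tail = 1 */
	(void)body; (void)tail;
}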
1456
1457
1458static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1459 unsigned long adr, const u_char *buf, int len)
1460{
1461 struct cfi_private *cfi = map->fldrv_priv;
1462 map_word status, status_OK;
1463 unsigned long cmd_adr, timeo;
1464 int wbufsize, z, ret=0, bytes, words;
1465
1466 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1467 adr += chip->start;
1468 cmd_adr = adr & ~(wbufsize-1);
1469
1470 /* Let's determine this according to the interleave only once */
1471 status_OK = CMD(0x80);
1472
1473 spin_lock(chip->mutex);
1474 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1475 if (ret) {
1476 spin_unlock(chip->mutex);
1477 return ret;
1478 }
1479
1480 XIP_INVAL_CACHED_RANGE(map, adr, len);
1481 ENABLE_VPP(map);
1482 xip_disable(map, chip, cmd_adr);
1483
1484 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1485 [...], the device will not accept any more Write to Buffer commands".
1486 So we must check here and reset those bits if they're set. Otherwise
1487 we're just pissing in the wind */
1488 if (chip->state != FL_STATUS)
1489 map_write(map, CMD(0x70), cmd_adr);
1490 status = map_read(map, cmd_adr);
1491 if (map_word_bitsset(map, status, CMD(0x30))) {
1492 xip_enable(map, chip, cmd_adr);
1493 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1494 xip_disable(map, chip, cmd_adr);
1495 map_write(map, CMD(0x50), cmd_adr);
1496 map_write(map, CMD(0x70), cmd_adr);
1497 }
1498
1499 chip->state = FL_WRITING_TO_BUFFER;
1500
1501 z = 0;
1502 for (;;) {
1503 map_write(map, CMD(0xe8), cmd_adr);
1504
1505 status = map_read(map, cmd_adr);
1506 if (map_word_andequal(map, status, status_OK, status_OK))
1507 break;
1508
1509 spin_unlock(chip->mutex);
1510 UDELAY(map, chip, cmd_adr, 1);
1511 spin_lock(chip->mutex);
1512
1513 if (++z > 20) {
1514 /* Argh. Not ready for write to buffer */
1515 map_word Xstatus;
1516 map_write(map, CMD(0x70), cmd_adr);
1517 chip->state = FL_STATUS;
1518 Xstatus = map_read(map, cmd_adr);
1519 /* Odd. Clear status bits */
1520 map_write(map, CMD(0x50), cmd_adr);
1521 map_write(map, CMD(0x70), cmd_adr);
1522 xip_enable(map, chip, cmd_adr);
1523 printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1524 status.x[0], Xstatus.x[0]);
1525 ret = -EIO;
1526 goto out;
1527 }
1528 }
1529
1530	/* Write the count of (bus-width) words to come, encoded as N-1 */
1531	bytes = len & (map_bankwidth(map)-1);
1532	words = len / map_bankwidth(map);
1533	map_write(map, CMD(words - !bytes), cmd_adr);
1534
1535 /* Write data */
1536 z = 0;
1537 while(z < words * map_bankwidth(map)) {
1538 map_word datum = map_word_load(map, buf);
1539 map_write(map, datum, adr+z);
1540
1541 z += map_bankwidth(map);
1542 buf += map_bankwidth(map);
1543 }
1544
1545 if (bytes) {
1546 map_word datum;
1547
1548 datum = map_word_ff(map);
1549 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1550 map_write(map, datum, adr+z);
1551 }
1552
1553 /* GO GO GO */
1554 map_write(map, CMD(0xd0), cmd_adr);
1555 chip->state = FL_WRITING;
1556
1557 spin_unlock(chip->mutex);
1558 INVALIDATE_CACHED_RANGE(map, adr, len);
1559 UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
1560 spin_lock(chip->mutex);
1561
1562 timeo = jiffies + (HZ/2);
1563 z = 0;
1564 for (;;) {
1565 if (chip->state != FL_WRITING) {
1566 /* Someone's suspended the write. Sleep */
1567 DECLARE_WAITQUEUE(wait, current);
1568 set_current_state(TASK_UNINTERRUPTIBLE);
1569 add_wait_queue(&chip->wq, &wait);
1570 spin_unlock(chip->mutex);
1571 schedule();
1572 remove_wait_queue(&chip->wq, &wait);
1573 timeo = jiffies + (HZ / 2); /* FIXME */
1574 spin_lock(chip->mutex);
1575 continue;
1576 }
1577
1578 status = map_read(map, cmd_adr);
1579 if (map_word_andequal(map, status, status_OK, status_OK))
1580 break;
1581
1582 /* OK Still waiting */
1583 if (time_after(jiffies, timeo)) {
1584 chip->state = FL_STATUS;
1585 xip_enable(map, chip, cmd_adr);
1586 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1587 ret = -EIO;
1588 goto out;
1589 }
1590
1591 /* Latency issues. Drop the lock, wait a while and retry */
1592 spin_unlock(chip->mutex);
1593 UDELAY(map, chip, cmd_adr, 1);
1594 z++;
1595 spin_lock(chip->mutex);
1596 }
1597 if (!z) {
1598 chip->buffer_write_time--;
1599 if (!chip->buffer_write_time)
1600 chip->buffer_write_time++;
1601 }
1602 if (z > 1)
1603 chip->buffer_write_time++;
1604
1605 /* Done and happy. */
1606 chip->state = FL_STATUS;
1607
1608 /* check for lock bit */
1609 if (map_word_bitsset(map, status, CMD(0x02))) {
1610 /* clear status */
1611 map_write(map, CMD(0x50), cmd_adr);
1612 /* put back into read status register mode */
1613 map_write(map, CMD(0x70), adr);
1614 ret = -EROFS;
1615 }
1616
1617 xip_enable(map, chip, cmd_adr);
1618 out: put_chip(map, chip, cmd_adr);
1619 spin_unlock(chip->mutex);
1620 return ret;
1621}
1622
1623static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1624 size_t len, size_t *retlen, const u_char *buf)
1625{
1626 struct map_info *map = mtd->priv;
1627 struct cfi_private *cfi = map->fldrv_priv;
1628 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1629 int ret = 0;
1630 int chipnum;
1631 unsigned long ofs;
1632
1633 *retlen = 0;
1634 if (!len)
1635 return 0;
1636
1637 chipnum = to >> cfi->chipshift;
1638 ofs = to - (chipnum << cfi->chipshift);
1639
1640 /* If it's not bus-aligned, do the first word write */
1641 if (ofs & (map_bankwidth(map)-1)) {
1642 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1643 if (local_len > len)
1644 local_len = len;
1645 ret = cfi_intelext_write_words(mtd, to, local_len,
1646 retlen, buf);
1647 if (ret)
1648 return ret;
1649 ofs += local_len;
1650 buf += local_len;
1651 len -= local_len;
1652
1653 if (ofs >> cfi->chipshift) {
1654 chipnum ++;
1655 ofs = 0;
1656 if (chipnum == cfi->numchips)
1657 return 0;
1658 }
1659 }
1660
1661 while(len) {
1662 /* We must not cross write block boundaries */
1663 int size = wbufsize - (ofs & (wbufsize-1));
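		/* e.g. with a hypothetical wbufsize of 32 and ofs = 40:
		   size = 32 - (40 & 31) = 24, so this chunk stops exactly
		   at the next 32-byte write-block boundary. */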
1664
1665 if (size > len)
1666 size = len;
1667 ret = do_write_buffer(map, &cfi->chips[chipnum],
1668 ofs, buf, size);
1669 if (ret)
1670 return ret;
1671
1672 ofs += size;
1673 buf += size;
1674 (*retlen) += size;
1675 len -= size;
1676
1677 if (ofs >> cfi->chipshift) {
1678 chipnum ++;
1679 ofs = 0;
1680 if (chipnum == cfi->numchips)
1681 return 0;
1682 }
1683 }
1684 return 0;
1685}
1686
1687static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1688 unsigned long adr, int len, void *thunk)
1689{
1690 struct cfi_private *cfi = map->fldrv_priv;
1691 map_word status, status_OK;
1692 unsigned long timeo;
1693 int retries = 3;
1694 DECLARE_WAITQUEUE(wait, current);
1695 int ret = 0;
1696
1697 adr += chip->start;
1698
1699 /* Let's determine this according to the interleave only once */
1700 status_OK = CMD(0x80);
1701
1702 retry:
1703 spin_lock(chip->mutex);
1704 ret = get_chip(map, chip, adr, FL_ERASING);
1705 if (ret) {
1706 spin_unlock(chip->mutex);
1707 return ret;
1708 }
1709
1710 XIP_INVAL_CACHED_RANGE(map, adr, len);
1711 ENABLE_VPP(map);
1712 xip_disable(map, chip, adr);
1713
1714 /* Clear the status register first */
1715 map_write(map, CMD(0x50), adr);
1716
1717 /* Now erase */
1718 map_write(map, CMD(0x20), adr);
1719 map_write(map, CMD(0xD0), adr);
1720 chip->state = FL_ERASING;
1721 chip->erase_suspended = 0;
1722
1723 spin_unlock(chip->mutex);
1724 INVALIDATE_CACHED_RANGE(map, adr, len);
1725 UDELAY(map, chip, adr, chip->erase_time*1000/2);
1726 spin_lock(chip->mutex);
1727
1728 /* FIXME. Use a timer to check this, and return immediately. */
1729 /* Once the state machine's known to be working I'll do that */
1730
1731 timeo = jiffies + (HZ*20);
1732 for (;;) {
1733 if (chip->state != FL_ERASING) {
1734 /* Someone's suspended the erase. Sleep */
1735 set_current_state(TASK_UNINTERRUPTIBLE);
1736 add_wait_queue(&chip->wq, &wait);
1737 spin_unlock(chip->mutex);
1738 schedule();
1739 remove_wait_queue(&chip->wq, &wait);
1740 spin_lock(chip->mutex);
1741 continue;
1742 }
1743 if (chip->erase_suspended) {
1744 /* This erase was suspended and resumed.
1745 Adjust the timeout */
1746 timeo = jiffies + (HZ*20); /* FIXME */
1747 chip->erase_suspended = 0;
1748 }
1749
1750 status = map_read(map, adr);
1751 if (map_word_andequal(map, status, status_OK, status_OK))
1752 break;
1753
1754 /* OK Still waiting */
1755 if (time_after(jiffies, timeo)) {
1756 map_word Xstatus;
1757 map_write(map, CMD(0x70), adr);
1758 chip->state = FL_STATUS;
1759 Xstatus = map_read(map, adr);
1760 /* Clear status bits */
1761 map_write(map, CMD(0x50), adr);
1762 map_write(map, CMD(0x70), adr);
1763 xip_enable(map, chip, adr);
1764 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1765 adr, status.x[0], Xstatus.x[0]);
1766 ret = -EIO;
1767 goto out;
1768 }
1769
1770 /* Latency issues. Drop the lock, wait a while and retry */
1771 spin_unlock(chip->mutex);
1772 UDELAY(map, chip, adr, 1000000/HZ);
1773 spin_lock(chip->mutex);
1774 }
1775
1776 /* We've broken this before. It doesn't hurt to be safe */
1777 map_write(map, CMD(0x70), adr);
1778 chip->state = FL_STATUS;
1779 status = map_read(map, adr);
1780
1781	/* check the error bits: SR.5 (erase error), SR.4 (program/VPP error), SR.3 (VPP low), SR.1 (block locked) */
1782 if (map_word_bitsset(map, status, CMD(0x3a))) {
1783 unsigned char chipstatus;
1784
1785 /* Reset the error bits */
1786 map_write(map, CMD(0x50), adr);
1787 map_write(map, CMD(0x70), adr);
1788 xip_enable(map, chip, adr);
1789
1790 chipstatus = status.x[0];
1791 if (!map_word_equal(map, status, CMD(chipstatus))) {
1792 int i, w;
1793 for (w=0; w<map_words(map); w++) {
1794 for (i = 0; i<cfi_interleave(cfi); i++) {
1795					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
1796 }
1797 }
1798 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1799 status.x[0], chipstatus);
1800 }
1801
1802 if ((chipstatus & 0x30) == 0x30) {
1803 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1804 ret = -EIO;
1805 } else if (chipstatus & 0x02) {
1806 /* Protection bit set */
1807 ret = -EROFS;
1808 } else if (chipstatus & 0x8) {
1809 /* Voltage */
1810 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1811 ret = -EIO;
1812 } else if (chipstatus & 0x20) {
1813 if (retries--) {
1814 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1815 timeo = jiffies + HZ;
1816 put_chip(map, chip, adr);
1817 spin_unlock(chip->mutex);
1818 goto retry;
1819 }
1820 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1821 ret = -EIO;
1822 }
1823 } else {
1824 xip_enable(map, chip, adr);
1825 ret = 0;
1826 }
1827
1828 out: put_chip(map, chip, adr);
1829 spin_unlock(chip->mutex);
1830 return ret;
1831}
1832
1833int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1834{
1835 unsigned long ofs, len;
1836 int ret;
1837
1838 ofs = instr->addr;
1839 len = instr->len;
1840
1841 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1842 if (ret)
1843 return ret;
1844
1845 instr->state = MTD_ERASE_DONE;
1846 mtd_erase_callback(instr);
1847
1848 return 0;
1849}
1850
1851static void cfi_intelext_sync (struct mtd_info *mtd)
1852{
1853 struct map_info *map = mtd->priv;
1854 struct cfi_private *cfi = map->fldrv_priv;
1855 int i;
1856 struct flchip *chip;
1857 int ret = 0;
1858
1859 for (i=0; !ret && i<cfi->numchips; i++) {
1860 chip = &cfi->chips[i];
1861
1862 spin_lock(chip->mutex);
1863 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1864
1865 if (!ret) {
1866 chip->oldstate = chip->state;
1867 chip->state = FL_SYNCING;
1868 /* No need to wake_up() on this state change -
1869 * as the whole point is that nobody can do anything
1870 * with the chip now anyway.
1871 */
1872 }
1873 spin_unlock(chip->mutex);
1874 }
1875
1876 /* Unlock the chips again */
1877
1878 for (i--; i >=0; i--) {
1879 chip = &cfi->chips[i];
1880
1881 spin_lock(chip->mutex);
1882
1883 if (chip->state == FL_SYNCING) {
1884 chip->state = chip->oldstate;
1885 wake_up(&chip->wq);
1886 }
1887 spin_unlock(chip->mutex);
1888 }
1889}
1890
1891#ifdef DEBUG_LOCK_BITS
1892static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1893 struct flchip *chip,
1894 unsigned long adr,
1895 int len, void *thunk)
1896{
1897 struct cfi_private *cfi = map->fldrv_priv;
1898 int status, ofs_factor = cfi->interleave * cfi->device_type;
1899
1900 xip_disable(map, chip, adr+(2*ofs_factor));
1901 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1902 chip->state = FL_JEDEC_QUERY;
1903 status = cfi_read_query(map, adr+(2*ofs_factor));
1904 xip_enable(map, chip, 0);
1905 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1906 adr, status);
1907 return 0;
1908}
1909#endif
1910
1911#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1912#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
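
/*
 * Intel lock-bit command sequence used by do_xxlock_oneblock() below:
 * 0x60 (Set/Clear Block Lock-Bit setup) followed by 0x01 sets the
 * block's lock bit, while 0x60 followed by 0xD0 clears it.
 */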
1913
1914static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1915 unsigned long adr, int len, void *thunk)
1916{
1917 struct cfi_private *cfi = map->fldrv_priv;
1918 map_word status, status_OK;
1919 unsigned long timeo = jiffies + HZ;
1920 int ret;
1921
1922 adr += chip->start;
1923
1924 /* Let's determine this according to the interleave only once */
1925 status_OK = CMD(0x80);
1926
1927 spin_lock(chip->mutex);
1928 ret = get_chip(map, chip, adr, FL_LOCKING);
1929 if (ret) {
1930 spin_unlock(chip->mutex);
1931 return ret;
1932 }
1933
1934 ENABLE_VPP(map);
1935 xip_disable(map, chip, adr);
1936
1937 map_write(map, CMD(0x60), adr);
1938 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1939 map_write(map, CMD(0x01), adr);
1940 chip->state = FL_LOCKING;
1941 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1942 map_write(map, CMD(0xD0), adr);
1943 chip->state = FL_UNLOCKING;
1944 } else
1945 BUG();
1946
1947 spin_unlock(chip->mutex);
1948 UDELAY(map, chip, adr, 1000000/HZ);
1949 spin_lock(chip->mutex);
1950
1951 /* FIXME. Use a timer to check this, and return immediately. */
1952 /* Once the state machine's known to be working I'll do that */
1953
1954 timeo = jiffies + (HZ*20);
1955 for (;;) {
1956
1957 status = map_read(map, adr);
1958 if (map_word_andequal(map, status, status_OK, status_OK))
1959 break;
1960
1961 /* OK Still waiting */
1962 if (time_after(jiffies, timeo)) {
1963 map_word Xstatus;
1964 map_write(map, CMD(0x70), adr);
1965 chip->state = FL_STATUS;
1966 Xstatus = map_read(map, adr);
1967 xip_enable(map, chip, adr);
1968			printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1969 status.x[0], Xstatus.x[0]);
1970 put_chip(map, chip, adr);
1971 spin_unlock(chip->mutex);
1972 return -EIO;
1973 }
1974
1975 /* Latency issues. Drop the lock, wait a while and retry */
1976 spin_unlock(chip->mutex);
1977 UDELAY(map, chip, adr, 1);
1978 spin_lock(chip->mutex);
1979 }
1980
1981 /* Done and happy. */
1982 chip->state = FL_STATUS;
1983 xip_enable(map, chip, adr);
1984 put_chip(map, chip, adr);
1985 spin_unlock(chip->mutex);
1986 return 0;
1987}
1988
1989static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1990{
1991 int ret;
1992
1993#ifdef DEBUG_LOCK_BITS
1994 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1995 __FUNCTION__, ofs, len);
1996 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1997 ofs, len, 0);
1998#endif
1999
2000 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2001 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2002
2003#ifdef DEBUG_LOCK_BITS
2004 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2005 __FUNCTION__, ret);
2006 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2007 ofs, len, 0);
2008#endif
2009
2010 return ret;
2011}
2012
2013static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2014{
2015 int ret;
2016
2017#ifdef DEBUG_LOCK_BITS
2018 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2019 __FUNCTION__, ofs, len);
2020 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2021 ofs, len, 0);
2022#endif
2023
2024 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2025 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2026
2027#ifdef DEBUG_LOCK_BITS
2028 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2029 __FUNCTION__, ret);
2030 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2031 ofs, len, 0);
2032#endif
2033
2034 return ret;
2035}
2036
2037static int cfi_intelext_suspend(struct mtd_info *mtd)
2038{
2039 struct map_info *map = mtd->priv;
2040 struct cfi_private *cfi = map->fldrv_priv;
2041 int i;
2042 struct flchip *chip;
2043 int ret = 0;
2044
2045 for (i=0; !ret && i<cfi->numchips; i++) {
2046 chip = &cfi->chips[i];
2047
2048 spin_lock(chip->mutex);
2049
2050 switch (chip->state) {
2051 case FL_READY:
2052 case FL_STATUS:
2053 case FL_CFI_QUERY:
2054 case FL_JEDEC_QUERY:
2055 if (chip->oldstate == FL_READY) {
2056 chip->oldstate = chip->state;
2057 chip->state = FL_PM_SUSPENDED;
2058 /* No need to wake_up() on this state change -
2059 * as the whole point is that nobody can do anything
2060 * with the chip now anyway.
2061 */
2062 } else {
2063 /* There seems to be an operation pending. We must wait for it. */
2064 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2065 ret = -EAGAIN;
2066 }
2067 break;
2068 default:
2069 /* Should we actually wait? Once upon a time these routines weren't
2070 allowed to. Or should we return -EAGAIN, because the upper layers
2071 ought to have already shut down anything which was using the device
2072 anyway? The latter for now. */
2073			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2074 ret = -EAGAIN;
2075 case FL_PM_SUSPENDED:
2076 break;
2077 }
2078 spin_unlock(chip->mutex);
2079 }
2080
2081 /* Unlock the chips again */
2082
2083 if (ret) {
2084 for (i--; i >=0; i--) {
2085 chip = &cfi->chips[i];
2086
2087 spin_lock(chip->mutex);
2088
2089 if (chip->state == FL_PM_SUSPENDED) {
2090 /* No need to force it into a known state here,
2091 because we're returning failure, and it didn't
2092 get power cycled */
2093 chip->state = chip->oldstate;
2094 chip->oldstate = FL_READY;
2095 wake_up(&chip->wq);
2096 }
2097 spin_unlock(chip->mutex);
2098 }
2099 }
2100
2101 return ret;
2102}
2103
2104static void cfi_intelext_resume(struct mtd_info *mtd)
2105{
2106 struct map_info *map = mtd->priv;
2107 struct cfi_private *cfi = map->fldrv_priv;
2108 int i;
2109 struct flchip *chip;
2110
2111 for (i=0; i<cfi->numchips; i++) {
2112
2113 chip = &cfi->chips[i];
2114
2115 spin_lock(chip->mutex);
2116
2117 /* Go to known state. Chip may have been power cycled */
2118 if (chip->state == FL_PM_SUSPENDED) {
2119 map_write(map, CMD(0xFF), cfi->chips[i].start);
2120 chip->oldstate = chip->state = FL_READY;
2121 wake_up(&chip->wq);
2122 }
2123
2124 spin_unlock(chip->mutex);
2125 }
2126}
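
/*
 * The CMD(0xFF) above is the Read Array command: after suspend/resume
 * the part may have been power cycled into an arbitrary state, so each
 * chip is forced back into array-read mode before its waiters are woken.
 */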
2127
2128static void cfi_intelext_destroy(struct mtd_info *mtd)
2129{
2130 struct map_info *map = mtd->priv;
2131 struct cfi_private *cfi = map->fldrv_priv;
2132 kfree(cfi->cmdset_priv);
2133 kfree(cfi->cfiq);
2134 kfree(cfi->chips[0].priv);
2135 kfree(cfi);
2136 kfree(mtd->eraseregions);
2137}
2138
2139static char im_name_1[]="cfi_cmdset_0001";
2140static char im_name_3[]="cfi_cmdset_0003";
2141
2142static int __init cfi_intelext_init(void)
2143{
2144 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
2145	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001); /* 0x0003 chips are handled by the 0001 code */
2146 return 0;
2147}
2148
2149static void __exit cfi_intelext_exit(void)
2150{
2151 inter_module_unregister(im_name_1);
2152 inter_module_unregister(im_name_3);
2153}
2154
2155module_init(cfi_intelext_init);
2156module_exit(cfi_intelext_exit);
2157
2158MODULE_LICENSE("GPL");
2159MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2160MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
new file mode 100644
index 000000000000..fca8ff6f7e14
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -0,0 +1,1515 @@
1/*
2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 *
8 * 2_by_8 routines added by Simon Munton
9 *
10 * 4_by_16 work by Carolyn J. Smith
11 *
12 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
13 *
14 * This code is GPL
15 *
16 * $Id: cfi_cmdset_0002.c,v 1.114 2004/12/11 15:43:53 dedekind Exp $
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/compatmac.h>
34#include <linux/mtd/map.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/cfi.h>
37
38#define AMD_BOOTLOC_BUG
39#define FORCE_WORD_WRITE 0
40
41#define MAX_WORD_RETRIES 3
42
43#define MANUFACTURER_AMD 0x0001
44#define MANUFACTURER_SST 0x00BF
45#define SST49LF004B 0x0060
46
47static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
48static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
49static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
50static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
51static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
52static void cfi_amdstd_sync (struct mtd_info *);
53static int cfi_amdstd_suspend (struct mtd_info *);
54static void cfi_amdstd_resume (struct mtd_info *);
55static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56
57static void cfi_amdstd_destroy(struct mtd_info *);
58
59struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
60static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
61
62static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
63static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
64#include "fwh_lock.h"
65
66static struct mtd_chip_driver cfi_amdstd_chipdrv = {
67 .probe = NULL, /* Not usable directly */
68 .destroy = cfi_amdstd_destroy,
69 .name = "cfi_cmdset_0002",
70 .module = THIS_MODULE
71};
72
73
74/* #define DEBUG_CFI_FEATURES */
75
76
77#ifdef DEBUG_CFI_FEATURES
78static void cfi_tell_features(struct cfi_pri_amdstd *extp)
79{
80 const char* erase_suspend[3] = {
81 "Not supported", "Read only", "Read/write"
82 };
83 const char* top_bottom[6] = {
84 "No WP", "8x8KiB sectors at top & bottom, no WP",
85 "Bottom boot", "Top boot",
86 "Uniform, Bottom WP", "Uniform, Top WP"
87 };
88
89 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
90 printk(" Address sensitive unlock: %s\n",
91 (extp->SiliconRevision & 1) ? "Not required" : "Required");
92
93 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
94 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
95 else
96 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
97
98 if (extp->BlkProt == 0)
99 printk(" Block protection: Not supported\n");
100 else
101 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
102
103
104 printk(" Temporary block unprotect: %s\n",
105 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
106 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
107 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
108 printk(" Burst mode: %s\n",
109 extp->BurstMode ? "Supported" : "Not supported");
110 if (extp->PageMode == 0)
111 printk(" Page mode: Not supported\n");
112 else
113 printk(" Page mode: %d word page\n", extp->PageMode << 2);
114
115 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
116 extp->VppMin >> 4, extp->VppMin & 0xf);
117 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
118 extp->VppMax >> 4, extp->VppMax & 0xf);
119
120 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
121 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
122 else
123 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
124}
125#endif
126
127#ifdef AMD_BOOTLOC_BUG
128/* Wheee. Bring me the head of someone at AMD. */
129static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
130{
131 struct map_info *map = mtd->priv;
132 struct cfi_private *cfi = map->fldrv_priv;
133 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
134 __u8 major = extp->MajorVersion;
135 __u8 minor = extp->MinorVersion;
136
137 if (((major << 8) | minor) < 0x3131) {
138 /* CFI version 1.0 => don't trust bootloc */
139 if (cfi->id & 0x80) {
140 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
141 extp->TopBottom = 3; /* top boot */
142 } else {
143 extp->TopBottom = 2; /* bottom boot */
144 }
145 }
146}
147#endif
148
149static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
150{
151 struct map_info *map = mtd->priv;
152 struct cfi_private *cfi = map->fldrv_priv;
153 if (cfi->cfiq->BufWriteTimeoutTyp) {
154 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
155 mtd->write = cfi_amdstd_write_buffers;
156 }
157}
158
159static void fixup_use_secsi(struct mtd_info *mtd, void *param)
160{
161 /* Setup for chips with a secsi area */
162 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
163 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
164}
165
166static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
167{
168 struct map_info *map = mtd->priv;
169 struct cfi_private *cfi = map->fldrv_priv;
170 if ((cfi->cfiq->NumEraseRegions == 1) &&
171 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
172 mtd->erase = cfi_amdstd_erase_chip;
173 }
174
175}
176
177static struct cfi_fixup cfi_fixup_table[] = {
178#ifdef AMD_BOOTLOC_BUG
179 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
180#endif
181 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
182 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
183 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
184 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
185 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
186 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
187#if !FORCE_WORD_WRITE
188 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
189#endif
190 { 0, 0, NULL, NULL }
191};
192static struct cfi_fixup jedec_fixup_table[] = {
193 { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
194 { 0, 0, NULL, NULL }
195};
196
197static struct cfi_fixup fixup_table[] = {
198	/* The CFI vendor IDs and the JEDEC vendor IDs appear
199	 * to be common.  It is likely that the device IDs are
200	 * as well.  This table picks up all the cases where
201	 * we know that to be true.
202 */
203 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
204 { 0, 0, NULL, NULL }
205};
206
207
208struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
209{
210 struct cfi_private *cfi = map->fldrv_priv;
211 struct mtd_info *mtd;
212 int i;
213
214 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
215 if (!mtd) {
216 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
217 return NULL;
218 }
219 memset(mtd, 0, sizeof(*mtd));
220 mtd->priv = map;
221 mtd->type = MTD_NORFLASH;
222
223 /* Fill in the default mtd operations */
224 mtd->erase = cfi_amdstd_erase_varsize;
225 mtd->write = cfi_amdstd_write_words;
226 mtd->read = cfi_amdstd_read;
227 mtd->sync = cfi_amdstd_sync;
228 mtd->suspend = cfi_amdstd_suspend;
229 mtd->resume = cfi_amdstd_resume;
230 mtd->flags = MTD_CAP_NORFLASH;
231 mtd->name = map->name;
232
233 if (cfi->cfi_mode==CFI_MODE_CFI){
234 unsigned char bootloc;
235 /*
236 * It's a real CFI chip, not one for which the probe
237 * routine faked a CFI structure. So we read the feature
238 * table from it.
239 */
240 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
241 struct cfi_pri_amdstd *extp;
242
243 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
244 if (!extp) {
245 kfree(mtd);
246 return NULL;
247 }
248
249 /* Install our own private info structure */
250 cfi->cmdset_priv = extp;
251
252 /* Apply cfi device specific fixups */
253 cfi_fixup(mtd, cfi_fixup_table);
254
255#ifdef DEBUG_CFI_FEATURES
256 /* Tell the user about it in lots of lovely detail */
257 cfi_tell_features(extp);
258#endif
259
260 bootloc = extp->TopBottom;
261 if ((bootloc != 2) && (bootloc != 3)) {
262 printk(KERN_WARNING "%s: CFI does not contain boot "
263			       "bank location. Assuming bottom.\n", map->name);
264			bootloc = 2;	/* bottom boot */
265 }
266
267 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
268 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
269
270 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
271 int j = (cfi->cfiq->NumEraseRegions-1)-i;
272 __u32 swap;
273
274 swap = cfi->cfiq->EraseRegionInfo[i];
275 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
276 cfi->cfiq->EraseRegionInfo[j] = swap;
277 }
278 }
279 /* Set the default CFI lock/unlock addresses */
280 cfi->addr_unlock1 = 0x555;
281 cfi->addr_unlock2 = 0x2aa;
282 /* Modify the unlock address if we are in compatibility mode */
283 if ( /* x16 in x8 mode */
284 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
285 (cfi->cfiq->InterfaceDesc == 2)) ||
286 /* x32 in x16 mode */
287 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
288 (cfi->cfiq->InterfaceDesc == 4)))
289 {
290 cfi->addr_unlock1 = 0xaaa;
291 cfi->addr_unlock2 = 0x555;
292 }
293
294 } /* CFI mode */
295 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
296 /* Apply jedec specific fixups */
297 cfi_fixup(mtd, jedec_fixup_table);
298 }
299 /* Apply generic fixups */
300 cfi_fixup(mtd, fixup_table);
301
302 for (i=0; i< cfi->numchips; i++) {
303 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
304 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
305 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
306 }
307
308 map->fldrv = &cfi_amdstd_chipdrv;
309
310 return cfi_amdstd_setup(mtd);
311}
312
313
314static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
315{
316 struct map_info *map = mtd->priv;
317 struct cfi_private *cfi = map->fldrv_priv;
318 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
319 unsigned long offset = 0;
320 int i,j;
321
322 printk(KERN_NOTICE "number of %s chips: %d\n",
323 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
324 /* Select the correct geometry setup */
325 mtd->size = devsize * cfi->numchips;
326
327 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
328 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
329 * mtd->numeraseregions, GFP_KERNEL);
330 if (!mtd->eraseregions) {
331 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
332 goto setup_err;
333 }
334
335 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
336 unsigned long ernum, ersize;
337 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
338 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
339
340 if (mtd->erasesize < ersize) {
341 mtd->erasesize = ersize;
342 }
343 for (j=0; j<cfi->numchips; j++) {
344 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
345 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
346 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
347 }
348 offset += (ersize * ernum);
349 }
350 if (offset != devsize) {
351 /* Argh */
352 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
353 goto setup_err;
354 }
355#if 0
356 // debug
357 for (i=0; i<mtd->numeraseregions;i++){
358 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
359 i,mtd->eraseregions[i].offset,
360 mtd->eraseregions[i].erasesize,
361 mtd->eraseregions[i].numblocks);
362 }
363#endif
364
365 /* FIXME: erase-suspend-program is broken. See
366 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
367 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
368
369 __module_get(THIS_MODULE);
370 return mtd;
371
372 setup_err:
373 if(mtd) {
374 if(mtd->eraseregions)
375 kfree(mtd->eraseregions);
376 kfree(mtd);
377 }
378 kfree(cfi->cmdset_priv);
379 kfree(cfi->cfiq);
380 return NULL;
381}
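
/*
 * Worked example for the EraseRegionInfo decoding above (illustrative
 * values, non-interleaved chip): a region of 64 blocks of 64KiB is
 * encoded as 0x0100003F, giving
 *
 *	ersize = ((0x0100003F >> 8) & ~0xff) * 1 = 0x10000	(64KiB)
 *	ernum  = (0x0100003F & 0xffff) + 1       = 64
 *
 * so the region contributes 4MiB towards the devsize consistency check.
 */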
382
383/*
384 * Return true if the chip is ready.
385 *
386 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
387 * non-suspended sector) and is indicated by no toggle bits toggling.
388 *
389 * Note that anything more complicated than checking if no bits are toggling
390 * (including checking DQ5 for an error status) is tricky to get working
391 * correctly and is therefore not done (particularly with interleaved chips
392 * as each chip must be checked independently of the others).
393 */
394static int chip_ready(struct map_info *map, unsigned long addr)
395{
396 map_word d, t;
397
398 d = map_read(map, addr);
399 t = map_read(map, addr);
400
401 return map_word_equal(map, d, t);
402}
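
/*
 * The double read works because of the DQ6 "toggle bit": while an
 * embedded program or erase algorithm is running, AMD-style chips
 * toggle DQ6 on every read cycle, so two back-to-back reads of the same
 * address only return equal data once the operation has completed.
 */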
403
404static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
405{
406 DECLARE_WAITQUEUE(wait, current);
407 struct cfi_private *cfi = map->fldrv_priv;
408 unsigned long timeo;
409 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
410
411 resettime:
412 timeo = jiffies + HZ;
413 retry:
414 switch (chip->state) {
415
416 case FL_STATUS:
417 for (;;) {
418 if (chip_ready(map, adr))
419 break;
420
421 if (time_after(jiffies, timeo)) {
422 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
423 cfi_spin_unlock(chip->mutex);
424 return -EIO;
425 }
426 cfi_spin_unlock(chip->mutex);
427 cfi_udelay(1);
428 cfi_spin_lock(chip->mutex);
429 /* Someone else might have been playing with it. */
430 goto retry;
431 }
432
433 case FL_READY:
434 case FL_CFI_QUERY:
435 case FL_JEDEC_QUERY:
436 return 0;
437
438 case FL_ERASING:
439 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
440 goto sleep;
441
442 if (!(mode == FL_READY || mode == FL_POINT
443 || !cfip
444 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
445 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
446 goto sleep;
447
448 /* We could check to see if we're trying to access the sector
449 * that is currently being erased. However, no user will try
450 * anything like that so we just wait for the timeout. */
451
452 /* Erase suspend */
453 /* It's harmless to issue the Erase-Suspend and Erase-Resume
454 * commands when the erase algorithm isn't in progress. */
455 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
456 chip->oldstate = FL_ERASING;
457 chip->state = FL_ERASE_SUSPENDING;
458 chip->erase_suspended = 1;
459 for (;;) {
460 if (chip_ready(map, adr))
461 break;
462
463 if (time_after(jiffies, timeo)) {
464 /* Should have suspended the erase by now.
465 * Send an Erase-Resume command as either
466 * there was an error (so leave the erase
467				 * routine to recover from it) or we were trying to
468 * use the erase-in-progress sector. */
469 map_write(map, CMD(0x30), chip->in_progress_block_addr);
470 chip->state = FL_ERASING;
471 chip->oldstate = FL_READY;
472 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
473 return -EIO;
474 }
475
476 cfi_spin_unlock(chip->mutex);
477 cfi_udelay(1);
478 cfi_spin_lock(chip->mutex);
479 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
480 So we can just loop here. */
481 }
482 chip->state = FL_READY;
483 return 0;
484
485 case FL_POINT:
486 /* Only if there's no operation suspended... */
487 if (mode == FL_READY && chip->oldstate == FL_READY)
488 return 0;
489
490 default:
491 sleep:
492 set_current_state(TASK_UNINTERRUPTIBLE);
493 add_wait_queue(&chip->wq, &wait);
494 cfi_spin_unlock(chip->mutex);
495 schedule();
496 remove_wait_queue(&chip->wq, &wait);
497 cfi_spin_lock(chip->mutex);
498 goto resettime;
499 }
500}
501
502
503static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
504{
505 struct cfi_private *cfi = map->fldrv_priv;
506
507 switch(chip->oldstate) {
508 case FL_ERASING:
509 chip->state = chip->oldstate;
510 map_write(map, CMD(0x30), chip->in_progress_block_addr);
511 chip->oldstate = FL_READY;
512 chip->state = FL_ERASING;
513 break;
514
515 case FL_READY:
516 case FL_STATUS:
517 /* We should really make set_vpp() count, rather than doing this */
518 DISABLE_VPP(map);
519 break;
520 default:
521 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
522 }
523 wake_up(&chip->wq);
524}
525
526
527static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
528{
529 unsigned long cmd_addr;
530 struct cfi_private *cfi = map->fldrv_priv;
531 int ret;
532
533 adr += chip->start;
534
535 /* Ensure cmd read/writes are aligned. */
536 cmd_addr = adr & ~(map_bankwidth(map)-1);
537
538 cfi_spin_lock(chip->mutex);
539 ret = get_chip(map, chip, cmd_addr, FL_READY);
540 if (ret) {
541 cfi_spin_unlock(chip->mutex);
542 return ret;
543 }
544
545 if (chip->state != FL_POINT && chip->state != FL_READY) {
546 map_write(map, CMD(0xf0), cmd_addr);
547 chip->state = FL_READY;
548 }
549
550 map_copy_from(map, buf, adr, len);
551
552 put_chip(map, chip, cmd_addr);
553
554 cfi_spin_unlock(chip->mutex);
555 return 0;
556}
557
558
559static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
560{
561 struct map_info *map = mtd->priv;
562 struct cfi_private *cfi = map->fldrv_priv;
563 unsigned long ofs;
564 int chipnum;
565 int ret = 0;
566
567 /* ofs: offset within the first chip that the first read should start */
568
569 chipnum = (from >> cfi->chipshift);
570 ofs = from - (chipnum << cfi->chipshift);
571
572
573 *retlen = 0;
574
575 while (len) {
576 unsigned long thislen;
577
578 if (chipnum >= cfi->numchips)
579 break;
580
581 if ((len + ofs -1) >> cfi->chipshift)
582 thislen = (1<<cfi->chipshift) - ofs;
583 else
584 thislen = len;
585
586 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
587 if (ret)
588 break;
589
590 *retlen += thislen;
591 len -= thislen;
592 buf += thislen;
593
594 ofs = 0;
595 chipnum++;
596 }
597 return ret;
598}
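
/*
 * Illustration of the chip/offset split above, with an assumed
 * chipshift of 22 (i.e. 4MiB chips): a read at from == 0x500000 yields
 * chipnum = 0x500000 >> 22 = 1 and ofs = 0x500000 - 0x400000 = 0x100000,
 * i.e. 1MiB into the second chip; each loop pass is then clipped at the
 * chip boundary and continues on the next chip with ofs = 0.
 */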
599
600
601static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
602{
603 DECLARE_WAITQUEUE(wait, current);
604 unsigned long timeo = jiffies + HZ;
605 struct cfi_private *cfi = map->fldrv_priv;
606
607 retry:
608 cfi_spin_lock(chip->mutex);
609
610 if (chip->state != FL_READY){
611#if 0
612 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
613#endif
614 set_current_state(TASK_UNINTERRUPTIBLE);
615 add_wait_queue(&chip->wq, &wait);
616
617 cfi_spin_unlock(chip->mutex);
618
619 schedule();
620 remove_wait_queue(&chip->wq, &wait);
621#if 0
622 if(signal_pending(current))
623 return -EINTR;
624#endif
625 timeo = jiffies + HZ;
626
627 goto retry;
628 }
629
630 adr += chip->start;
631
632 chip->state = FL_READY;
633
634 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
635 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
636 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
637
638 map_copy_from(map, buf, adr, len);
639
640 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
641 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
642 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
643 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
644
645 wake_up(&chip->wq);
646 cfi_spin_unlock(chip->mutex);
647
648 return 0;
649}
650
651static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
652{
653 struct map_info *map = mtd->priv;
654 struct cfi_private *cfi = map->fldrv_priv;
655 unsigned long ofs;
656 int chipnum;
657 int ret = 0;
658
659
660 /* ofs: offset within the first chip that the first read should start */
661
662 /* 8 secsi bytes per chip */
663 chipnum=from>>3;
664 ofs=from & 7;
665
666
667 *retlen = 0;
668
669 while (len) {
670 unsigned long thislen;
671
672 if (chipnum >= cfi->numchips)
673 break;
674
675 if ((len + ofs -1) >> 3)
676 thislen = (1<<3) - ofs;
677 else
678 thislen = len;
679
680 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
681 if (ret)
682 break;
683
684 *retlen += thislen;
685 len -= thislen;
686 buf += thislen;
687
688 ofs = 0;
689 chipnum++;
690 }
691 return ret;
692}
693
694
695static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
696{
697 struct cfi_private *cfi = map->fldrv_priv;
698 unsigned long timeo = jiffies + HZ;
699 /*
700	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
701	 * have a max write time of a few hundred microseconds). However, we
702	 * should use the maximum timeout value given by the chip at probe
703	 * time instead.  Unfortunately, struct flchip does not have a field
704	 * for the maximum timeout, only for the typical one, which can be far
705	 * too short depending on the conditions.  The ' + 1' avoids a timeout
706	 * of 0 jiffies if HZ is smaller than 1000 (e.g. HZ=100 gives one 10ms jiffy).
707 */
708 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
709 int ret = 0;
710 map_word oldd;
711 int retry_cnt = 0;
712
713 adr += chip->start;
714
715 cfi_spin_lock(chip->mutex);
716 ret = get_chip(map, chip, adr, FL_WRITING);
717 if (ret) {
718 cfi_spin_unlock(chip->mutex);
719 return ret;
720 }
721
722 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
723 __func__, adr, datum.x[0] );
724
725 /*
726 * Check for a NOP for the case when the datum to write is already
727 * present - it saves time and works around buggy chips that corrupt
728 * data at other locations when 0xff is written to a location that
729 * already contains 0xff.
730 */
731 oldd = map_read(map, adr);
732 if (map_word_equal(map, oldd, datum)) {
733 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
734 __func__);
735 goto op_done;
736 }
737
738 ENABLE_VPP(map);
739 retry:
740 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
741 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
742 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
743 map_write(map, datum, adr);
744 chip->state = FL_WRITING;
745
746 cfi_spin_unlock(chip->mutex);
747 cfi_udelay(chip->word_write_time);
748 cfi_spin_lock(chip->mutex);
749
750 /* See comment above for timeout value. */
751 timeo = jiffies + uWriteTimeout;
752 for (;;) {
753 if (chip->state != FL_WRITING) {
754 /* Someone's suspended the write. Sleep */
755 DECLARE_WAITQUEUE(wait, current);
756
757 set_current_state(TASK_UNINTERRUPTIBLE);
758 add_wait_queue(&chip->wq, &wait);
759 cfi_spin_unlock(chip->mutex);
760 schedule();
761 remove_wait_queue(&chip->wq, &wait);
762 timeo = jiffies + (HZ / 2); /* FIXME */
763 cfi_spin_lock(chip->mutex);
764 continue;
765 }
766
767 if (chip_ready(map, adr))
768 goto op_done;
769
770 if (time_after(jiffies, timeo))
771 break;
772
773 /* Latency issues. Drop the lock, wait a while and retry */
774 cfi_spin_unlock(chip->mutex);
775 cfi_udelay(1);
776 cfi_spin_lock(chip->mutex);
777 }
778
779 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
780
781 /* reset on all failures. */
782 map_write( map, CMD(0xF0), chip->start );
783 /* FIXME - should have reset delay before continuing */
784 if (++retry_cnt <= MAX_WORD_RETRIES)
785 goto retry;
786
787 ret = -EIO;
788 op_done:
789 chip->state = FL_READY;
790 put_chip(map, chip, adr);
791 cfi_spin_unlock(chip->mutex);
792
793 return ret;
794}
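
/*
 * The three cfi_send_gen_cmd() calls in the retry path above form the
 * standard AMD/Fujitsu word-program sequence: 0xAA at the first unlock
 * address, 0x55 at the second, 0xA0 (program) at the first, then the
 * datum written directly to the destination.  The unlock addresses
 * (0x555/0x2aa by default) were chosen in cfi_cmdset_0002().
 */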
795
796
797static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
798 size_t *retlen, const u_char *buf)
799{
800 struct map_info *map = mtd->priv;
801 struct cfi_private *cfi = map->fldrv_priv;
802 int ret = 0;
803 int chipnum;
804 unsigned long ofs, chipstart;
805 DECLARE_WAITQUEUE(wait, current);
806
807 *retlen = 0;
808 if (!len)
809 return 0;
810
811 chipnum = to >> cfi->chipshift;
812 ofs = to - (chipnum << cfi->chipshift);
813 chipstart = cfi->chips[chipnum].start;
814
815	/* If it's not bus-aligned, do a partial word write first */
816 if (ofs & (map_bankwidth(map)-1)) {
817 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
818 int i = ofs - bus_ofs;
819 int n = 0;
820 map_word tmp_buf;
821
822 retry:
823 cfi_spin_lock(cfi->chips[chipnum].mutex);
824
825 if (cfi->chips[chipnum].state != FL_READY) {
826#if 0
827 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
828#endif
829 set_current_state(TASK_UNINTERRUPTIBLE);
830 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
831
832 cfi_spin_unlock(cfi->chips[chipnum].mutex);
833
834 schedule();
835 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
836#if 0
837 if(signal_pending(current))
838 return -EINTR;
839#endif
840 goto retry;
841 }
842
843 /* Load 'tmp_buf' with old contents of flash */
844 tmp_buf = map_read(map, bus_ofs+chipstart);
845
846 cfi_spin_unlock(cfi->chips[chipnum].mutex);
847
848 /* Number of bytes to copy from buffer */
849 n = min_t(int, len, map_bankwidth(map)-i);
850
851 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
852
853 ret = do_write_oneword(map, &cfi->chips[chipnum],
854 bus_ofs, tmp_buf);
855 if (ret)
856 return ret;
857
858 ofs += n;
859 buf += n;
860 (*retlen) += n;
861 len -= n;
862
863 if (ofs >> cfi->chipshift) {
864 chipnum ++;
865 ofs = 0;
866 if (chipnum == cfi->numchips)
867 return 0;
868 }
869 }
870
871 /* We are now aligned, write as much as possible */
872 while(len >= map_bankwidth(map)) {
873 map_word datum;
874
875 datum = map_word_load(map, buf);
876
877 ret = do_write_oneword(map, &cfi->chips[chipnum],
878 ofs, datum);
879 if (ret)
880 return ret;
881
882 ofs += map_bankwidth(map);
883 buf += map_bankwidth(map);
884 (*retlen) += map_bankwidth(map);
885 len -= map_bankwidth(map);
886
887 if (ofs >> cfi->chipshift) {
888 chipnum ++;
889 ofs = 0;
890 if (chipnum == cfi->numchips)
891 return 0;
892 chipstart = cfi->chips[chipnum].start;
893 }
894 }
895
896 /* Write the trailing bytes if any */
897 if (len & (map_bankwidth(map)-1)) {
898 map_word tmp_buf;
899
900 retry1:
901 cfi_spin_lock(cfi->chips[chipnum].mutex);
902
903 if (cfi->chips[chipnum].state != FL_READY) {
904#if 0
905 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
906#endif
907 set_current_state(TASK_UNINTERRUPTIBLE);
908 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
909
910 cfi_spin_unlock(cfi->chips[chipnum].mutex);
911
912 schedule();
913 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
914#if 0
915 if(signal_pending(current))
916 return -EINTR;
917#endif
918 goto retry1;
919 }
920
921 tmp_buf = map_read(map, ofs + chipstart);
922
923 cfi_spin_unlock(cfi->chips[chipnum].mutex);
924
925 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
926
927 ret = do_write_oneword(map, &cfi->chips[chipnum],
928 ofs, tmp_buf);
929 if (ret)
930 return ret;
931
932 (*retlen) += len;
933 }
934
935 return 0;
936}
937
938
939/*
940 * FIXME: interleaved mode not tested, and probably not supported!
941 */
942static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
943 unsigned long adr, const u_char *buf, int len)
944{
945 struct cfi_private *cfi = map->fldrv_priv;
946 unsigned long timeo = jiffies + HZ;
947	/* see comments in do_write_oneword() regarding uWriteTimeout. */
948 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
949 int ret = -EIO;
950 unsigned long cmd_adr;
951 int z, words;
952 map_word datum;
953
954 adr += chip->start;
955 cmd_adr = adr;
956
957 cfi_spin_lock(chip->mutex);
958 ret = get_chip(map, chip, adr, FL_WRITING);
959 if (ret) {
960 cfi_spin_unlock(chip->mutex);
961 return ret;
962 }
963
964 datum = map_word_load(map, buf);
965
966 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
967 __func__, adr, datum.x[0] );
968
969 ENABLE_VPP(map);
970 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
971 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
972 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
973
974 /* Write Buffer Load */
975 map_write(map, CMD(0x25), cmd_adr);
976
977 chip->state = FL_WRITING_TO_BUFFER;
978
979 /* Write length of data to come */
980 words = len / map_bankwidth(map);
981 map_write(map, CMD(words - 1), cmd_adr);
982 /* Write data */
983 z = 0;
984 while(z < words * map_bankwidth(map)) {
985 datum = map_word_load(map, buf);
986 map_write(map, datum, adr + z);
987
988 z += map_bankwidth(map);
989 buf += map_bankwidth(map);
990 }
991 z -= map_bankwidth(map);
992
993 adr += z;
994
995 /* Write Buffer Program Confirm: GO GO GO */
996 map_write(map, CMD(0x29), cmd_adr);
997 chip->state = FL_WRITING;
998
999 cfi_spin_unlock(chip->mutex);
1000 cfi_udelay(chip->buffer_write_time);
1001 cfi_spin_lock(chip->mutex);
1002
1003 timeo = jiffies + uWriteTimeout;
1004
1005 for (;;) {
1006 if (chip->state != FL_WRITING) {
1007 /* Someone's suspended the write. Sleep */
1008 DECLARE_WAITQUEUE(wait, current);
1009
1010 set_current_state(TASK_UNINTERRUPTIBLE);
1011 add_wait_queue(&chip->wq, &wait);
1012 cfi_spin_unlock(chip->mutex);
1013 schedule();
1014 remove_wait_queue(&chip->wq, &wait);
1015 timeo = jiffies + (HZ / 2); /* FIXME */
1016 cfi_spin_lock(chip->mutex);
1017 continue;
1018 }
1019
1020 if (chip_ready(map, adr))
1021 goto op_done;
1022
1023 if( time_after(jiffies, timeo))
1024 break;
1025
1026 /* Latency issues. Drop the lock, wait a while and retry */
1027 cfi_spin_unlock(chip->mutex);
1028 cfi_udelay(1);
1029 cfi_spin_lock(chip->mutex);
1030 }
1031
1032 printk(KERN_WARNING "MTD %s(): software timeout\n",
1033 __func__ );
1034
1035 /* reset on all failures. */
1036 map_write( map, CMD(0xF0), chip->start );
1037 /* FIXME - should have reset delay before continuing */
1038
1039 ret = -EIO;
1040 op_done:
1041 chip->state = FL_READY;
1042 put_chip(map, chip, adr);
1043 cfi_spin_unlock(chip->mutex);
1044
1045 return ret;
1046}
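
/*
 * Summary of the buffered-write sequence used above: the usual unlock
 * cycles (0xAA/0x55), 0x25 (Write to Buffer) at the sector address, a
 * word count of "words - 1", the data words themselves, then 0x29
 * (Program Buffer to Flash) to kick off the operation; completion is
 * again detected by the toggle-bit test in chip_ready().
 */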
1047
1048
1049static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1050 size_t *retlen, const u_char *buf)
1051{
1052 struct map_info *map = mtd->priv;
1053 struct cfi_private *cfi = map->fldrv_priv;
1054 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1055 int ret = 0;
1056 int chipnum;
1057 unsigned long ofs;
1058
1059 *retlen = 0;
1060 if (!len)
1061 return 0;
1062
1063 chipnum = to >> cfi->chipshift;
1064 ofs = to - (chipnum << cfi->chipshift);
1065
1066 /* If it's not bus-aligned, do the first word write */
1067 if (ofs & (map_bankwidth(map)-1)) {
1068 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1069 if (local_len > len)
1070 local_len = len;
1071 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1072 local_len, retlen, buf);
1073 if (ret)
1074 return ret;
1075 ofs += local_len;
1076 buf += local_len;
1077 len -= local_len;
1078
1079 if (ofs >> cfi->chipshift) {
1080 chipnum ++;
1081 ofs = 0;
1082 if (chipnum == cfi->numchips)
1083 return 0;
1084 }
1085 }
1086
1087 /* Write buffer is worth it only if more than one word to write... */
1088 while (len >= map_bankwidth(map) * 2) {
1089 /* We must not cross write block boundaries */
1090 int size = wbufsize - (ofs & (wbufsize-1));
1091
1092 if (size > len)
1093 size = len;
1094 if (size % map_bankwidth(map))
1095 size -= size % map_bankwidth(map);
1096
1097 ret = do_write_buffer(map, &cfi->chips[chipnum],
1098 ofs, buf, size);
1099 if (ret)
1100 return ret;
1101
1102 ofs += size;
1103 buf += size;
1104 (*retlen) += size;
1105 len -= size;
1106
1107 if (ofs >> cfi->chipshift) {
1108 chipnum ++;
1109 ofs = 0;
1110 if (chipnum == cfi->numchips)
1111 return 0;
1112 }
1113 }
1114
1115 if (len) {
1116 size_t retlen_dregs = 0;
1117
1118 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1119 len, &retlen_dregs, buf);
1120
1121 *retlen += retlen_dregs;
1122 return ret;
1123 }
1124
1125 return 0;
1126}
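
/*
 * Illustrative sizing for the wbufsize computation above: a chip whose
 * CFI table reports MaxBufWriteSize == 5 has a 2^5 = 32-byte write
 * buffer, so two such chips interleaved give wbufsize = 2 << 5 = 64,
 * and the loop splits writes so no chunk crosses a 64-byte boundary.
 */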
1127
1128
1129/*
1130 * Handle devices with one erase region that only implement
1131 * the chip erase command.
1132 */
1133static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
1134{
1135 struct cfi_private *cfi = map->fldrv_priv;
1136 unsigned long timeo = jiffies + HZ;
1137 unsigned long int adr;
1138 DECLARE_WAITQUEUE(wait, current);
1139 int ret = 0;
1140
1141 adr = cfi->addr_unlock1;
1142
1143 cfi_spin_lock(chip->mutex);
1144 ret = get_chip(map, chip, adr, FL_WRITING);
1145 if (ret) {
1146 cfi_spin_unlock(chip->mutex);
1147 return ret;
1148 }
1149
1150 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1151 __func__, chip->start );
1152
1153 ENABLE_VPP(map);
1154 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1155 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1156 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1157 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1158 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1159 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1160
1161 chip->state = FL_ERASING;
1162 chip->erase_suspended = 0;
1163 chip->in_progress_block_addr = adr;
1164
1165 cfi_spin_unlock(chip->mutex);
1166 msleep(chip->erase_time/2);
1167 cfi_spin_lock(chip->mutex);
1168
1169 timeo = jiffies + (HZ*20);
1170
1171 for (;;) {
1172 if (chip->state != FL_ERASING) {
1173 /* Someone's suspended the erase. Sleep */
1174 set_current_state(TASK_UNINTERRUPTIBLE);
1175 add_wait_queue(&chip->wq, &wait);
1176 cfi_spin_unlock(chip->mutex);
1177 schedule();
1178 remove_wait_queue(&chip->wq, &wait);
1179 cfi_spin_lock(chip->mutex);
1180 continue;
1181 }
1182 if (chip->erase_suspended) {
1183 /* This erase was suspended and resumed.
1184 Adjust the timeout */
1185 timeo = jiffies + (HZ*20); /* FIXME */
1186 chip->erase_suspended = 0;
1187 }
1188
1189 if (chip_ready(map, adr))
1190 goto op_done;
1191
1192 if (time_after(jiffies, timeo))
1193 break;
1194
1195 /* Latency issues. Drop the lock, wait a while and retry */
1196 cfi_spin_unlock(chip->mutex);
1197 set_current_state(TASK_UNINTERRUPTIBLE);
1198 schedule_timeout(1);
1199 cfi_spin_lock(chip->mutex);
1200 }
1201
1202 printk(KERN_WARNING "MTD %s(): software timeout\n",
1203 __func__ );
1204
1205 /* reset on all failures. */
1206 map_write( map, CMD(0xF0), chip->start );
1207 /* FIXME - should have reset delay before continuing */
1208
1209 ret = -EIO;
1210 op_done:
1211 chip->state = FL_READY;
1212 put_chip(map, chip, adr);
1213 cfi_spin_unlock(chip->mutex);
1214
1215 return ret;
1216}
1217
1218
1219static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1220{
1221 struct cfi_private *cfi = map->fldrv_priv;
1222 unsigned long timeo = jiffies + HZ;
1223 DECLARE_WAITQUEUE(wait, current);
1224 int ret = 0;
1225
1226 adr += chip->start;
1227
1228 cfi_spin_lock(chip->mutex);
1229 ret = get_chip(map, chip, adr, FL_ERASING);
1230 if (ret) {
1231 cfi_spin_unlock(chip->mutex);
1232 return ret;
1233 }
1234
1235 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1236 __func__, adr );
1237
1238 ENABLE_VPP(map);
1239 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1240 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1241 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1242 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1243 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1244 map_write(map, CMD(0x30), adr);
1245
1246 chip->state = FL_ERASING;
1247 chip->erase_suspended = 0;
1248 chip->in_progress_block_addr = adr;
1249
1250 cfi_spin_unlock(chip->mutex);
1251 msleep(chip->erase_time/2);
1252 cfi_spin_lock(chip->mutex);
1253
1254 timeo = jiffies + (HZ*20);
1255
1256 for (;;) {
1257 if (chip->state != FL_ERASING) {
1258 /* Someone's suspended the erase. Sleep */
1259 set_current_state(TASK_UNINTERRUPTIBLE);
1260 add_wait_queue(&chip->wq, &wait);
1261 cfi_spin_unlock(chip->mutex);
1262 schedule();
1263 remove_wait_queue(&chip->wq, &wait);
1264 cfi_spin_lock(chip->mutex);
1265 continue;
1266 }
1267 if (chip->erase_suspended) {
1268 /* This erase was suspended and resumed.
1269 Adjust the timeout */
1270 timeo = jiffies + (HZ*20); /* FIXME */
1271 chip->erase_suspended = 0;
1272 }
1273
1274 if (chip_ready(map, adr))
1275 goto op_done;
1276
1277 if (time_after(jiffies, timeo))
1278 break;
1279
1280 /* Latency issues. Drop the lock, wait a while and retry */
1281 cfi_spin_unlock(chip->mutex);
1282 set_current_state(TASK_UNINTERRUPTIBLE);
1283 schedule_timeout(1);
1284 cfi_spin_lock(chip->mutex);
1285 }
1286
1287 printk(KERN_WARNING "MTD %s(): software timeout\n",
1288 __func__ );
1289
1290 /* reset on all failures. */
1291 map_write( map, CMD(0xF0), chip->start );
1292 /* FIXME - should have reset delay before continuing */
1293
1294 ret = -EIO;
1295 op_done:
1296 chip->state = FL_READY;
1297 put_chip(map, chip, adr);
1298 cfi_spin_unlock(chip->mutex);
1299 return ret;
1300}
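
/*
 * Both erase paths use the six-cycle AMD sequence: 0xAA, 0x55, 0x80
 * (erase setup), 0xAA, 0x55, completed by either 0x10 at the unlock
 * address for a whole-chip erase (do_erase_chip) or 0x30 at the sector
 * address for a single block (above).  The 0xB0/0x30 writes in
 * get_chip()/put_chip() are the matching erase-suspend/resume commands.
 */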
1301
1302
1303int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1304{
1305 unsigned long ofs, len;
1306 int ret;
1307
1308 ofs = instr->addr;
1309 len = instr->len;
1310
1311 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1312 if (ret)
1313 return ret;
1314
1315 instr->state = MTD_ERASE_DONE;
1316 mtd_erase_callback(instr);
1317
1318 return 0;
1319}
1320
1321
1322static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1323{
1324 struct map_info *map = mtd->priv;
1325 struct cfi_private *cfi = map->fldrv_priv;
1326 int ret = 0;
1327
1328 if (instr->addr != 0)
1329 return -EINVAL;
1330
1331 if (instr->len != mtd->size)
1332 return -EINVAL;
1333
1334 ret = do_erase_chip(map, &cfi->chips[0]);
1335 if (ret)
1336 return ret;
1337
1338 instr->state = MTD_ERASE_DONE;
1339 mtd_erase_callback(instr);
1340
1341 return 0;
1342}
1343
1344
1345static void cfi_amdstd_sync (struct mtd_info *mtd)
1346{
1347 struct map_info *map = mtd->priv;
1348 struct cfi_private *cfi = map->fldrv_priv;
1349 int i;
1350 struct flchip *chip;
1351 int ret = 0;
1352 DECLARE_WAITQUEUE(wait, current);
1353
1354 for (i=0; !ret && i<cfi->numchips; i++) {
1355 chip = &cfi->chips[i];
1356
1357 retry:
1358 cfi_spin_lock(chip->mutex);
1359
1360 switch(chip->state) {
1361 case FL_READY:
1362 case FL_STATUS:
1363 case FL_CFI_QUERY:
1364 case FL_JEDEC_QUERY:
1365 chip->oldstate = chip->state;
1366 chip->state = FL_SYNCING;
1367 /* No need to wake_up() on this state change -
1368 * as the whole point is that nobody can do anything
1369 * with the chip now anyway.
1370 */
1371 case FL_SYNCING:
1372 cfi_spin_unlock(chip->mutex);
1373 break;
1374
1375 default:
1376 /* Not an idle state */
1377 add_wait_queue(&chip->wq, &wait);
1378
1379 cfi_spin_unlock(chip->mutex);
1380
1381 schedule();
1382
1383 remove_wait_queue(&chip->wq, &wait);
1384
1385 goto retry;
1386 }
1387 }
1388
1389 /* Unlock the chips again */
1390
1391 for (i--; i >=0; i--) {
1392 chip = &cfi->chips[i];
1393
1394 cfi_spin_lock(chip->mutex);
1395
1396 if (chip->state == FL_SYNCING) {
1397 chip->state = chip->oldstate;
1398 wake_up(&chip->wq);
1399 }
1400 cfi_spin_unlock(chip->mutex);
1401 }
1402}
1403
1404
1405static int cfi_amdstd_suspend(struct mtd_info *mtd)
1406{
1407 struct map_info *map = mtd->priv;
1408 struct cfi_private *cfi = map->fldrv_priv;
1409 int i;
1410 struct flchip *chip;
1411 int ret = 0;
1412
1413 for (i=0; !ret && i<cfi->numchips; i++) {
1414 chip = &cfi->chips[i];
1415
1416 cfi_spin_lock(chip->mutex);
1417
1418 switch(chip->state) {
1419 case FL_READY:
1420 case FL_STATUS:
1421 case FL_CFI_QUERY:
1422 case FL_JEDEC_QUERY:
1423 chip->oldstate = chip->state;
1424 chip->state = FL_PM_SUSPENDED;
1425 /* No need to wake_up() on this state change -
1426 * as the whole point is that nobody can do anything
1427 * with the chip now anyway.
1428 */
1429 case FL_PM_SUSPENDED:
1430 break;
1431
1432 default:
1433 ret = -EAGAIN;
1434 break;
1435 }
1436 cfi_spin_unlock(chip->mutex);
1437 }
1438
1439 /* Unlock the chips again */
1440
1441 if (ret) {
1442 for (i--; i >=0; i--) {
1443 chip = &cfi->chips[i];
1444
1445 cfi_spin_lock(chip->mutex);
1446
1447 if (chip->state == FL_PM_SUSPENDED) {
1448 chip->state = chip->oldstate;
1449 wake_up(&chip->wq);
1450 }
1451 cfi_spin_unlock(chip->mutex);
1452 }
1453 }
1454
1455 return ret;
1456}
1457
1458
1459static void cfi_amdstd_resume(struct mtd_info *mtd)
1460{
1461 struct map_info *map = mtd->priv;
1462 struct cfi_private *cfi = map->fldrv_priv;
1463 int i;
1464 struct flchip *chip;
1465
1466 for (i=0; i<cfi->numchips; i++) {
1467
1468 chip = &cfi->chips[i];
1469
1470 cfi_spin_lock(chip->mutex);
1471
1472 if (chip->state == FL_PM_SUSPENDED) {
1473 chip->state = FL_READY;
1474 map_write(map, CMD(0xF0), chip->start);
1475 wake_up(&chip->wq);
1476 }
1477 else
1478 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1479
1480 cfi_spin_unlock(chip->mutex);
1481 }
1482}
1483
1484static void cfi_amdstd_destroy(struct mtd_info *mtd)
1485{
1486 struct map_info *map = mtd->priv;
1487 struct cfi_private *cfi = map->fldrv_priv;
1488 kfree(cfi->cmdset_priv);
1489 kfree(cfi->cfiq);
1490 kfree(cfi);
1491 kfree(mtd->eraseregions);
1492}
1493
1494static char im_name[]="cfi_cmdset_0002";
1495
1496
1497static int __init cfi_amdstd_init(void)
1498{
1499 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
1500 return 0;
1501}
1502
1503
1504static void __exit cfi_amdstd_exit(void)
1505{
1506 inter_module_unregister(im_name);
1507}
1508
1509
1510module_init(cfi_amdstd_init);
1511module_exit(cfi_amdstd_exit);
1512
1513MODULE_LICENSE("GPL");
1514MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1515MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
new file mode 100644
index 000000000000..8c24e18db3b4
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -0,0 +1,1418 @@
1/*
2 * Common Flash Interface support:
3 * ST Advanced Architecture Command Set (ID 0x0020)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
7 * $Id: cfi_cmdset_0020.c,v 1.17 2004/11/20 12:49:04 dwmw2 Exp $
8 *
9 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.)
12 * - scalability vs code size is completely set at compile-time
13 * (see include/linux/mtd/cfi.h for selection)
14 * - optimized write buffer method
15 * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
16 * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
17 * (command set 0x0020)
18 * - added a writev function
19 */
20
21#include <linux/version.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/init.h>
27#include <asm/io.h>
28#include <asm/byteorder.h>
29
30#include <linux/errno.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/mtd/map.h>
35#include <linux/mtd/cfi.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/compatmac.h>
38
39
40static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
42static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
43 unsigned long count, loff_t to, size_t *retlen);
44static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
45static void cfi_staa_sync (struct mtd_info *);
46static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
47static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
48static int cfi_staa_suspend (struct mtd_info *);
49static void cfi_staa_resume (struct mtd_info *);
50
51static void cfi_staa_destroy(struct mtd_info *);
52
53struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
54
55static struct mtd_info *cfi_staa_setup (struct map_info *);
56
57static struct mtd_chip_driver cfi_staa_chipdrv = {
58 .probe = NULL, /* Not usable directly */
59 .destroy = cfi_staa_destroy,
60 .name = "cfi_cmdset_0020",
61 .module = THIS_MODULE
62};
63
64/* #define DEBUG_LOCK_BITS */
65//#define DEBUG_CFI_FEATURES
66
67#ifdef DEBUG_CFI_FEATURES
68static void cfi_tell_features(struct cfi_pri_intelext *extp)
69{
70 int i;
71 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
72 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
73 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
74 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
75 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
76 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
77 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
78 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
79 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
80 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
81 for (i=9; i<32; i++) {
82 if (extp->FeatureSupport & (1<<i))
83 printk(" - Unknown Bit %X: supported\n", i);
84 }
85
86 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
87 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
88 for (i=1; i<8; i++) {
89 if (extp->SuspendCmdSupport & (1<<i))
90 printk(" - Unknown Bit %X: supported\n", i);
91 }
92
93 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
94 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
95 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
96 for (i=2; i<16; i++) {
97 if (extp->BlkStatusRegMask & (1<<i))
98 printk(" - Unknown Bit %X Active: yes\n",i);
99 }
100
101 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
102 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
103 if (extp->VppOptimal)
104 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
105 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
106}
107#endif
108
109/* This routine is made available to other mtd code via
110 * inter_module_register. It must only be accessed through
111 * inter_module_get which will bump the use count of this module. The
112 * addresses passed back in cfi are valid as long as the use count of
113 * this module is non-zero, i.e. between inter_module_get and
114 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
115 */
116struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
117{
118 struct cfi_private *cfi = map->fldrv_priv;
119 int i;
120
121 if (cfi->cfi_mode) {
122 /*
123 * It's a real CFI chip, not one for which the probe
124 * routine faked a CFI structure. So we read the feature
125 * table from it.
126 */
127 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
128 struct cfi_pri_intelext *extp;
129
130 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
131 if (!extp)
132 return NULL;
133
134 /* Do some byteswapping if necessary */
135 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
136 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
137
138#ifdef DEBUG_CFI_FEATURES
139 /* Tell the user about it in lots of lovely detail */
140 cfi_tell_features(extp);
141#endif
142
143 /* Install our own private info structure */
144 cfi->cmdset_priv = extp;
145 }
146
147 for (i=0; i< cfi->numchips; i++) {
148 cfi->chips[i].word_write_time = 128;
149 cfi->chips[i].buffer_write_time = 128;
150 cfi->chips[i].erase_time = 1024;
151 }
152
153 return cfi_staa_setup(map);
154}
155
156static struct mtd_info *cfi_staa_setup(struct map_info *map)
157{
158 struct cfi_private *cfi = map->fldrv_priv;
159 struct mtd_info *mtd;
160 unsigned long offset = 0;
161 int i,j;
162 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
163
164 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
165 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
166
167 if (!mtd) {
168 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
169 kfree(cfi->cmdset_priv);
170 return NULL;
171 }
172
173 memset(mtd, 0, sizeof(*mtd));
174 mtd->priv = map;
175 mtd->type = MTD_NORFLASH;
176 mtd->size = devsize * cfi->numchips;
177
178 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
179 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
180 * mtd->numeraseregions, GFP_KERNEL);
181 if (!mtd->eraseregions) {
182 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
183 kfree(cfi->cmdset_priv);
184 kfree(mtd);
185 return NULL;
186 }
187
188 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
189 unsigned long ernum, ersize;
190 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
191 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
192
193 if (mtd->erasesize < ersize) {
194 mtd->erasesize = ersize;
195 }
196 for (j=0; j<cfi->numchips; j++) {
197 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
198 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
199 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
200 }
201 offset += (ersize * ernum);
202 }
203
204 if (offset != devsize) {
205 /* Argh */
206 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
207 kfree(mtd->eraseregions);
208 kfree(cfi->cmdset_priv);
209 kfree(mtd);
210 return NULL;
211 }
212
213 for (i=0; i<mtd->numeraseregions;i++){
214 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
215 i,mtd->eraseregions[i].offset,
216 mtd->eraseregions[i].erasesize,
217 mtd->eraseregions[i].numblocks);
218 }
219
220 /* Also select the correct geometry setup too */
221 mtd->erase = cfi_staa_erase_varsize;
222 mtd->read = cfi_staa_read;
223 mtd->write = cfi_staa_write_buffers;
224 mtd->writev = cfi_staa_writev;
225 mtd->sync = cfi_staa_sync;
226 mtd->lock = cfi_staa_lock;
227 mtd->unlock = cfi_staa_unlock;
228 mtd->suspend = cfi_staa_suspend;
229 mtd->resume = cfi_staa_resume;
230 mtd->flags = MTD_CAP_NORFLASH;
231 mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
232 mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
233 map->fldrv = &cfi_staa_chipdrv;
234 __module_get(THIS_MODULE);
235 mtd->name = map->name;
236 return mtd;
237}
238
239
240static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
241{
242 map_word status, status_OK;
243 unsigned long timeo;
244 DECLARE_WAITQUEUE(wait, current);
245 int suspended = 0;
246 unsigned long cmd_addr;
247 struct cfi_private *cfi = map->fldrv_priv;
248
249 adr += chip->start;
250
251 /* Ensure cmd read/writes are aligned. */
252 cmd_addr = adr & ~(map_bankwidth(map)-1);
253
254 /* Let's determine this according to the interleave only once */
255 status_OK = CMD(0x80);
256
257 timeo = jiffies + HZ;
258 retry:
259 spin_lock_bh(chip->mutex);
260
261 /* Check that the chip's ready to talk to us.
262 * If it's in FL_ERASING state, suspend it and make it talk now.
263 */
264 switch (chip->state) {
265 case FL_ERASING:
266 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
267 goto sleep; /* We don't support erase suspend */
268
269 map_write (map, CMD(0xb0), cmd_addr);
270 /* If the flash has finished erasing, then 'erase suspend'
271 * appears to make some (28F320) flash devices switch to
272 * 'read' mode. Make sure that we switch to 'read status'
273 * mode so we get the right data. --rmk
274 */
275 map_write(map, CMD(0x70), cmd_addr);
276 chip->oldstate = FL_ERASING;
277 chip->state = FL_ERASE_SUSPENDING;
278 // printk("Erase suspending at 0x%lx\n", cmd_addr);
279 for (;;) {
280 status = map_read(map, cmd_addr);
281 if (map_word_andequal(map, status, status_OK, status_OK))
282 break;
283
284 if (time_after(jiffies, timeo)) {
285 /* Urgh */
286 map_write(map, CMD(0xd0), cmd_addr);
287 /* make sure we're in 'read status' mode */
288 map_write(map, CMD(0x70), cmd_addr);
289 chip->state = FL_ERASING;
290 spin_unlock_bh(chip->mutex);
291 printk(KERN_ERR "Chip not ready after erase "
292 "suspended: status = 0x%lx\n", status.x[0]);
293 return -EIO;
294 }
295
296 spin_unlock_bh(chip->mutex);
297 cfi_udelay(1);
298 spin_lock_bh(chip->mutex);
299 }
300
301 suspended = 1;
302 map_write(map, CMD(0xff), cmd_addr);
303 chip->state = FL_READY;
304 break;
305
306#if 0
307 case FL_WRITING:
308 /* Not quite yet */
309#endif
310
311 case FL_READY:
312 break;
313
314 case FL_CFI_QUERY:
315 case FL_JEDEC_QUERY:
316 map_write(map, CMD(0x70), cmd_addr);
317 chip->state = FL_STATUS;
318
319 case FL_STATUS:
320 status = map_read(map, cmd_addr);
321 if (map_word_andequal(map, status, status_OK, status_OK)) {
322 map_write(map, CMD(0xff), cmd_addr);
323 chip->state = FL_READY;
324 break;
325 }
326
327 /* Urgh. Chip not yet ready to talk to us. */
328 if (time_after(jiffies, timeo)) {
329 spin_unlock_bh(chip->mutex);
330 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
331 return -EIO;
332 }
333
334 /* Latency issues. Drop the lock, wait a while and retry */
335 spin_unlock_bh(chip->mutex);
336 cfi_udelay(1);
337 goto retry;
338
339 default:
340 sleep:
341 /* Stick ourselves on a wait queue to be woken when
342 someone changes the status */
343 set_current_state(TASK_UNINTERRUPTIBLE);
344 add_wait_queue(&chip->wq, &wait);
345 spin_unlock_bh(chip->mutex);
346 schedule();
347 remove_wait_queue(&chip->wq, &wait);
348 timeo = jiffies + HZ;
349 goto retry;
350 }
351
352 map_copy_from(map, buf, adr, len);
353
354 if (suspended) {
355 chip->state = chip->oldstate;
356 /* What if one interleaved chip has finished and the
357 other hasn't? The old code would leave the finished
358 one in READY mode. That's bad, and caused -EROFS
359 errors to be returned from do_erase_oneblock because
360 that's the only bit it checked for at the time.
361 As the state machine appears to explicitly allow
362 sending the 0x70 (Read Status) command to an erasing
363 chip and expecting it to be ignored, that's what we
364 do. */
365 map_write(map, CMD(0xd0), cmd_addr);
366 map_write(map, CMD(0x70), cmd_addr);
367 }
368
369 wake_up(&chip->wq);
370 spin_unlock_bh(chip->mutex);
371 return 0;
372}
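/*
 * The suspend/resume handshake performed by do_read_onechip() above,
 * reduced to the bare ST/Intel command sequence (illustrative sketch;
 * single chip assumed, the SR.7 polls elided to comments):
 */
#if 0
	map_write(map, CMD(0xb0), cmd_addr);	/* Erase Suspend */
	map_write(map, CMD(0x70), cmd_addr);	/* Read Status Register */
	/* ... poll until SR.7 says the WSM is idle ... */
	map_write(map, CMD(0xff), cmd_addr);	/* Read Array: data now valid */
	map_copy_from(map, buf, adr, len);
	map_write(map, CMD(0xd0), cmd_addr);	/* Erase Resume */
	map_write(map, CMD(0x70), cmd_addr);	/* back to status mode */
#endif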
373
374static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
375{
376 struct map_info *map = mtd->priv;
377 struct cfi_private *cfi = map->fldrv_priv;
378 unsigned long ofs;
379 int chipnum;
380 int ret = 0;
381
382 /* ofs: offset within the first chip that the first read should start */
383 chipnum = (from >> cfi->chipshift);
384 ofs = from - (chipnum << cfi->chipshift);
385
386 *retlen = 0;
387
388 while (len) {
389 unsigned long thislen;
390
391 if (chipnum >= cfi->numchips)
392 break;
393
394 if ((len + ofs -1) >> cfi->chipshift)
395 thislen = (1<<cfi->chipshift) - ofs;
396 else
397 thislen = len;
398
399 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
400 if (ret)
401 break;
402
403 *retlen += thislen;
404 len -= thislen;
405 buf += thislen;
406
407 ofs = 0;
408 chipnum++;
409 }
410 return ret;
411}
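/*
 * Worked example for the chip-splitting arithmetic above (numbers are
 * illustrative): with two 16MiB chips cfi->chipshift is 24, so a
 * 64-byte read at from == 0x00fffff0 yields chipnum == 0,
 * ofs == 0xfffff0, thislen == 16, then wraps to chipnum == 1, ofs == 0
 * for the remaining 48 bytes.
 */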
412
413static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
414 unsigned long adr, const u_char *buf, int len)
415{
416 struct cfi_private *cfi = map->fldrv_priv;
417 map_word status, status_OK;
418 unsigned long cmd_adr, timeo;
419 DECLARE_WAITQUEUE(wait, current);
420 int wbufsize, z;
421
422	/* M58LW064A requires bus alignment for buffer writes -- saw */
423 if (adr & (map_bankwidth(map)-1))
424 return -EINVAL;
425
426 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
427 adr += chip->start;
428 cmd_adr = adr & ~(wbufsize-1);
429
430 /* Let's determine this according to the interleave only once */
431 status_OK = CMD(0x80);
432
433 timeo = jiffies + HZ;
434 retry:
435
436#ifdef DEBUG_CFI_FEATURES
437 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
438#endif
439 spin_lock_bh(chip->mutex);
440
441 /* Check that the chip's ready to talk to us.
442 * Later, we can actually think about interrupting it
443 * if it's in FL_ERASING state.
444 * Not just yet, though.
445 */
446 switch (chip->state) {
447 case FL_READY:
448 break;
449
450 case FL_CFI_QUERY:
451 case FL_JEDEC_QUERY:
452 map_write(map, CMD(0x70), cmd_adr);
453 chip->state = FL_STATUS;
454#ifdef DEBUG_CFI_FEATURES
455	printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
456#endif
457
458 case FL_STATUS:
459 status = map_read(map, cmd_adr);
460 if (map_word_andequal(map, status, status_OK, status_OK))
461 break;
462 /* Urgh. Chip not yet ready to talk to us. */
463 if (time_after(jiffies, timeo)) {
464 spin_unlock_bh(chip->mutex);
465 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
466 status.x[0], map_read(map, cmd_adr).x[0]);
467 return -EIO;
468 }
469
470 /* Latency issues. Drop the lock, wait a while and retry */
471 spin_unlock_bh(chip->mutex);
472 cfi_udelay(1);
473 goto retry;
474
475 default:
476 /* Stick ourselves on a wait queue to be woken when
477 someone changes the status */
478 set_current_state(TASK_UNINTERRUPTIBLE);
479 add_wait_queue(&chip->wq, &wait);
480 spin_unlock_bh(chip->mutex);
481 schedule();
482 remove_wait_queue(&chip->wq, &wait);
483 timeo = jiffies + HZ;
484 goto retry;
485 }
486
487 ENABLE_VPP(map);
488 map_write(map, CMD(0xe8), cmd_adr);
489 chip->state = FL_WRITING_TO_BUFFER;
490
491 z = 0;
492 for (;;) {
493 status = map_read(map, cmd_adr);
494 if (map_word_andequal(map, status, status_OK, status_OK))
495 break;
496
497 spin_unlock_bh(chip->mutex);
498 cfi_udelay(1);
499 spin_lock_bh(chip->mutex);
500
501 if (++z > 100) {
502 /* Argh. Not ready for write to buffer */
503 DISABLE_VPP(map);
504 map_write(map, CMD(0x70), cmd_adr);
505 chip->state = FL_STATUS;
506 spin_unlock_bh(chip->mutex);
507 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
508 return -EIO;
509 }
510 }
511
512 /* Write length of data to come */
513	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
514
515 /* Write data */
516 for (z = 0; z < len;
517 z += map_bankwidth(map), buf += map_bankwidth(map)) {
518 map_word d;
519 d = map_word_load(map, buf);
520 map_write(map, d, adr+z);
521 }
522 /* GO GO GO */
523 map_write(map, CMD(0xd0), cmd_adr);
524 chip->state = FL_WRITING;
525
526 spin_unlock_bh(chip->mutex);
527 cfi_udelay(chip->buffer_write_time);
528 spin_lock_bh(chip->mutex);
529
530 timeo = jiffies + (HZ/2);
531 z = 0;
532 for (;;) {
533 if (chip->state != FL_WRITING) {
534 /* Someone's suspended the write. Sleep */
535 set_current_state(TASK_UNINTERRUPTIBLE);
536 add_wait_queue(&chip->wq, &wait);
537 spin_unlock_bh(chip->mutex);
538 schedule();
539 remove_wait_queue(&chip->wq, &wait);
540 timeo = jiffies + (HZ / 2); /* FIXME */
541 spin_lock_bh(chip->mutex);
542 continue;
543 }
544
545 status = map_read(map, cmd_adr);
546 if (map_word_andequal(map, status, status_OK, status_OK))
547 break;
548
549 /* OK Still waiting */
550 if (time_after(jiffies, timeo)) {
551 /* clear status */
552 map_write(map, CMD(0x50), cmd_adr);
553 /* put back into read status register mode */
554 map_write(map, CMD(0x70), adr);
555 chip->state = FL_STATUS;
556 DISABLE_VPP(map);
557 spin_unlock_bh(chip->mutex);
558 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
559 return -EIO;
560 }
561
562 /* Latency issues. Drop the lock, wait a while and retry */
563 spin_unlock_bh(chip->mutex);
564 cfi_udelay(1);
565 z++;
566 spin_lock_bh(chip->mutex);
567 }
568 if (!z) {
569 chip->buffer_write_time--;
570 if (!chip->buffer_write_time)
571 chip->buffer_write_time++;
572 }
573 if (z > 1)
574 chip->buffer_write_time++;
575
576 /* Done and happy. */
577 DISABLE_VPP(map);
578 chip->state = FL_STATUS;
579
580 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
581 if (map_word_bitsset(map, status, CMD(0x3a))) {
582#ifdef DEBUG_CFI_FEATURES
583 printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
584#endif
585 /* clear status */
586 map_write(map, CMD(0x50), cmd_adr);
587 /* put back into read status register mode */
588 map_write(map, CMD(0x70), adr);
589 wake_up(&chip->wq);
590 spin_unlock_bh(chip->mutex);
591 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
592 }
593 wake_up(&chip->wq);
594 spin_unlock_bh(chip->mutex);
595
596 return 0;
597}
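/*
 * The buffered-write protocol driven by do_write_buffer() above, as a
 * bare sketch (illustrative only; both SR.7 polls elided to comments):
 * 0xe8 requests the write buffer, the word count is programmed as N-1,
 * the data words follow, and 0xd0 confirms the operation.
 */
#if 0
	map_write(map, CMD(0xe8), cmd_adr);		/* Write to Buffer */
	/* ... poll SR.7 until the buffer is granted ... */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
	for (z = 0; z < len; z += map_bankwidth(map), buf += map_bankwidth(map))
		map_write(map, map_word_load(map, buf), adr+z);
	map_write(map, CMD(0xd0), cmd_adr);		/* Confirm */
	/* ... poll SR.7 until programming completes ... */
#endif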
598
599static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
600 size_t len, size_t *retlen, const u_char *buf)
601{
602 struct map_info *map = mtd->priv;
603 struct cfi_private *cfi = map->fldrv_priv;
604 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
605 int ret = 0;
606 int chipnum;
607 unsigned long ofs;
608
609 *retlen = 0;
610 if (!len)
611 return 0;
612
613 chipnum = to >> cfi->chipshift;
614 ofs = to - (chipnum << cfi->chipshift);
615
616#ifdef DEBUG_CFI_FEATURES
617 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
618 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
619        printk("%s: ofs[%lx] len[%zx]\n", __FUNCTION__, ofs, len);
620#endif
621
622 /* Write buffer is worth it only if more than one word to write... */
623 while (len > 0) {
624 /* We must not cross write block boundaries */
625 int size = wbufsize - (ofs & (wbufsize-1));
626
627 if (size > len)
628 size = len;
629
630 ret = do_write_buffer(map, &cfi->chips[chipnum],
631 ofs, buf, size);
632 if (ret)
633 return ret;
634
635 ofs += size;
636 buf += size;
637 (*retlen) += size;
638 len -= size;
639
640 if (ofs >> cfi->chipshift) {
641 chipnum ++;
642 ofs = 0;
643 if (chipnum == cfi->numchips)
644 return 0;
645 }
646 }
647
648 return 0;
649}
650
651/*
652 * Writev for ECC-Flashes is a little more complicated. We need to maintain
653 * a small buffer for this.
654 * XXX: If the buffer size is not a power of 2, this will break
655 */
656#define ECCBUF_SIZE (mtd->eccsize)
657#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
658#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
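/*
 * Worked example (assuming mtd->eccsize == 8): ECCBUF_DIV(13) == 8 and
 * ECCBUF_MOD(13) == 5, so a 13-byte element splits into one aligned
 * 8-byte write plus a 5-byte tail held over in the bounce buffer. The
 * masks only work because ECCBUF_SIZE is a power of two.
 */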
659static int
660cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
661 unsigned long count, loff_t to, size_t *retlen)
662{
663 unsigned long i;
664 size_t totlen = 0, thislen;
665 int ret = 0;
666 size_t buflen = 0;
667	char *buffer;
668
669 if (!ECCBUF_SIZE) {
670 /* We should fall back to a general writev implementation.
671 * Until that is written, just break.
672 */
673 return -EIO;
674 }
675 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
676 if (!buffer)
677 return -ENOMEM;
678
679 for (i=0; i<count; i++) {
680 size_t elem_len = vecs[i].iov_len;
681 void *elem_base = vecs[i].iov_base;
682 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
683 continue;
684 if (buflen) { /* cut off head */
685 if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
686 memcpy(buffer+buflen, elem_base, elem_len);
687 buflen += elem_len;
688 continue;
689 }
690 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
691 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
692 totlen += thislen;
693 if (ret || thislen != ECCBUF_SIZE)
694 goto write_error;
695 elem_len -= thislen-buflen;
696 elem_base += thislen-buflen;
697 to += ECCBUF_SIZE;
698 }
699 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
700 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
701 totlen += thislen;
702 if (ret || thislen != ECCBUF_DIV(elem_len))
703 goto write_error;
704 to += thislen;
705 }
706 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
707 if (buflen) {
708 memset(buffer, 0xff, ECCBUF_SIZE);
709			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
710 }
711 }
712 if (buflen) { /* flush last page, even if not full */
713 /* This is sometimes intended behaviour, really */
714 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
715 totlen += thislen;
716		if (ret || thislen != buflen)
717 goto write_error;
718 }
719write_error:
720	if (retlen)
721		*retlen = totlen;
	kfree(buffer);
722	return ret;
723}
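/*
 * Worked example for the loop above (illustrative, eccsize == 8):
 * writev of a 5-byte and a 14-byte element first accumulates the
 * 5-byte head, tops it up with 3 bytes of the second element and
 * flushes one 8-byte page, writes the ECCBUF_DIV(11) == 8 aligned
 * bytes of body, and carries the 3-byte tail to the final flush.
 */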
724
725
726static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
727{
728 struct cfi_private *cfi = map->fldrv_priv;
729 map_word status, status_OK;
730 unsigned long timeo;
731 int retries = 3;
732 DECLARE_WAITQUEUE(wait, current);
733 int ret = 0;
734
735 adr += chip->start;
736
737 /* Let's determine this according to the interleave only once */
738 status_OK = CMD(0x80);
739
740 timeo = jiffies + HZ;
741retry:
742 spin_lock_bh(chip->mutex);
743
744 /* Check that the chip's ready to talk to us. */
745 switch (chip->state) {
746 case FL_CFI_QUERY:
747 case FL_JEDEC_QUERY:
748 case FL_READY:
749 map_write(map, CMD(0x70), adr);
750 chip->state = FL_STATUS;
751
752 case FL_STATUS:
753 status = map_read(map, adr);
754 if (map_word_andequal(map, status, status_OK, status_OK))
755 break;
756
757 /* Urgh. Chip not yet ready to talk to us. */
758 if (time_after(jiffies, timeo)) {
759 spin_unlock_bh(chip->mutex);
760 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
761 return -EIO;
762 }
763
764 /* Latency issues. Drop the lock, wait a while and retry */
765 spin_unlock_bh(chip->mutex);
766 cfi_udelay(1);
767 goto retry;
768
769 default:
770 /* Stick ourselves on a wait queue to be woken when
771 someone changes the status */
772 set_current_state(TASK_UNINTERRUPTIBLE);
773 add_wait_queue(&chip->wq, &wait);
774 spin_unlock_bh(chip->mutex);
775 schedule();
776 remove_wait_queue(&chip->wq, &wait);
777 timeo = jiffies + HZ;
778 goto retry;
779 }
780
781 ENABLE_VPP(map);
782 /* Clear the status register first */
783 map_write(map, CMD(0x50), adr);
784
785 /* Now erase */
786 map_write(map, CMD(0x20), adr);
787 map_write(map, CMD(0xD0), adr);
788 chip->state = FL_ERASING;
789
790 spin_unlock_bh(chip->mutex);
791 msleep(1000);
792 spin_lock_bh(chip->mutex);
793
794 /* FIXME. Use a timer to check this, and return immediately. */
795 /* Once the state machine's known to be working I'll do that */
796
797 timeo = jiffies + (HZ*20);
798 for (;;) {
799 if (chip->state != FL_ERASING) {
800 /* Someone's suspended the erase. Sleep */
801 set_current_state(TASK_UNINTERRUPTIBLE);
802 add_wait_queue(&chip->wq, &wait);
803 spin_unlock_bh(chip->mutex);
804 schedule();
805 remove_wait_queue(&chip->wq, &wait);
806 timeo = jiffies + (HZ*20); /* FIXME */
807 spin_lock_bh(chip->mutex);
808 continue;
809 }
810
811 status = map_read(map, adr);
812 if (map_word_andequal(map, status, status_OK, status_OK))
813 break;
814
815 /* OK Still waiting */
816 if (time_after(jiffies, timeo)) {
817 map_write(map, CMD(0x70), adr);
818 chip->state = FL_STATUS;
819 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
820 DISABLE_VPP(map);
821 spin_unlock_bh(chip->mutex);
822 return -EIO;
823 }
824
825 /* Latency issues. Drop the lock, wait a while and retry */
826 spin_unlock_bh(chip->mutex);
827 cfi_udelay(1);
828 spin_lock_bh(chip->mutex);
829 }
830
831 DISABLE_VPP(map);
832 ret = 0;
833
834 /* We've broken this before. It doesn't hurt to be safe */
835 map_write(map, CMD(0x70), adr);
836 chip->state = FL_STATUS;
837 status = map_read(map, adr);
838
839 /* check for lock bit */
840 if (map_word_bitsset(map, status, CMD(0x3a))) {
841 unsigned char chipstatus = status.x[0];
842 if (!map_word_equal(map, status, CMD(chipstatus))) {
843 int i, w;
844 for (w=0; w<map_words(map); w++) {
845 for (i = 0; i<cfi_interleave(cfi); i++) {
846 chipstatus |= status.x[w] >> (cfi->device_type * 8);
847 }
848 }
849 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
850 status.x[0], chipstatus);
851 }
852 /* Reset the error bits */
853 map_write(map, CMD(0x50), adr);
854 map_write(map, CMD(0x70), adr);
855
856 if ((chipstatus & 0x30) == 0x30) {
857 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
858 ret = -EIO;
859 } else if (chipstatus & 0x02) {
860 /* Protection bit set */
861 ret = -EROFS;
862 } else if (chipstatus & 0x8) {
863 /* Voltage */
864 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
865 ret = -EIO;
866 } else if (chipstatus & 0x20) {
867 if (retries--) {
868 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
869 timeo = jiffies + HZ;
870 chip->state = FL_STATUS;
871 spin_unlock_bh(chip->mutex);
872 goto retry;
873 }
874 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
875 ret = -EIO;
876 }
877 }
878
879 wake_up(&chip->wq);
880 spin_unlock_bh(chip->mutex);
881 return ret;
882}
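/*
 * Status-register legend for the checks above (descriptive note, not
 * driver code). The 0x3a mask collects the ST/Intel error bits:
 * SR.1 (0x02) block protected, SR.3 (0x08) VPP low, SR.4 (0x10)
 * program failure, SR.5 (0x20) erase failure; SR.4 and SR.5 together
 * (0x30) flag an improper command sequence.
 */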
883
884int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
885{
	struct map_info *map = mtd->priv;
886 struct cfi_private *cfi = map->fldrv_priv;
887 unsigned long adr, len;
888 int chipnum, ret = 0;
889 int i, first;
890 struct mtd_erase_region_info *regions = mtd->eraseregions;
891
892 if (instr->addr > mtd->size)
893 return -EINVAL;
894
895 if ((instr->len + instr->addr) > mtd->size)
896 return -EINVAL;
897
898 /* Check that both start and end of the requested erase are
899 * aligned with the erasesize at the appropriate addresses.
900 */
901
902 i = 0;
903
904 /* Skip all erase regions which are ended before the start of
905 the requested erase. Actually, to save on the calculations,
906 we skip to the first erase region which starts after the
907 start of the requested erase, and then go back one.
908 */
909
910 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
911 i++;
912 i--;
913
914 /* OK, now i is pointing at the erase region in which this
915 erase request starts. Check the start of the requested
916 erase range is aligned with the erase size which is in
917 effect here.
918 */
919
920 if (instr->addr & (regions[i].erasesize-1))
921 return -EINVAL;
922
923 /* Remember the erase region we start on */
924 first = i;
925
926 /* Next, check that the end of the requested erase is aligned
927 * with the erase region at that address.
928 */
929
930 while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
931 i++;
932
933 /* As before, drop back one to point at the region in which
934 the address actually falls
935 */
936 i--;
937
938 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
939 return -EINVAL;
940
941 chipnum = instr->addr >> cfi->chipshift;
942 adr = instr->addr - (chipnum << cfi->chipshift);
943 len = instr->len;
944
945 i=first;
946
947 while(len) {
948 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
949
950 if (ret)
951 return ret;
952
953 adr += regions[i].erasesize;
954 len -= regions[i].erasesize;
955
956		if (adr % (1 << cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
957 i++;
958
959 if (adr >> cfi->chipshift) {
960 adr = 0;
961 chipnum++;
962
963 if (chipnum >= cfi->numchips)
964 break;
965 }
966 }
967
968 instr->state = MTD_ERASE_DONE;
969 mtd_erase_callback(instr);
970
971 return 0;
972}
973
974static void cfi_staa_sync (struct mtd_info *mtd)
975{
976 struct map_info *map = mtd->priv;
977 struct cfi_private *cfi = map->fldrv_priv;
978 int i;
979 struct flchip *chip;
980 int ret = 0;
981 DECLARE_WAITQUEUE(wait, current);
982
983 for (i=0; !ret && i<cfi->numchips; i++) {
984 chip = &cfi->chips[i];
985
986 retry:
987 spin_lock_bh(chip->mutex);
988
989 switch(chip->state) {
990 case FL_READY:
991 case FL_STATUS:
992 case FL_CFI_QUERY:
993 case FL_JEDEC_QUERY:
994 chip->oldstate = chip->state;
995 chip->state = FL_SYNCING;
996 /* No need to wake_up() on this state change -
997 * as the whole point is that nobody can do anything
998 * with the chip now anyway.
999 */
1000 case FL_SYNCING:
1001 spin_unlock_bh(chip->mutex);
1002 break;
1003
1004 default:
1005 /* Not an idle state */
1006 add_wait_queue(&chip->wq, &wait);
1007
1008 spin_unlock_bh(chip->mutex);
1009 schedule();
1010 remove_wait_queue(&chip->wq, &wait);
1011
1012 goto retry;
1013 }
1014 }
1015
1016 /* Unlock the chips again */
1017
1018 for (i--; i >=0; i--) {
1019 chip = &cfi->chips[i];
1020
1021 spin_lock_bh(chip->mutex);
1022
1023 if (chip->state == FL_SYNCING) {
1024 chip->state = chip->oldstate;
1025 wake_up(&chip->wq);
1026 }
1027 spin_unlock_bh(chip->mutex);
1028 }
1029}
1030
1031static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1032{
1033 struct cfi_private *cfi = map->fldrv_priv;
1034 map_word status, status_OK;
1035 unsigned long timeo = jiffies + HZ;
1036 DECLARE_WAITQUEUE(wait, current);
1037
1038 adr += chip->start;
1039
1040 /* Let's determine this according to the interleave only once */
1041 status_OK = CMD(0x80);
1042
1043 timeo = jiffies + HZ;
1044retry:
1045 spin_lock_bh(chip->mutex);
1046
1047 /* Check that the chip's ready to talk to us. */
1048 switch (chip->state) {
1049 case FL_CFI_QUERY:
1050 case FL_JEDEC_QUERY:
1051 case FL_READY:
1052 map_write(map, CMD(0x70), adr);
1053 chip->state = FL_STATUS;
1054
1055 case FL_STATUS:
1056 status = map_read(map, adr);
1057 if (map_word_andequal(map, status, status_OK, status_OK))
1058 break;
1059
1060 /* Urgh. Chip not yet ready to talk to us. */
1061 if (time_after(jiffies, timeo)) {
1062 spin_unlock_bh(chip->mutex);
1063 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1064 return -EIO;
1065 }
1066
1067 /* Latency issues. Drop the lock, wait a while and retry */
1068 spin_unlock_bh(chip->mutex);
1069 cfi_udelay(1);
1070 goto retry;
1071
1072 default:
1073 /* Stick ourselves on a wait queue to be woken when
1074 someone changes the status */
1075 set_current_state(TASK_UNINTERRUPTIBLE);
1076 add_wait_queue(&chip->wq, &wait);
1077 spin_unlock_bh(chip->mutex);
1078 schedule();
1079 remove_wait_queue(&chip->wq, &wait);
1080 timeo = jiffies + HZ;
1081 goto retry;
1082 }
1083
1084 ENABLE_VPP(map);
1085 map_write(map, CMD(0x60), adr);
1086 map_write(map, CMD(0x01), adr);
1087 chip->state = FL_LOCKING;
1088
1089 spin_unlock_bh(chip->mutex);
1090 msleep(1000);
1091 spin_lock_bh(chip->mutex);
1092
1093 /* FIXME. Use a timer to check this, and return immediately. */
1094 /* Once the state machine's known to be working I'll do that */
1095
1096 timeo = jiffies + (HZ*2);
1097 for (;;) {
1098
1099 status = map_read(map, adr);
1100 if (map_word_andequal(map, status, status_OK, status_OK))
1101 break;
1102
1103 /* OK Still waiting */
1104 if (time_after(jiffies, timeo)) {
1105 map_write(map, CMD(0x70), adr);
1106 chip->state = FL_STATUS;
1107 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1108 DISABLE_VPP(map);
1109 spin_unlock_bh(chip->mutex);
1110 return -EIO;
1111 }
1112
1113 /* Latency issues. Drop the lock, wait a while and retry */
1114 spin_unlock_bh(chip->mutex);
1115 cfi_udelay(1);
1116 spin_lock_bh(chip->mutex);
1117 }
1118
1119 /* Done and happy. */
1120 chip->state = FL_STATUS;
1121 DISABLE_VPP(map);
1122 wake_up(&chip->wq);
1123 spin_unlock_bh(chip->mutex);
1124 return 0;
1125}
1126static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1127{
1128 struct map_info *map = mtd->priv;
1129 struct cfi_private *cfi = map->fldrv_priv;
1130 unsigned long adr;
1131 int chipnum, ret = 0;
1132#ifdef DEBUG_LOCK_BITS
1133 int ofs_factor = cfi->interleave * cfi->device_type;
1134#endif
1135
1136 if (ofs & (mtd->erasesize - 1))
1137 return -EINVAL;
1138
1139 if (len & (mtd->erasesize -1))
1140 return -EINVAL;
1141
1142 if ((len + ofs) > mtd->size)
1143 return -EINVAL;
1144
1145 chipnum = ofs >> cfi->chipshift;
1146 adr = ofs - (chipnum << cfi->chipshift);
1147
1148 while(len) {
1149
1150#ifdef DEBUG_LOCK_BITS
1151 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1152 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1153 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1154#endif
1155
1156 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1157
1158#ifdef DEBUG_LOCK_BITS
1159 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1160 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1161 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1162#endif
1163
1164 if (ret)
1165 return ret;
1166
1167 adr += mtd->erasesize;
1168 len -= mtd->erasesize;
1169
1170 if (adr >> cfi->chipshift) {
1171 adr = 0;
1172 chipnum++;
1173
1174 if (chipnum >= cfi->numchips)
1175 break;
1176 }
1177 }
1178 return 0;
1179}
1180static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1181{
1182 struct cfi_private *cfi = map->fldrv_priv;
1183 map_word status, status_OK;
1184 unsigned long timeo = jiffies + HZ;
1185 DECLARE_WAITQUEUE(wait, current);
1186
1187 adr += chip->start;
1188
1189 /* Let's determine this according to the interleave only once */
1190 status_OK = CMD(0x80);
1191
1192 timeo = jiffies + HZ;
1193retry:
1194 spin_lock_bh(chip->mutex);
1195
1196 /* Check that the chip's ready to talk to us. */
1197 switch (chip->state) {
1198 case FL_CFI_QUERY:
1199 case FL_JEDEC_QUERY:
1200 case FL_READY:
1201 map_write(map, CMD(0x70), adr);
1202 chip->state = FL_STATUS;
1203
1204 case FL_STATUS:
1205 status = map_read(map, adr);
1206 if (map_word_andequal(map, status, status_OK, status_OK))
1207 break;
1208
1209 /* Urgh. Chip not yet ready to talk to us. */
1210 if (time_after(jiffies, timeo)) {
1211 spin_unlock_bh(chip->mutex);
1212 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1213 return -EIO;
1214 }
1215
1216 /* Latency issues. Drop the lock, wait a while and retry */
1217 spin_unlock_bh(chip->mutex);
1218 cfi_udelay(1);
1219 goto retry;
1220
1221 default:
1222 /* Stick ourselves on a wait queue to be woken when
1223 someone changes the status */
1224 set_current_state(TASK_UNINTERRUPTIBLE);
1225 add_wait_queue(&chip->wq, &wait);
1226 spin_unlock_bh(chip->mutex);
1227 schedule();
1228 remove_wait_queue(&chip->wq, &wait);
1229 timeo = jiffies + HZ;
1230 goto retry;
1231 }
1232
1233 ENABLE_VPP(map);
1234 map_write(map, CMD(0x60), adr);
1235 map_write(map, CMD(0xD0), adr);
1236 chip->state = FL_UNLOCKING;
1237
1238 spin_unlock_bh(chip->mutex);
1239 msleep(1000);
1240 spin_lock_bh(chip->mutex);
1241
1242 /* FIXME. Use a timer to check this, and return immediately. */
1243 /* Once the state machine's known to be working I'll do that */
1244
1245 timeo = jiffies + (HZ*2);
1246 for (;;) {
1247
1248 status = map_read(map, adr);
1249 if (map_word_andequal(map, status, status_OK, status_OK))
1250 break;
1251
1252 /* OK Still waiting */
1253 if (time_after(jiffies, timeo)) {
1254 map_write(map, CMD(0x70), adr);
1255 chip->state = FL_STATUS;
1256 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1257 DISABLE_VPP(map);
1258 spin_unlock_bh(chip->mutex);
1259 return -EIO;
1260 }
1261
1262		/* Latency issues. Drop the lock, wait a while and retry */
1263 spin_unlock_bh(chip->mutex);
1264 cfi_udelay(1);
1265 spin_lock_bh(chip->mutex);
1266 }
1267
1268 /* Done and happy. */
1269 chip->state = FL_STATUS;
1270 DISABLE_VPP(map);
1271 wake_up(&chip->wq);
1272 spin_unlock_bh(chip->mutex);
1273 return 0;
1274}
1275static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1276{
1277 struct map_info *map = mtd->priv;
1278 struct cfi_private *cfi = map->fldrv_priv;
1279 unsigned long adr;
1280 int chipnum, ret = 0;
1281#ifdef DEBUG_LOCK_BITS
1282 int ofs_factor = cfi->interleave * cfi->device_type;
1283#endif
1284
1285 chipnum = ofs >> cfi->chipshift;
1286 adr = ofs - (chipnum << cfi->chipshift);
1287
1288#ifdef DEBUG_LOCK_BITS
1289 {
1290 unsigned long temp_adr = adr;
1291 unsigned long temp_len = len;
1292
1293 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1294 while (temp_len) {
1295 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1296 temp_adr += mtd->erasesize;
1297 temp_len -= mtd->erasesize;
1298 }
1299 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1300 }
1301#endif
1302
1303 ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1304
1305#ifdef DEBUG_LOCK_BITS
1306 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1307 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1308 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1309#endif
1310
1311 return ret;
1312}
1313
1314static int cfi_staa_suspend(struct mtd_info *mtd)
1315{
1316 struct map_info *map = mtd->priv;
1317 struct cfi_private *cfi = map->fldrv_priv;
1318 int i;
1319 struct flchip *chip;
1320 int ret = 0;
1321
1322 for (i=0; !ret && i<cfi->numchips; i++) {
1323 chip = &cfi->chips[i];
1324
1325 spin_lock_bh(chip->mutex);
1326
1327 switch(chip->state) {
1328 case FL_READY:
1329 case FL_STATUS:
1330 case FL_CFI_QUERY:
1331 case FL_JEDEC_QUERY:
1332 chip->oldstate = chip->state;
1333 chip->state = FL_PM_SUSPENDED;
1334 /* No need to wake_up() on this state change -
1335 * as the whole point is that nobody can do anything
1336 * with the chip now anyway.
1337 */
1338 case FL_PM_SUSPENDED:
1339 break;
1340
1341 default:
1342 ret = -EAGAIN;
1343 break;
1344 }
1345 spin_unlock_bh(chip->mutex);
1346 }
1347
1348 /* Unlock the chips again */
1349
1350 if (ret) {
1351 for (i--; i >=0; i--) {
1352 chip = &cfi->chips[i];
1353
1354 spin_lock_bh(chip->mutex);
1355
1356 if (chip->state == FL_PM_SUSPENDED) {
1357 /* No need to force it into a known state here,
1358 because we're returning failure, and it didn't
1359 get power cycled */
1360 chip->state = chip->oldstate;
1361 wake_up(&chip->wq);
1362 }
1363 spin_unlock_bh(chip->mutex);
1364 }
1365 }
1366
1367 return ret;
1368}
1369
1370static void cfi_staa_resume(struct mtd_info *mtd)
1371{
1372 struct map_info *map = mtd->priv;
1373 struct cfi_private *cfi = map->fldrv_priv;
1374 int i;
1375 struct flchip *chip;
1376
1377 for (i=0; i<cfi->numchips; i++) {
1378
1379 chip = &cfi->chips[i];
1380
1381 spin_lock_bh(chip->mutex);
1382
1383 /* Go to known state. Chip may have been power cycled */
1384 if (chip->state == FL_PM_SUSPENDED) {
1385 map_write(map, CMD(0xFF), 0);
1386 chip->state = FL_READY;
1387 wake_up(&chip->wq);
1388 }
1389
1390 spin_unlock_bh(chip->mutex);
1391 }
1392}
1393
1394static void cfi_staa_destroy(struct mtd_info *mtd)
1395{
1396 struct map_info *map = mtd->priv;
1397 struct cfi_private *cfi = map->fldrv_priv;
1398 kfree(cfi->cmdset_priv);
1399 kfree(cfi);
1400}
1401
1402static char im_name[]="cfi_cmdset_0020";
1403
1404static int __init cfi_staa_init(void)
1405{
1406 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
1407 return 0;
1408}
1409
1410static void __exit cfi_staa_exit(void)
1411{
1412 inter_module_unregister(im_name);
1413}
1414
1415module_init(cfi_staa_init);
1416module_exit(cfi_staa_exit);
1417
1418MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
new file mode 100644
index 000000000000..cf750038ce6a
--- /dev/null
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -0,0 +1,445 @@
1/*
2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd.
4 $Id: cfi_probe.c,v 1.83 2004/11/16 18:19:02 nico Exp $
5*/
6
7#include <linux/config.h>
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <asm/io.h>
13#include <asm/byteorder.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/interrupt.h>
17
18#include <linux/mtd/xip.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/cfi.h>
21#include <linux/mtd/gen_probe.h>
22
23//#define DEBUG_CFI
24
25#ifdef DEBUG_CFI
26static void print_cfi_ident(struct cfi_ident *);
27#endif
28
29static int cfi_probe_chip(struct map_info *map, __u32 base,
30 unsigned long *chip_map, struct cfi_private *cfi);
31static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
32
33struct mtd_info *cfi_probe(struct map_info *map);
34
35#ifdef CONFIG_MTD_XIP
36
37/* only needed for short periods, so this is rather simple */
38#define xip_disable() local_irq_disable()
39
40#define xip_allowed(base, map) \
41do { \
42 (void) map_read(map, base); \
43 asm volatile (".rep 8; nop; .endr"); \
44 local_irq_enable(); \
45} while (0)
46
47#define xip_enable(base, map, cfi) \
48do { \
49 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
50 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
51 xip_allowed(base, map); \
52} while (0)
53
54#define xip_disable_qry(base, map, cfi) \
55do { \
56 xip_disable(); \
57 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
58 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
59 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \
60} while (0)
61
62#else
63
64#define xip_disable() do { } while (0)
65#define xip_allowed(base, map) do { } while (0)
66#define xip_enable(base, map, cfi) do { } while (0)
67#define xip_disable_qry(base, map, cfi) do { } while (0)
68
69#endif
70
71/* check for QRY.
72 in: interleave,type,mode
73 ret: table index, <0 for error
74 */
75static int __xipram qry_present(struct map_info *map, __u32 base,
76 struct cfi_private *cfi)
77{
78 int osf = cfi->interleave * cfi->device_type; // scale factor
79 map_word val[3];
80 map_word qry[3];
81
82 qry[0] = cfi_build_cmd('Q', map, cfi);
83 qry[1] = cfi_build_cmd('R', map, cfi);
84 qry[2] = cfi_build_cmd('Y', map, cfi);
85
86 val[0] = map_read(map, base + osf*0x10);
87 val[1] = map_read(map, base + osf*0x11);
88 val[2] = map_read(map, base + osf*0x12);
89
90 if (!map_word_equal(map, qry[0], val[0]))
91 return 0;
92
93 if (!map_word_equal(map, qry[1], val[1]))
94 return 0;
95
96 if (!map_word_equal(map, qry[2], val[2]))
97 return 0;
98
99 return 1; // "QRY" found
100}
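/*
 * Worked example for the scale factor above (illustrative): two x16
 * devices interleaved on a 32-bit bus give osf = 2 * 2 = 4, so the
 * CFI 'Q' at query offset 0x10 is expected at base + 0x40 in the map,
 * replicated across both devices by cfi_build_cmd().
 */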
101
102static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
103 unsigned long *chip_map, struct cfi_private *cfi)
104{
105 int i;
106
107 if ((base + 0) >= map->size) {
108 printk(KERN_NOTICE
109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
110 (unsigned long)base, map->size -1);
111 return 0;
112 }
113 if ((base + 0xff) >= map->size) {
114 printk(KERN_NOTICE
115 "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
116 (unsigned long)base + 0x55, map->size -1);
117 return 0;
118 }
119
120 xip_disable();
121 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
122 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
123 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
124
125 if (!qry_present(map,base,cfi)) {
126 xip_enable(base, map, cfi);
127 return 0;
128 }
129
130 if (!cfi->numchips) {
131 /* This is the first time we're called. Set up the CFI
132 stuff accordingly and return */
133 return cfi_chip_setup(map, cfi);
134 }
135
136 /* Check each previous chip to see if it's an alias */
137 for (i=0; i < (base >> cfi->chipshift); i++) {
138 unsigned long start;
139 if(!test_bit(i, chip_map)) {
140 /* Skip location; no valid chip at this address */
141 continue;
142 }
143 start = i << cfi->chipshift;
144 /* This chip should be in read mode if it's one
145 we've already touched. */
146 if (qry_present(map, start, cfi)) {
147 /* Eep. This chip also had the QRY marker.
148 * Is it an alias for the new one? */
149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
151
152 /* If the QRY marker goes away, it's an alias */
153 if (!qry_present(map, start, cfi)) {
154 xip_allowed(base, map);
155 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
156 map->name, base, start);
157 return 0;
158 }
159 /* Yes, it's actually got QRY for data. Most
160 * unfortunate. Stick the new chip in read mode
161 * too and if it's the same, assume it's an alias. */
162 /* FIXME: Use other modes to do a proper check */
163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
165
166 if (qry_present(map, base, cfi)) {
167 xip_allowed(base, map);
168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
169 map->name, base, start);
170 return 0;
171 }
172 }
173 }
174
175 /* OK, if we got to here, then none of the previous chips appear to
176 be aliases for the current one. */
177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
178 cfi->numchips++;
179
180 /* Put it back into Read Mode */
181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
183 xip_allowed(base, map);
184
185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
186 map->name, cfi->interleave, cfi->device_type*8, base,
187 map->bankwidth*8);
188
189 return 1;
190}
191
192static int __xipram cfi_chip_setup(struct map_info *map,
193 struct cfi_private *cfi)
194{
195 int ofs_factor = cfi->interleave*cfi->device_type;
196 __u32 base = 0;
197 int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
198 int i;
199
200 xip_enable(base, map, cfi);
201#ifdef DEBUG_CFI
202 printk("Number of erase regions: %d\n", num_erase_regions);
203#endif
204 if (!num_erase_regions)
205 return 0;
206
207 cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
208 if (!cfi->cfiq) {
209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
210 return 0;
211 }
212
213 memset(cfi->cfiq,0,sizeof(struct cfi_ident));
214
215 cfi->cfi_mode = CFI_MODE_CFI;
216
217 /* Read the CFI info structure */
218 xip_disable_qry(base, map, cfi);
219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
220 ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
221
222 /* Note we put the device back into Read Mode BEFORE going into Auto
223 * Select Mode, as some devices support nesting of modes, others
224 * don't. This way should always work.
225 * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
226 * so should be treated as nops or illegal (and so put the device
227 * back into Read Mode, which is a nop in this case).
228 */
229 cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
230 cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
233 cfi->mfr = cfi_read_query(map, base);
234 cfi->id = cfi_read_query(map, base + ofs_factor);
235
236 /* Put it back into Read Mode */
237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
238 /* ... even if it's an Intel chip */
239 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
240 xip_allowed(base, map);
241
242 /* Do any necessary byteswapping */
243 cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
244
245 cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
246 cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
247 cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
248 cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
249 cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
250
251#ifdef DEBUG_CFI
252 /* Dump the information therein */
253 print_cfi_ident(cfi->cfiq);
254#endif
255
256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
258
259#ifdef DEBUG_CFI
260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
263#endif
264 }
265
266 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
267 map->name, cfi->interleave, cfi->device_type*8, base,
268 map->bankwidth*8);
269
270 return 1;
271}
272
273#ifdef DEBUG_CFI
274static char *vendorname(__u16 vendor)
275{
276 switch (vendor) {
277 case P_ID_NONE:
278 return "None";
279
280 case P_ID_INTEL_EXT:
281 return "Intel/Sharp Extended";
282
283 case P_ID_AMD_STD:
284 return "AMD/Fujitsu Standard";
285
286 case P_ID_INTEL_STD:
287 return "Intel/Sharp Standard";
288
289 case P_ID_AMD_EXT:
290 return "AMD/Fujitsu Extended";
291
292 case P_ID_WINBOND:
293 return "Winbond Standard";
294
295 case P_ID_ST_ADV:
296 return "ST Advanced";
297
298 case P_ID_MITSUBISHI_STD:
299 return "Mitsubishi Standard";
300
301 case P_ID_MITSUBISHI_EXT:
302 return "Mitsubishi Extended";
303
304 case P_ID_SST_PAGE:
305 return "SST Page Write";
306
307 case P_ID_INTEL_PERFORMANCE:
308 return "Intel Performance Code";
309
310 case P_ID_INTEL_DATA:
311 return "Intel Data";
312
313 case P_ID_RESERVED:
314 return "Not Allowed / Reserved for Future Use";
315
316 default:
317 return "Unknown";
318 }
319}
320
321
322static void print_cfi_ident(struct cfi_ident *cfip)
323{
324#if 0
325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
326 printk("Invalid CFI ident structure.\n");
327 return;
328 }
329#endif
330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
331 if (cfip->P_ADR)
332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
333 else
334 printk("No Primary Algorithm Table\n");
335
336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
337 if (cfip->A_ADR)
338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
339 else
340 printk("No Alternate Algorithm Table\n");
341
342
343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
345 if (cfip->VppMin) {
346 printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
347 printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
348 }
349 else
350 printk("No Vpp line\n");
351
352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
354
355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
358 }
359 else
360 printk("Full buffer write not supported\n");
361
362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
367 }
368 else
369 printk("Chip erase not supported\n");
370
371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
373 switch(cfip->InterfaceDesc) {
374 case 0:
375 printk(" - x8-only asynchronous interface\n");
376 break;
377
378 case 1:
379 printk(" - x16-only asynchronous interface\n");
380 break;
381
382 case 2:
383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
384 break;
385
386 case 3:
387 printk(" - x32-only asynchronous interface\n");
388 break;
389
390 case 4:
391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
392 break;
393
394 case 65535:
395 printk(" - Not Allowed / Reserved\n");
396 break;
397
398 default:
399 printk(" - Unknown\n");
400 break;
401 }
402
403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
405
406}
407#endif /* DEBUG_CFI */
408
409static struct chip_probe cfi_chip_probe = {
410 .name = "CFI",
411 .probe_chip = cfi_probe_chip
412};
413
414struct mtd_info *cfi_probe(struct map_info *map)
415{
416 /*
417 * Just use the generic probe stuff to call our CFI-specific
418 * chip_probe routine in all the possible permutations, etc.
419 */
420 return mtd_do_chip_probe(map, &cfi_chip_probe);
421}
422
423static struct mtd_chip_driver cfi_chipdrv = {
424 .probe = cfi_probe,
425 .name = "cfi_probe",
426 .module = THIS_MODULE
427};
428
429int __init cfi_probe_init(void)
430{
431 register_mtd_chip_driver(&cfi_chipdrv);
432 return 0;
433}
434
435static void __exit cfi_probe_exit(void)
436{
437 unregister_mtd_chip_driver(&cfi_chipdrv);
438}
439
440module_init(cfi_probe_init);
441module_exit(cfi_probe_exit);
442
443MODULE_LICENSE("GPL");
444MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
445MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
new file mode 100644
index 000000000000..2b2ede2bfcca
--- /dev/null
+++ b/drivers/mtd/chips/cfi_util.c
@@ -0,0 +1,196 @@
1/*
2 * Common Flash Interface support:
3 * Generic utility functions not dependent on command set
4 *
5 * Copyright (C) 2002 Red Hat
6 * Copyright (C) 2003 STMicroelectronics Limited
7 *
8 * This code is covered by the GPL.
9 *
10 * $Id: cfi_util.c,v 1.8 2004/12/14 19:55:56 nico Exp $
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <asm/io.h>
19#include <asm/byteorder.h>
20
21#include <linux/errno.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/mtd/xip.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/map.h>
28#include <linux/mtd/cfi.h>
29#include <linux/mtd/compatmac.h>
30
31struct cfi_extquery *
32__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
33{
34 struct cfi_private *cfi = map->fldrv_priv;
35 __u32 base = 0; // cfi->chips[0].start;
36 int ofs_factor = cfi->interleave * cfi->device_type;
37 int i;
38 struct cfi_extquery *extp = NULL;
39
40 printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
41 if (!adr)
42 goto out;
43
44 extp = kmalloc(size, GFP_KERNEL);
45 if (!extp) {
46 printk(KERN_ERR "Failed to allocate memory\n");
47 goto out;
48 }
49
50#ifdef CONFIG_MTD_XIP
51 local_irq_disable();
52#endif
53
54 /* Switch it into Query Mode */
55 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
56
57 /* Read in the Extended Query Table */
58 for (i=0; i<size; i++) {
59 ((unsigned char *)extp)[i] =
60 cfi_read_query(map, base+((adr+i)*ofs_factor));
61 }
62
63 /* Make sure it returns to read mode */
64 cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
65 cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
66
67#ifdef CONFIG_MTD_XIP
68 (void) map_read(map, base);
69 asm volatile (".rep 8; nop; .endr");
70 local_irq_enable();
71#endif
72
73 if (extp->MajorVersion != '1' ||
74 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
75 printk(KERN_WARNING " Unknown %s Extended Query "
76 "version %c.%c.\n", name, extp->MajorVersion,
77 extp->MinorVersion);
78 kfree(extp);
79 extp = NULL;
80 }
81
82 out: return extp;
83}
84
85EXPORT_SYMBOL(cfi_read_pri);
86
87void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
88{
89 struct map_info *map = mtd->priv;
90 struct cfi_private *cfi = map->fldrv_priv;
91 struct cfi_fixup *f;
92
93 for (f=fixups; f->fixup; f++) {
94 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
95 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
96 f->fixup(mtd, f->param);
97 }
98 }
99}
100
101EXPORT_SYMBOL(cfi_fixup);
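/*
 * Sketch of how a command-set driver might consume cfi_fixup() (the
 * table, vendor ID and fixup function here are hypothetical examples,
 * not taken from any real driver):
 */
#if 0
static void fixup_example(struct mtd_info *mtd, void *param)
{
	printk(KERN_NOTICE "applying example quirk\n");
}

static struct cfi_fixup example_fixups[] = {
	{ 0x0089, CFI_ID_ANY, fixup_example, NULL },	/* one vendor */
	{ CFI_MFR_ANY, CFI_ID_ANY, NULL, NULL }		/* terminator */
};

	/* from the cmdset probe, once cfi->mfr and cfi->id are known: */
	cfi_fixup(mtd, example_fixups);
#endif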
102
103int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
104 loff_t ofs, size_t len, void *thunk)
105{
106 struct map_info *map = mtd->priv;
107 struct cfi_private *cfi = map->fldrv_priv;
108 unsigned long adr;
109 int chipnum, ret = 0;
110 int i, first;
111 struct mtd_erase_region_info *regions = mtd->eraseregions;
112
113 if (ofs > mtd->size)
114 return -EINVAL;
115
116 if ((len + ofs) > mtd->size)
117 return -EINVAL;
118
119 /* Check that both start and end of the requested erase are
120 * aligned with the erasesize at the appropriate addresses.
121 */
122
123 i = 0;
124
125 /* Skip all erase regions which are ended before the start of
126 the requested erase. Actually, to save on the calculations,
127 we skip to the first erase region which starts after the
128 start of the requested erase, and then go back one.
129 */
130
131 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
132 i++;
133 i--;
134
135 /* OK, now i is pointing at the erase region in which this
136 erase request starts. Check the start of the requested
137 erase range is aligned with the erase size which is in
138 effect here.
139 */
140
141 if (ofs & (regions[i].erasesize-1))
142 return -EINVAL;
143
144 /* Remember the erase region we start on */
145 first = i;
146
147 /* Next, check that the end of the requested erase is aligned
148 * with the erase region at that address.
149 */
150
151 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
152 i++;
153
154 /* As before, drop back one to point at the region in which
155 the address actually falls
156 */
157 i--;
158
159 if ((ofs + len) & (regions[i].erasesize-1))
160 return -EINVAL;
161
162 chipnum = ofs >> cfi->chipshift;
163 adr = ofs - (chipnum << cfi->chipshift);
164
165 i=first;
166
167 while(len) {
168 int size = regions[i].erasesize;
169
170 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
171
172 if (ret)
173 return ret;
174
175 adr += size;
176 ofs += size;
177 len -= size;
178
179 if (ofs == regions[i].offset + size * regions[i].numblocks)
180 i++;
181
182 if (adr >> cfi->chipshift) {
183 adr = 0;
184 chipnum++;
185
186 if (chipnum >= cfi->numchips)
187 break;
188 }
189 }
190
191 return 0;
192}
193
194EXPORT_SYMBOL(cfi_varsize_frob);
195
196MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
new file mode 100644
index 000000000000..d7d739a108ae
--- /dev/null
+++ b/drivers/mtd/chips/chipreg.c
@@ -0,0 +1,111 @@
1/*
2 * $Id: chipreg.c,v 1.17 2004/11/16 18:29:00 dwmw2 Exp $
3 *
4 * Registration for chip drivers
5 *
6 */
7
8#include <linux/kernel.h>
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/kmod.h>
12#include <linux/spinlock.h>
13#include <linux/slab.h>
14#include <linux/mtd/map.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/compatmac.h>
17
18static DEFINE_SPINLOCK(chip_drvs_lock);
19static LIST_HEAD(chip_drvs_list);
20
21void register_mtd_chip_driver(struct mtd_chip_driver *drv)
22{
23 spin_lock(&chip_drvs_lock);
24 list_add(&drv->list, &chip_drvs_list);
25 spin_unlock(&chip_drvs_lock);
26}
27
28void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
29{
30 spin_lock(&chip_drvs_lock);
31 list_del(&drv->list);
32 spin_unlock(&chip_drvs_lock);
33}
34
35static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
36{
37 struct list_head *pos;
38 struct mtd_chip_driver *ret = NULL, *this;
39
40 spin_lock(&chip_drvs_lock);
41
42 list_for_each(pos, &chip_drvs_list) {
43 this = list_entry(pos, typeof(*this), list);
44
45 if (!strcmp(this->name, name)) {
46 ret = this;
47 break;
48 }
49 }
50 if (ret && !try_module_get(ret->module))
51 ret = NULL;
52
53 spin_unlock(&chip_drvs_lock);
54
55 return ret;
56}
57
58 /* Hide all the horrid details, like some silly person taking
59 get_module_symbol() away from us, from the caller. */
60
61struct mtd_info *do_map_probe(const char *name, struct map_info *map)
62{
63 struct mtd_chip_driver *drv;
64 struct mtd_info *ret;
65
66 drv = get_mtd_chip_driver(name);
67
68 if (!drv && !request_module("%s", name))
69 drv = get_mtd_chip_driver(name);
70
71 if (!drv)
72 return NULL;
73
74 ret = drv->probe(map);
75
76 /* We decrease the use count here. It may have been a
77 probe-only module, which is no longer required from this
78 point, having given us a handle on (and increased the use
79 count of) the actual driver code.
80 */
81 module_put(drv->module);
82
83 if (ret)
84 return ret;
85
86 return NULL;
87}
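/*
 * Typical use from a map driver (illustrative sketch; "my_map" and the
 * probe order are examples only):
 */
#if 0
	struct mtd_info *mtd;

	mtd = do_map_probe("cfi_probe", &my_map);
	if (!mtd)
		mtd = do_map_probe("jedec_probe", &my_map);
	if (mtd)
		add_mtd_device(mtd);
#endif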
88/*
89 * Destroy an MTD device which was created for a map device.
90 * Make sure the MTD device is already unregistered before calling this
91 */
92void map_destroy(struct mtd_info *mtd)
93{
94 struct map_info *map = mtd->priv;
95
96 if (map->fldrv->destroy)
97 map->fldrv->destroy(mtd);
98
99 module_put(map->fldrv->module);
100
101 kfree(mtd);
102}
103
104EXPORT_SYMBOL(register_mtd_chip_driver);
105EXPORT_SYMBOL(unregister_mtd_chip_driver);
106EXPORT_SYMBOL(do_map_probe);
107EXPORT_SYMBOL(map_destroy);
108
109MODULE_LICENSE("GPL");
110MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
111MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
new file mode 100644
index 000000000000..fbf44708a861
--- /dev/null
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -0,0 +1,107 @@
1#ifndef FWH_LOCK_H
2#define FWH_LOCK_H
3
4
5enum fwh_lock_state {
6 FWH_UNLOCKED = 0,
7 FWH_DENY_WRITE = 1,
8 FWH_IMMUTABLE = 2,
9 FWH_DENY_READ = 4,
10};
11
12struct fwh_xxlock_thunk {
13 enum fwh_lock_state val;
14 flstate_t state;
15};
16
17
18#define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
19#define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})
20
21/*
22 * This locking/unlock is specific to firmware hub parts. Only one
23 * is known that supports the Intel command set. Firmware
24 * hub parts cannot be interleaved as they are on the LPC bus
25 * so this code has not been tested with interleaved chips,
26 * and will likely fail in that context.
27 */
28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
29 unsigned long adr, int len, void *thunk)
30{
31 struct cfi_private *cfi = map->fldrv_priv;
32 struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
33 int ret;
34
35	/* Refuse the operation if we cannot look behind the chip */
36 if (chip->start < 0x400000) {
37 DEBUG( MTD_DEBUG_LEVEL3,
38 "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
39 __func__, chip->start );
40 return -EIO;
41 }
42 /*
43 * lock block registers:
44	 *  - on 64k boundaries and
45 * - bit 1 set high
46 * - block lock registers are 4MiB lower - overflow subtract (danger)
47 *
48 * The address manipulation is first done on the logical address
49 * which is 0 at the start of the chip, and then the offset of
50	 * the individual chip is added to it.  In any other order, a weird
51	 * map offset could cause problems.
52 */
53 adr = (adr & ~0xffffUL) | 0x2;
54 adr += chip->start - 0x400000;
55
56 /*
57 * This is easy because these are writes to registers and not writes
58 * to flash memory - that means that we don't have to check status
59 * and timeout.
60 */
61 cfi_spin_lock(chip->mutex);
62 ret = get_chip(map, chip, adr, FL_LOCKING);
63 if (ret) {
64 cfi_spin_unlock(chip->mutex);
65 return ret;
66 }
67
68 chip->state = xxlt->state;
69 map_write(map, CMD(xxlt->val), adr);
70
71 /* Done and happy. */
72 chip->state = FL_READY;
73 put_chip(map, chip, adr);
74 cfi_spin_unlock(chip->mutex);
75 return 0;
76}
77
78
79static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
80{
81 int ret;
82
83 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
84 (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);
85
86 return ret;
87}
88
89
90static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
91{
92 int ret;
93
94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
96
97 return ret;
98}
99
100static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
101{
102 printk(KERN_NOTICE "using fwh lock/unlock method\n");
103 /* Setup for the chips with the fwh lock method */
104 mtd->lock = fwh_lock_varsize;
105 mtd->unlock = fwh_unlock_varsize;
106}
107#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
new file mode 100644
index 000000000000..fc982c4671f0
--- /dev/null
+++ b/drivers/mtd/chips/gen_probe.c
@@ -0,0 +1,255 @@
1/*
2 * Routines common to all CFI-type probes.
3 * (C) 2001-2003 Red Hat, Inc.
4 * GPL'd
5 * $Id: gen_probe.c,v 1.21 2004/08/14 15:14:05 dwmw2 Exp $
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/module.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/cfi.h>
14#include <linux/mtd/gen_probe.h>
15
16static struct mtd_info *check_cmd_set(struct map_info *, int);
17static struct cfi_private *genprobe_ident_chips(struct map_info *map,
18 struct chip_probe *cp);
19static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
20 struct cfi_private *cfi);
21
22struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
23{
24 struct mtd_info *mtd = NULL;
25 struct cfi_private *cfi;
26
27 /* First probe the map to see if we have CFI stuff there. */
28 cfi = genprobe_ident_chips(map, cp);
29
30 if (!cfi)
31 return NULL;
32
33 map->fldrv_priv = cfi;
34 /* OK we liked it. Now find a driver for the command set it talks */
35
36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */
37 if (!mtd)
38 mtd = check_cmd_set(map, 0); /* Then the secondary */
39
40 if (mtd)
41 return mtd;
42
43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
44
45 kfree(cfi->cfiq);
46 kfree(cfi);
47 map->fldrv_priv = NULL;
48 return NULL;
49}
50EXPORT_SYMBOL(mtd_do_chip_probe);
51
52
53static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
54{
55 struct cfi_private cfi;
56 struct cfi_private *retcfi;
57 unsigned long *chip_map;
58 int i, j, mapsize;
59 int max_chips;
60
61 memset(&cfi, 0, sizeof(cfi));
62
63 /* Call the probetype-specific code with all permutations of
64 interleave and device type, etc. */
65 if (!genprobe_new_chip(map, cp, &cfi)) {
66 /* The probe didn't like it */
67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
68 cp->name, map->name);
69 return NULL;
70 }
71
72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
73 probe routines won't ever return a broken CFI structure anyway,
74 because they make them up themselves.
75 */
76 if (cfi.cfiq->NumEraseRegions == 0) {
77 printk(KERN_WARNING "Number of erase regions is zero\n");
78 kfree(cfi.cfiq);
79 return NULL;
80 }
81#endif
82 cfi.chipshift = cfi.cfiq->DevSize;
83
84 if (cfi_interleave_is_1(&cfi)) {
85 ;
86 } else if (cfi_interleave_is_2(&cfi)) {
87 cfi.chipshift++;
88 } else if (cfi_interleave_is_4(&cfi)) {
89 cfi.chipshift += 2;
90 } else if (cfi_interleave_is_8(&cfi)) {
91 cfi.chipshift += 3;
92 } else {
93 BUG();
94 }
95
96 cfi.numchips = 1;
97
98 /*
99 * Allocate memory for bitmap of valid chips.
100 * Align bitmap storage size to full byte.
101 */
102 max_chips = map->size >> cfi.chipshift;
103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
104 chip_map = kmalloc(mapsize, GFP_KERNEL);
105 if (!chip_map) {
106 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
107 kfree(cfi.cfiq);
108 return NULL;
109 }
110 memset (chip_map, 0, mapsize);
111
112 set_bit(0, chip_map); /* Mark first chip valid */
113
114 /*
115 * Now probe for other chips, checking sensibly for aliases while
116 * we're at it. The new_chip probe above should have left the first
117 * chip in read mode.
118 */
119
120 for (i = 1; i < max_chips; i++) {
121 cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
122 }
123
124 /*
125 * Now allocate the space for the structures we need to return to
126 * our caller, and copy the appropriate data into them.
127 */
128
129 retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
130
131 if (!retcfi) {
132 printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
133 kfree(cfi.cfiq);
134 kfree(chip_map);
135 return NULL;
136 }
137
138 memcpy(retcfi, &cfi, sizeof(cfi));
139 memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
140
141 for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
142 if(test_bit(i, chip_map)) {
143 struct flchip *pchip = &retcfi->chips[j++];
144
145 pchip->start = (i << cfi.chipshift);
146 pchip->state = FL_READY;
147 init_waitqueue_head(&pchip->wq);
148 spin_lock_init(&pchip->_spinlock);
149 pchip->mutex = &pchip->_spinlock;
150 }
151 }
152
153 kfree(chip_map);
154 return retcfi;
155}
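/*
 * A worked example of the bookkeeping above, with hypothetical values:
 * a CFI DevSize of 21 denotes a 2MiB chip (1 << 21 bytes). With 2-way
 * interleave, chipshift becomes 22, so an 8MiB map gives
 * max_chips = (8 << 20) >> 22 = 2 probe positions and a chip bitmap of
 * mapsize = (2 / 8) + 1 = 1 byte.
 */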
156
157
158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
159 struct cfi_private *cfi)
160{
161 int min_chips = (map_bankwidth(map)/4?:1); /* Devices are at most 4 bytes wide... */
162 int max_chips = map_bankwidth(map); /* ...and at least 1 byte wide */
163 int nr_chips, type;
164
165 for (nr_chips = min_chips; nr_chips <= max_chips; nr_chips <<= 1) {
166
167 if (!cfi_interleave_supported(nr_chips))
168 continue;
169
170 cfi->interleave = nr_chips;
171
172 /* Minimum device size. Don't look for one 8-bit device
173 in a 16-bit bus, etc. */
174 type = map_bankwidth(map) / nr_chips;
175
176 for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
177 cfi->device_type = type;
178
179 if (cp->probe_chip(map, 0, NULL, cfi))
180 return 1;
181 }
182 }
183 return 0;
184}
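/*
 * Schematically, for a hypothetical 4-byte-wide map the loop above
 * tries these (interleave, device type) pairings, where
 * CFI_DEVICETYPE_X8/X16/X32 are 1/2/4:
 *
 *	nr_chips = 1: type 4 (x32)
 *	nr_chips = 2: type 2 (x16), then 4 (x32)
 *	nr_chips = 4: type 1 (x8), then 2 (x16), then 4 (x32)
 *
 * The first pairing whose probe_chip() call succeeds wins.
 */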
185
186typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
187
188extern cfi_cmdset_fn_t cfi_cmdset_0001;
189extern cfi_cmdset_fn_t cfi_cmdset_0002;
190extern cfi_cmdset_fn_t cfi_cmdset_0020;
191
192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
193 int primary)
194{
195 struct cfi_private *cfi = map->fldrv_priv;
196 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
197#if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE)
198 char probename[32];
199 cfi_cmdset_fn_t *probe_function;
200
201 sprintf(probename, "cfi_cmdset_%4.4X", type);
202
203 probe_function = inter_module_get_request(probename, probename);
204
205 if (probe_function) {
206 struct mtd_info *mtd;
207
208 mtd = (*probe_function)(map, primary);
209 /* If it was happy, it'll have increased its own use count */
210 inter_module_put(probename);
211 return mtd;
212 }
213#endif
214 printk(KERN_NOTICE "Support for command set %04X not present\n",
215 type);
216
217 return NULL;
218}
219
220static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
221{
222 struct cfi_private *cfi = map->fldrv_priv;
223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
224
225 if (type == P_ID_NONE || type == P_ID_RESERVED)
226 return NULL;
227
228 switch(type){
229 /* Urgh. Ifdefs. The version with weak symbols was
230 * _much_ nicer. Shame it didn't seem to work on
231 * anything but x86, really.
232 * But we can't rely on inter_module_get() because
233 * that'd mean we depend on link order.
234 */
235#ifdef CONFIG_MTD_CFI_INTELEXT
236 case 0x0001:
237 case 0x0003:
238 return cfi_cmdset_0001(map, primary);
239#endif
240#ifdef CONFIG_MTD_CFI_AMDSTD
241 case 0x0002:
242 return cfi_cmdset_0002(map, primary);
243#endif
244#ifdef CONFIG_MTD_CFI_STAA
245 case 0x0020:
246 return cfi_cmdset_0020(map, primary);
247#endif
248 }
249
250 return cfi_cmdset_unknown(map, primary);
251}
252
253MODULE_LICENSE("GPL");
254MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
255MODULE_DESCRIPTION("Helper routines for flash chip probe code");
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
new file mode 100644
index 000000000000..62d235a9a4e2
--- /dev/null
+++ b/drivers/mtd/chips/jedec.c
@@ -0,0 +1,934 @@
1
2/* JEDEC Flash Interface.
3 * This is an older type of interface for self-programming flash. It is
4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips.
7 *
8 * See the AMD flash databook for information on how to operate the interface.
9 *
10 * This code does not support anything wider than 8-bit flash chips; I am
11 * not going to guess how to send commands to them, and I expect they will
12 * all speak CFI anyway.
13 *
14 * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/mtd/jedec.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/compatmac.h>
24
25static struct mtd_info *jedec_probe(struct map_info *);
26static int jedec_probe8(struct map_info *map,unsigned long base,
27 struct jedec_private *priv);
28static int jedec_probe16(struct map_info *map,unsigned long base,
29 struct jedec_private *priv);
30static int jedec_probe32(struct map_info *map,unsigned long base,
31 struct jedec_private *priv);
32static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
33 unsigned long len);
34static int flash_erase(struct mtd_info *mtd, struct erase_info *instr);
35static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
36 size_t *retlen, const u_char *buf);
37
38static unsigned long my_bank_size;
39
40/* Listing of parts and sizes. We need this table to learn the sector
41 size of the chip and the total length */
42static const struct JEDECTable JEDEC_table[] = {
43 {
44 .jedec = 0x013D,
45 .name = "AMD Am29F017D",
46 .size = 2*1024*1024,
47 .sectorsize = 64*1024,
48 .capabilities = MTD_CAP_NORFLASH
49 },
50 {
51 .jedec = 0x01AD,
52 .name = "AMD Am29F016",
53 .size = 2*1024*1024,
54 .sectorsize = 64*1024,
55 .capabilities = MTD_CAP_NORFLASH
56 },
57 {
58 .jedec = 0x01D5,
59 .name = "AMD Am29F080",
60 .size = 1*1024*1024,
61 .sectorsize = 64*1024,
62 .capabilities = MTD_CAP_NORFLASH
63 },
64 {
65 .jedec = 0x01A4,
66 .name = "AMD Am29F040",
67 .size = 512*1024,
68 .sectorsize = 64*1024,
69 .capabilities = MTD_CAP_NORFLASH
70 },
71 {
72 .jedec = 0x20E3,
73 .name = "AMD Am29W040B",
74 .size = 512*1024,
75 .sectorsize = 64*1024,
76 .capabilities = MTD_CAP_NORFLASH
77 },
78 {
79 .jedec = 0xC2AD,
80 .name = "Macronix MX29F016",
81 .size = 2*1024*1024,
82 .sectorsize = 64*1024,
83 .capabilities = MTD_CAP_NORFLASH
84 },
85 { .jedec = 0x0 }
86};
87
88static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
89static void jedec_sync(struct mtd_info *mtd) {}
90static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
91 size_t *retlen, u_char *buf);
92static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
93 size_t *retlen, u_char *buf);
94
95static struct mtd_info *jedec_probe(struct map_info *map);
96
97
98
99static struct mtd_chip_driver jedec_chipdrv = {
100 .probe = jedec_probe,
101 .name = "jedec",
102 .module = THIS_MODULE
103};
104
105/* Probe entry point */
106
107static struct mtd_info *jedec_probe(struct map_info *map)
108{
109 struct mtd_info *MTD;
110 struct jedec_private *priv;
111 unsigned long Base;
112 unsigned long SectorSize;
113 unsigned count;
114 unsigned I,Uniq;
115 char Part[200];
116
117
118 MTD = kmalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
119 if (!MTD)
120 return NULL;
121
122 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
123 priv = (struct jedec_private *)&MTD[1];
124
125 my_bank_size = map->size;
126
127 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
128 {
129 printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
130 kfree(MTD);
131 return NULL;
132 }
133
134 for (Base = 0; Base < map->size; Base += my_bank_size)
135 {
136 // Perhaps zero could designate all tests?
137 if (map->buswidth == 0)
138 map->buswidth = 1;
139
140 if (map->buswidth == 1){
141 if (jedec_probe8(map,Base,priv) == 0) {
142 printk("did recognize jedec chip\n");
143 kfree(MTD);
144 return NULL;
145 }
146 }
147 if (map->buswidth == 2)
148 jedec_probe16(map,Base,priv);
149 if (map->buswidth == 4)
150 jedec_probe32(map,Base,priv);
151 }
152
153 // Get the biggest sector size
154 SectorSize = 0;
155 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
156 {
157 // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
158 // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
159 if (priv->chips[I].sectorsize > SectorSize)
160 SectorSize = priv->chips[I].sectorsize;
161 }
162
163 // Quickly ensure that the other sector sizes are factors of the largest
164 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
165 {
166 if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
167 {
168 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
169 kfree(MTD);
170 return NULL;
171 }
172 }
173
174 /* Generate a part name that includes the number of different chips and
175 other configuration information */
176 count = 1;
177 strlcpy(Part,map->name,sizeof(Part)-10);
178 strcat(Part," ");
179 Uniq = 0;
180 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
181 {
182 const struct JEDECTable *JEDEC;
183
184 if (priv->chips[I+1].jedec == priv->chips[I].jedec)
185 {
186 count++;
187 continue;
188 }
189
190 // Locate the chip in the jedec table
191 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
192 if (JEDEC == 0)
193 {
194 printk("mtd: Internal Error, JEDEC not set\n");
195 kfree(MTD);
196 return NULL;
197 }
198
199 if (Uniq != 0)
200 strcat(Part,",");
201 Uniq++;
202
203 if (count != 1)
204 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
205 else
206 sprintf(Part+strlen(Part),"%s",JEDEC->name);
207 if (strlen(Part) > sizeof(Part)*2/3)
208 break;
209 count = 1;
210 }
211
212 /* Determine if the chips are organized in a linear fashion, or if there
213 are empty banks. Note, the last bank does not count here, only the
215 first banks are important. Holes on non-bank boundaries cannot exist
215 due to the way the detection algorithm works. */
216 if (priv->size < my_bank_size)
217 my_bank_size = priv->size;
218 priv->is_banked = 0;
219 //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
220 //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
221 if (!priv->size) {
222 printk("priv->size is zero\n");
223 kfree(MTD);
224 return NULL;
225 }
226 if (priv->size/my_bank_size) {
227 if (priv->size/my_bank_size == 1) {
228 priv->size = my_bank_size;
229 }
230 else {
231 for (I = 0; I != priv->size/my_bank_size - 1; I++)
232 {
233 if (priv->bank_fill[I] != my_bank_size)
234 priv->is_banked = 1;
235
236 /* Even this could be eliminated, but new de-optimized read/write
237 functions would have to be written */
238 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
239 if (priv->bank_fill[I] != priv->bank_fill[0])
240 {
241 printk("mtd: Failed. Cannot handle unsymmetric banking\n");
242 kfree(MTD);
243 return NULL;
244 }
245 }
246 }
247 }
248 if (priv->is_banked == 1)
249 strcat(Part,", banked");
250
251 // printk("Part: '%s'\n",Part);
252
253 memset(MTD,0,sizeof(*MTD));
254 // strlcpy(MTD->name,Part,sizeof(MTD->name));
255 MTD->name = map->name;
256 MTD->type = MTD_NORFLASH;
257 MTD->flags = MTD_CAP_NORFLASH;
258 MTD->erasesize = SectorSize*(map->buswidth);
259 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
260 MTD->size = priv->size;
261 // printk("MTD->size is %x\n",(unsigned int)MTD->size);
262 //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
263 MTD->erase = flash_erase;
264 if (priv->is_banked == 1)
265 MTD->read = jedec_read_banked;
266 else
267 MTD->read = jedec_read;
268 MTD->write = flash_write;
269 MTD->sync = jedec_sync;
270 MTD->priv = map;
271 map->fldrv_priv = priv;
272 map->fldrv = &jedec_chipdrv;
273 __module_get(THIS_MODULE);
274 return MTD;
275}
276
277/* Helper for the JEDEC function, JEDEC numbers all have odd parity */
278static int checkparity(u_char C)
279{
280 u_char parity = 0;
281 while (C != 0)
282 {
283 parity ^= C & 1;
284 C >>= 1;
285 }
286
287 return parity == 1;
288}
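/*
 * For example, AMD's manufacturer code 0x01 has one set bit (odd
 * parity) and is accepted, while a bogus 0x03 has two set bits (even
 * parity) and is rejected:
 *
 *	checkparity(0x01);	returns 1 (valid JEDEC number)
 *	checkparity(0x03);	returns 0 (rejected)
 */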
289
290
291/* Take an array of JEDEC numbers that represent interleaved flash chips
292 and process them. Check to make sure they are good JEDEC numbers, look
293 them up and then add them to the chip list */
294static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
295 unsigned long base,struct jedec_private *priv)
296{
297 unsigned I,J;
298 unsigned long Size;
299 unsigned long SectorSize;
300 const struct JEDECTable *JEDEC;
301
302 // Test #2 JEDEC numbers exhibit odd parity
303 for (I = 0; I != Count; I++)
304 {
305 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
306 return 0;
307 }
308
309 // Finally, just make sure all the chip sizes are the same
310 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
311
312 if (JEDEC == 0)
313 {
314 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
315 return 0;
316 }
317
318 Size = JEDEC->size;
319 SectorSize = JEDEC->sectorsize;
320 for (I = 0; I != Count; I++)
321 {
322 JEDEC = jedec_idtoinf(Mfg[I],Id[I]);
323 if (JEDEC == 0)
324 {
325 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
326 return 0;
327 }
328
329 if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize)
330 {
331 printk("mtd: Failed. Interleved flash does not have matching characteristics\n");
332 return 0;
333 }
334 }
335
336 // Load the Chips
337 for (I = 0; I != MAX_JEDEC_CHIPS; I++)
338 {
339 if (priv->chips[I].jedec == 0)
340 break;
341 }
342
343 if (I + Count > MAX_JEDEC_CHIPS)
344 {
345 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
346 return 0;
347 }
348
349 // Add them to the table
350 for (J = 0; J != Count; J++)
351 {
352 unsigned long Bank;
353
354 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
355 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
356 priv->chips[I].size = JEDEC->size;
357 priv->chips[I].sectorsize = JEDEC->sectorsize;
358 priv->chips[I].base = base + J;
359 priv->chips[I].datashift = J*8;
360 priv->chips[I].capabilities = JEDEC->capabilities;
361 priv->chips[I].offset = priv->size + J;
362
363 // log2 n :|
364 priv->chips[I].addrshift = 0;
365 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
366
367 // Determine how filled this bank is.
368 Bank = base & (~(my_bank_size-1));
369 if (priv->bank_fill[Bank/my_bank_size] < base +
370 (JEDEC->size << priv->chips[I].addrshift) - Bank)
371 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
372 I++;
373 }
374
375 priv->size += priv->chips[I-1].size*Count;
376
377 return priv->chips[I-1].size;
378}
379
380/* Lookup the chip information from the JEDEC ID table. */
381static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
382{
383 __u16 Id = (mfr << 8) | id;
384 unsigned long I = 0;
385 for (I = 0; JEDEC_table[I].jedec != 0; I++)
386 if (JEDEC_table[I].jedec == Id)
387 return JEDEC_table + I;
388 return NULL;
389}
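/*
 * Example lookup: jedec_idtoinf(0x01, 0xAD) forms the id 0x01AD and
 * returns the "AMD Am29F016" entry above (2MiB, 64KiB sectors).
 */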
390
391// Look for flash using an 8 bit bus interface
392static int jedec_probe8(struct map_info *map,unsigned long base,
393 struct jedec_private *priv)
394{
395 #define flread(x) map_read8(map,base+x)
396 #define flwrite(v,x) map_write8(map,v,base+x)
397
398 const unsigned long AutoSel1 = 0xAA;
399 const unsigned long AutoSel2 = 0x55;
400 const unsigned long AutoSel3 = 0x90;
401 const unsigned long Reset = 0xF0;
402 __u32 OldVal;
403 __u8 Mfg[1];
404 __u8 Id[1];
405 unsigned I;
406 unsigned long Size;
407
408 // Wait for any write/erase operation to settle
409 OldVal = flread(0);
410 for (I = 0; OldVal != flread(0) && I < 10000; I++)
411 OldVal = flread(0);
412
413 // Reset the chip
414 flwrite(Reset,0x555);
415
416 // Send the sequence
417 flwrite(AutoSel1,0x555);
418 flwrite(AutoSel2,0x2AA);
419 flwrite(AutoSel3,0x555);
420
421 // Get the JEDEC numbers
422 Mfg[0] = flread(0);
423 Id[0] = flread(1);
424 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
425
426 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
427 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
428 if (Size == 0)
429 {
430 flwrite(Reset,0x555);
431 return 0;
432 }
433
434
435 // Reset.
436 flwrite(Reset,0x555);
437
438 return 1;
439
440 #undef flread
441 #undef flwrite
442}
443
444// Look for flash using a 16 bit bus interface (i.e. two 8-bit chips)
445static int jedec_probe16(struct map_info *map,unsigned long base,
446 struct jedec_private *priv)
447{
448 return 0; /* 16-bit probing was never implemented; the probe always fails */
449}
450
451// Look for flash using a 32 bit bus interface (i.e. four 8-bit chips)
452static int jedec_probe32(struct map_info *map,unsigned long base,
453 struct jedec_private *priv)
454{
455 #define flread(x) map_read32(map,base+((x)<<2))
456 #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
457
458 const unsigned long AutoSel1 = 0xAAAAAAAA;
459 const unsigned long AutoSel2 = 0x55555555;
460 const unsigned long AutoSel3 = 0x90909090;
461 const unsigned long Reset = 0xF0F0F0F0;
462 __u32 OldVal;
463 __u8 Mfg[4];
464 __u8 Id[4];
465 unsigned I;
466 unsigned long Size;
467
468 // Wait for any write/erase operation to settle
469 OldVal = flread(0);
470 for (I = 0; OldVal != flread(0) && I < 10000; I++)
471 OldVal = flread(0);
472
473 // Reset the chip
474 flwrite(Reset,0x555);
475
476 // Send the sequence
477 flwrite(AutoSel1,0x555);
478 flwrite(AutoSel2,0x2AA);
479 flwrite(AutoSel3,0x555);
480
481 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
482 if (flread(0) != flread(0x100) ||
483 flread(1) != flread(0x101))
484 {
485 flwrite(Reset,0x555);
486 return 0;
487 }
488
489 // Split up the JEDEC numbers
490 OldVal = flread(0);
491 for (I = 0; I != 4; I++)
492 Mfg[I] = (OldVal >> (I*8));
493 OldVal = flread(1);
494 for (I = 0; I != 4; I++)
495 Id[I] = (OldVal >> (I*8));
496
497 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
498 if (Size == 0)
499 {
500 flwrite(Reset,0x555);
501 return 0;
502 }
503
504 /* Check if there is address wraparound within a single bank; if this
505 returns JEDEC numbers then we assume that it is wraparound. Note that
506 we call this routine with the JEDEC return still enabled, so if two or
507 more flashes have a truncated address space the probe test will still
508 work */
509 if (base + (Size<<2)+0x555 < map->size &&
510 base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
511 {
512 if (flread(Size) != flread(Size + 0x100) ||
513 flread(Size + 1) != flread(Size + 0x101))
514 {
515 jedec_probe32(map,base+(Size<<2),priv);
516 }
517 }
518
519 // Reset.
520 flwrite(Reset,0x555);
521
522 return 1;
523
524 #undef flread
525 #undef flwrite
526}
527
528/* Linear read. */
529static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
530 size_t *retlen, u_char *buf)
531{
532 struct map_info *map = mtd->priv;
533
534 map_copy_from(map, buf, from, len);
535 *retlen = len;
536 return 0;
537}
538
539/* Banked read. Take special care to jump past the holes in the bank
540 mapping. This version assumes symmetry in the holes. */
541static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
542 size_t *retlen, u_char *buf)
543{
544 struct map_info *map = mtd->priv;
545 struct jedec_private *priv = map->fldrv_priv;
546
547 *retlen = 0;
548 while (len > 0)
549 {
550 // Determine what bank and offset into that bank the first byte is
551 unsigned long bank = from & (~(priv->bank_fill[0]-1));
552 unsigned long offset = from & (priv->bank_fill[0]-1);
553 unsigned long get = len;
554 if (priv->bank_fill[0] - offset < len)
555 get = priv->bank_fill[0] - offset;
556
557 bank /= priv->bank_fill[0];
558 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
559
560 len -= get;
561 *retlen += get;
562 from += get;
563 }
564 return 0;
565}
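/*
 * A sketch of the translation above with hypothetical values
 * (bank_fill[0] == 0x200000, my_bank_size == 0x400000, from == 0x250000):
 *
 *	bank   = 0x250000 & ~(0x200000 - 1);	= 0x200000
 *	offset = 0x250000 &  (0x200000 - 1);	= 0x050000
 *	bank  /= 0x200000;			= 1 (bank index)
 *
 * so the copy reads from 1 * 0x400000 + 0x50000 = 0x450000 in the map.
 */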
566
567/* Pass the status value that the flash returned before it re-entered read
568 mode. */
569static void jedec_flash_failed(unsigned char code)
570{
571 /* Bit 5 being high indicates that there was an internal device
572 failure, erasure time limits exceeded or something */
573 if ((code & (1 << 5)) != 0)
574 {
575 printk("mtd: Internal Flash failure\n");
576 return;
577 }
578 printk("mtd: Programming didn't take\n");
579}
580
581/* This uses the erasure function described in the AMD Flash Handbook;
582 it will work for flashes with a fixed sector size only. Flashes with
583 a selection of sector sizes (e.g. the AMD Am29F800B) will need a different
584 routine. This routine tries to parallelize erasing multiple chips/sectors
585 where possible */
586static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
587{
588 // Does IO to the currently selected chip
589 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
590 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
591
592 unsigned long Time = 0;
593 unsigned long NoTime = 0;
594 unsigned long start = instr->addr, len = instr->len;
595 unsigned int I;
596 struct map_info *map = mtd->priv;
597 struct jedec_private *priv = map->fldrv_priv;
598
599 // Verify the arguments..
600 if (start + len > mtd->size ||
601 (start % mtd->erasesize) != 0 ||
602 (len % mtd->erasesize) != 0 ||
603 (len/mtd->erasesize) == 0)
604 return -EINVAL;
605
606 jedec_flash_chip_scan(priv,start,len);
607
608 // Start the erase sequence on each chip
609 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
610 {
611 unsigned long off;
612 struct jedec_flash_chip *chip = priv->chips + I;
613
614 if (chip->length == 0)
615 continue;
616
617 if (chip->start + chip->length > chip->size)
618 {
619 printk("DIE\n");
620 return -EIO;
621 }
622
623 flwrite(0xF0,chip->start + 0x555);
624 flwrite(0xAA,chip->start + 0x555);
625 flwrite(0x55,chip->start + 0x2AA);
626 flwrite(0x80,chip->start + 0x555);
627 flwrite(0xAA,chip->start + 0x555);
628 flwrite(0x55,chip->start + 0x2AA);
629
630 /* Once we start selecting the erase sectors the delay between each
631 command must not exceed 50us or it will immediately start erasing
632 and ignore the other sectors */
633 for (off = 0; off < len; off += chip->sectorsize)
634 {
635 // Check to make sure we didn't timeout
636 flwrite(0x30,chip->start + off);
637 if (off == 0)
638 continue;
639 if ((flread(chip->start + off) & (1 << 3)) != 0)
640 {
641 printk("mtd: Ack! We timed out the erase timer!\n");
642 return -EIO;
643 }
644 }
645 }
646
647 /* We could split this into a timer routine and return early, performing
648 background erasure. Maybe later, if the need warrants it */
649
650 /* Poll the flash for erasure completion, specs say this can take as long
651 as 480 seconds to do all the sectors (for a 2 meg flash).
652 Erasure time is dependent on chip age, temp and wear.. */
653
654 /* This being a generic routine assumes a 32 bit bus. It does read32s
655 and bundles interleaved chips into the same grouping. This will work
656 for all bus widths */
657 Time = 0;
658 NoTime = 0;
659 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
660 {
661 struct jedec_flash_chip *chip = priv->chips + I;
662 unsigned long off = 0;
663 unsigned todo[4] = {0,0,0,0};
664 unsigned todo_left = 0;
665 unsigned J;
666
667 if (chip->length == 0)
668 continue;
669
670 /* Find all chips in this data line, realistically this is all
671 or nothing up to the interleave count */
672 for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
673 {
674 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
675 (chip->base & (~((1<<chip->addrshift)-1))))
676 {
677 todo_left++;
678 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
679 }
680 }
681
682 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
683 (short)todo[2],(short)todo[3]);
684 */
685 while (1)
686 {
687 __u32 Last[4];
688 unsigned long Count = 0;
689
690 /* During erase bit 7 is held low and bit 6 toggles; we watch this, and
691 should it stop toggling or go high then the erase is completed,
692 or this is not really flash ;> */
693 switch (map->buswidth) {
694 case 1:
695 Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
696 Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
697 Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
698 break;
699 case 2:
700 Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
701 Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
702 Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
703 break;
704 case 4:
705 Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
706 Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
707 Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
708 break;
709 }
710 Count = 3;
711 while (todo_left != 0)
712 {
713 for (J = 0; J != 4; J++)
714 {
715 __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF;
716 __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF;
717 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
718 if (todo[J] == 0)
719 continue;
720
721 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
722 {
723// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
724 continue;
725 }
726
727 if (Byte1 == Byte2)
728 {
729 jedec_flash_failed(Byte3);
730 return -EIO;
731 }
732
733 todo[J] = 0;
734 todo_left--;
735 }
736
737/* if (NoTime == 0)
738 Time += HZ/10 - schedule_timeout(HZ/10);*/
739 NoTime = 0;
740
741 switch (map->buswidth) {
742 case 1:
743 Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
744 break;
745 case 2:
746 Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
747 break;
748 case 4:
749 Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
750 break;
751 }
752 Count++;
753
754/* // Count time, max of 15s per sector (according to AMD)
755 if (Time > 15*len/mtd->erasesize*HZ)
756 {
757 printk("mtd: Flash Erase Timed out\n");
758 return -EIO;
759 } */
760 }
761
762 // Skip to the next chip if we used chip erase
763 if (chip->length == chip->size)
764 off = chip->size;
765 else
766 off += chip->sectorsize;
767
768 if (off >= chip->length)
769 break;
770 NoTime = 1;
771 }
772
773 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
774 {
775 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
776 (chip->base & (~((1<<chip->addrshift)-1))))
777 priv->chips[J].length = 0;
778 }
779 }
780
781 //printk("done\n");
782 instr->state = MTD_ERASE_DONE;
783 mtd_erase_callback(instr);
784 return 0;
785
786 #undef flread
787 #undef flwrite
788}
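/*
 * The test above is based on AMD's "toggle bit" status algorithm;
 * distilled to one chip, and ignoring timeouts, the standard form is
 * essentially (sketch only, not driver code):
 *
 *	a = map_read8(map, adr);
 *	b = map_read8(map, adr);
 *	while (a != b) {		bit 6 still toggling: busy
 *		if (a & (1 << 5))	bit 5 set while busy: failure
 *			return -EIO;
 *		a = b;
 *		b = map_read8(map, adr);
 *	}
 */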
789
790/* This is the simple flash writing function. It writes to every byte, in
791 sequence. It takes care of how to properly address the flash if
792 the flash is interleaved. It can only be used if all the chips in the
793 array are identical! */
794static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
795 size_t *retlen, const u_char *buf)
796{
797 /* Does IO to the currently selected chip. It takes the bank addressing
798 base (which is divisible by the chip size), adds the necessary lower bits
799 of addrshift (interleave index) and then adds the control register index. */
800 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
801 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802
803 struct map_info *map = mtd->priv;
804 struct jedec_private *priv = map->fldrv_priv;
805 unsigned long base;
806 unsigned long off;
807 size_t save_len = len;
808
809 if (start + len > mtd->size)
810 return -EIO;
811
812 //printk("Here");
813
814 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
815 while (len != 0)
816 {
817 struct jedec_flash_chip *chip = priv->chips;
818 unsigned long bank;
819 unsigned long boffset;
820
821 // Compute the base of the flash.
822 off = ((unsigned long)start) % (chip->size << chip->addrshift);
823 base = start - off;
824
825 // Perform banked addressing translation.
826 bank = base & (~(priv->bank_fill[0]-1));
827 boffset = base & (priv->bank_fill[0]-1);
828 bank = (bank/priv->bank_fill[0])*my_bank_size;
829 base = bank + boffset;
830
831 // printk("Flasing %X %X %X\n",base,chip->size,len);
832 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
833
834 // Loop over this page
835 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
836 {
837 unsigned char oldbyte = map_read8(map,base+off);
838 unsigned char Last[4];
839 unsigned long Count = 0;
840
841 if (oldbyte == *buf) {
842 // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len);
843 continue;
844 }
845 if (((~oldbyte) & *buf) != 0)
846 printk("mtd: warn: Trying to set a 0 to a 1\n");
847
848 // Write
849 flwrite(0xAA,0x555);
850 flwrite(0x55,0x2AA);
851 flwrite(0xA0,0x555);
852 map_write8(map,*buf,base + off);
853 Last[0] = map_read8(map,base + off);
854 Last[1] = map_read8(map,base + off);
855 Last[2] = map_read8(map,base + off);
856
857 /* Wait for the flash to finish the operation. We store the last 4
858 status bytes that have been retrieved so we can determine why
859 it failed. The toggle bits keep toggling when there is a
860 failure */
861 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
862 Count < 10000; Count++)
863 Last[Count % 4] = map_read8(map,base + off);
864 if (Last[(Count - 1) % 4] != *buf)
865 {
866 jedec_flash_failed(Last[(Count - 3) % 4]);
867 return -EIO;
868 }
869 }
870 }
871 *retlen = save_len;
872 return 0;
873}
874
875/* This is used to enhance the speed of the erase routine;
876 when things are being done to multiple chips it is possible to
877 parallelize the operations, and full memory erases of multi-
878 chip memories particularly benefit */
879static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
880 unsigned long len)
881{
882 unsigned int I;
883
884 // Zero the records
885 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
886 priv->chips[I].start = priv->chips[I].length = 0;
887
888 // Intersect the region with each chip
889 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
890 {
891 struct jedec_flash_chip *chip = priv->chips + I;
892 unsigned long ByteStart;
893 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
894
895 // End is before this chip or the start is after it
896 if (start+len < chip->offset ||
897 ChipEndByte - (1 << chip->addrshift) < start)
898 continue;
899
900 if (start < chip->offset)
901 {
902 ByteStart = chip->offset;
903 chip->start = 0;
904 }
905 else
906 {
907 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
908 ByteStart = start;
909 }
910
911 if (start + len >= ChipEndByte)
912 chip->length = (ChipEndByte - ByteStart) >> chip->addrshift;
913 else
914 chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
915 }
916}
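/*
 * Worked example with hypothetical values: a chip at offset 0x100000,
 * size 0x200000, addrshift 1 (2-way interleave), and an erase with
 * start = 0x180000, len = 0x100000:
 *
 *	ChipEndByte  = 0x100000 + (0x200000 << 1)	= 0x500000
 *	chip->start  = (0x180000 - 0x100000 + 1) >> 1	= 0x40000
 *	chip->length = (0x280000 - 0x180000 + 1) >> 1	= 0x80000
 *
 * i.e. the erase covers 0x80000 in-chip units starting 0x40000 into
 * this chip.
 */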
917
918int __init jedec_init(void)
919{
920 register_mtd_chip_driver(&jedec_chipdrv);
921 return 0;
922}
923
924static void __exit jedec_exit(void)
925{
926 unregister_mtd_chip_driver(&jedec_chipdrv);
927}
928
929module_init(jedec_init);
930module_exit(jedec_exit);
931
932MODULE_LICENSE("GPL");
933MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
934MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
new file mode 100644
index 000000000000..30325a25ab95
--- /dev/null
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -0,0 +1,2127 @@
1/*
2 JEDEC (non-CFI) flash chip probe code.
3 (C) 2000 Red Hat. GPL'd.
4 $Id: jedec_probe.c,v 1.61 2004/11/19 20:52:16 thayne Exp $
5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
6 for the standard this probe goes back to.
7
8 Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
9*/
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <asm/io.h>
17#include <asm/byteorder.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h>
21
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/cfi.h>
26#include <linux/mtd/gen_probe.h>
27
28/* Manufacturers */
29#define MANUFACTURER_AMD 0x0001
30#define MANUFACTURER_ATMEL 0x001f
31#define MANUFACTURER_FUJITSU 0x0004
32#define MANUFACTURER_HYUNDAI 0x00AD
33#define MANUFACTURER_INTEL 0x0089
34#define MANUFACTURER_MACRONIX 0x00C2
35#define MANUFACTURER_NEC 0x0010
36#define MANUFACTURER_PMC 0x009D
37#define MANUFACTURER_SST 0x00BF
38#define MANUFACTURER_ST 0x0020
39#define MANUFACTURER_TOSHIBA 0x0098
40#define MANUFACTURER_WINBOND 0x00da
41
42
43/* AMD */
44#define AM29DL800BB 0x22C8
45#define AM29DL800BT 0x224A
46
47#define AM29F800BB 0x2258
48#define AM29F800BT 0x22D6
49#define AM29LV400BB 0x22BA
50#define AM29LV400BT 0x22B9
51#define AM29LV800BB 0x225B
52#define AM29LV800BT 0x22DA
53#define AM29LV160DT 0x22C4
54#define AM29LV160DB 0x2249
55#define AM29F017D 0x003D
56#define AM29F016D 0x00AD
57#define AM29F080 0x00D5
58#define AM29F040 0x00A4
59#define AM29LV040B 0x004F
60#define AM29F032B 0x0041
61#define AM29F002T 0x00B0
62
63/* Atmel */
64#define AT49BV512 0x0003
65#define AT29LV512 0x003d
66#define AT49BV16X 0x00C0
67#define AT49BV16XT 0x00C2
68#define AT49BV32X 0x00C8
69#define AT49BV32XT 0x00C9
70
71/* Fujitsu */
72#define MBM29F040C 0x00A4
73#define MBM29LV650UE 0x22D7
74#define MBM29LV320TE 0x22F6
75#define MBM29LV320BE 0x22F9
76#define MBM29LV160TE 0x22C4
77#define MBM29LV160BE 0x2249
78#define MBM29LV800BA 0x225B
79#define MBM29LV800TA 0x22DA
80#define MBM29LV400TC 0x22B9
81#define MBM29LV400BC 0x22BA
82
83/* Hyundai */
84#define HY29F002T 0x00B0
85
86/* Intel */
87#define I28F004B3T 0x00d4
88#define I28F004B3B 0x00d5
89#define I28F400B3T 0x8894
90#define I28F400B3B 0x8895
91#define I28F008S5 0x00a6
92#define I28F016S5 0x00a0
93#define I28F008SA 0x00a2
94#define I28F008B3T 0x00d2
95#define I28F008B3B 0x00d3
96#define I28F800B3T 0x8892
97#define I28F800B3B 0x8893
98#define I28F016S3 0x00aa
99#define I28F016B3T 0x00d0
100#define I28F016B3B 0x00d1
101#define I28F160B3T 0x8890
102#define I28F160B3B 0x8891
103#define I28F320B3T 0x8896
104#define I28F320B3B 0x8897
105#define I28F640B3T 0x8898
106#define I28F640B3B 0x8899
107#define I82802AB 0x00ad
108#define I82802AC 0x00ac
109
110/* Macronix */
111#define MX29LV040C 0x004F
112#define MX29LV160T 0x22C4
113#define MX29LV160B 0x2249
114#define MX29F016 0x00AD
115#define MX29F002T 0x00B0
116#define MX29F004T 0x0045
117#define MX29F004B 0x0046
118
119/* NEC */
120#define UPD29F064115 0x221C
121
122/* PMC */
123#define PM49FL002 0x006D
124#define PM49FL004 0x006E
125#define PM49FL008 0x006A
126
127/* ST - www.st.com */
128#define M29W800DT 0x00D7
129#define M29W800DB 0x005B
130#define M29W160DT 0x22C4
131#define M29W160DB 0x2249
132#define M29W040B 0x00E3
133#define M50FW040 0x002C
134#define M50FW080 0x002D
135#define M50FW016 0x002E
136#define M50LPW080 0x002F
137
138/* SST */
139#define SST29EE020 0x0010
140#define SST29LE020 0x0012
141#define SST29EE512 0x005d
142#define SST29LE512 0x003d
143#define SST39LF800 0x2781
144#define SST39LF160 0x2782
145#define SST39LF512 0x00D4
146#define SST39LF010 0x00D5
147#define SST39LF020 0x00D6
148#define SST39LF040 0x00D7
149#define SST39SF010A 0x00B5
150#define SST39SF020A 0x00B6
151#define SST49LF004B 0x0060
152#define SST49LF008A 0x005a
153#define SST49LF030A 0x001C
154#define SST49LF040A 0x0051
155#define SST49LF080A 0x005B
156
157/* Toshiba */
158#define TC58FVT160 0x00C2
159#define TC58FVB160 0x0043
160#define TC58FVT321 0x009A
161#define TC58FVB321 0x009C
162#define TC58FVT641 0x0093
163#define TC58FVB641 0x0095
164
165/* Winbond */
166#define W49V002A 0x00b0
167
168
169/*
170 * Unlock address sets for AMD command sets.
171 * Intel command sets use MTD_UADDR_UNNECESSARY.
172 * Each identifier, except MTD_UADDR_UNNECESSARY and
173 * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
174 * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
175 * initialization need not require initializing all of the
176 * unlock addresses for all bit widths.
177 */
178enum uaddr {
179 MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
180 MTD_UADDR_0x0555_0x02AA,
181 MTD_UADDR_0x0555_0x0AAA,
182 MTD_UADDR_0x5555_0x2AAA,
183 MTD_UADDR_0x0AAA_0x0555,
184 MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
185 MTD_UADDR_UNNECESSARY, /* Does not require any address */
186};
187
188
189struct unlock_addr {
190 u32 addr1;
191 u32 addr2;
192};
193
194
195/*
196 * I don't like the fact that the first entry in unlock_addrs[]
197 * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
198 * should not be used. The problem is that structures with
199 * initializers have extra fields initialized to 0. It is _very_
200 * desirable to have the unlock address entries for unsupported
201 * data widths automatically initialized - that means that
202 * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
203 * must go unused.
204 */
205static const struct unlock_addr unlock_addrs[] = {
206 [MTD_UADDR_NOT_SUPPORTED] = {
207 .addr1 = 0xffff,
208 .addr2 = 0xffff
209 },
210
211 [MTD_UADDR_0x0555_0x02AA] = {
212 .addr1 = 0x0555,
213 .addr2 = 0x02aa
214 },
215
216 [MTD_UADDR_0x0555_0x0AAA] = {
217 .addr1 = 0x0555,
218 .addr2 = 0x0aaa
219 },
220
221 [MTD_UADDR_0x5555_0x2AAA] = {
222 .addr1 = 0x5555,
223 .addr2 = 0x2aaa
224 },
225
226 [MTD_UADDR_0x0AAA_0x0555] = {
227 .addr1 = 0x0AAA,
228 .addr2 = 0x0555
229 },
230
231 [MTD_UADDR_DONT_CARE] = {
232 .addr1 = 0x0000, /* Doesn't matter which address */
233 .addr2 = 0x0000 /* is used - must be last entry */
234 },
235
236 [MTD_UADDR_UNNECESSARY] = {
237 .addr1 = 0x0000,
238 .addr2 = 0x0000
239 }
240};
241
242
243struct amd_flash_info {
244 const __u16 mfr_id;
245 const __u16 dev_id;
246 const char *name;
247 const int DevSize;
248 const int NumEraseRegions;
249 const int CmdSet;
250 const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
251 const ulong regions[6];
252};
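/*
 * A consumer of these tables indexes uaddr[] by log2 of the device
 * width in bytes and then reads off the two unlock cycle addresses.
 * Sketch with a hypothetical x16 entry set to MTD_UADDR_0x0555_0x02AA:
 *
 *	idx = finfo->uaddr[1];		[1] selects the x16 slot
 *	unlock_addrs[idx].addr1;	= 0x0555
 *	unlock_addrs[idx].addr2;	= 0x02aa
 */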
253
254#define ERASEINFO(size,blocks) (((size)<<8)|((blocks)-1))
255
256#define SIZE_64KiB 16
257#define SIZE_128KiB 17
258#define SIZE_256KiB 18
259#define SIZE_512KiB 19
260#define SIZE_1MiB 20
261#define SIZE_2MiB 21
262#define SIZE_4MiB 22
263#define SIZE_8MiB 23
264
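/*
 * Decoding examples for the helpers above: DevSize values are log2 of
 * the device size in bytes, so SIZE_512KiB == 19 means 1 << 19 bytes.
 * ERASEINFO(0x10000,8) == (0x10000 << 8) | 7 == 0x1000007: an erase
 * region of eight 64KiB blocks, with (block count - 1) in the low byte.
 */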
265
266/*
267 * Please keep this list ordered by manufacturer!
268 * Fortunately, the list isn't searched often and so a
269 * slow, linear search isn't so bad.
270 */
271static const struct amd_flash_info jedec_table[] = {
272 {
273 .mfr_id = MANUFACTURER_AMD,
274 .dev_id = AM29F032B,
275 .name = "AMD AM29F032B",
276 .uaddr = {
277 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
278 },
279 .DevSize = SIZE_4MiB,
280 .CmdSet = P_ID_AMD_STD,
281 .NumEraseRegions= 1,
282 .regions = {
283 ERASEINFO(0x10000,64)
284 }
285 }, {
286 .mfr_id = MANUFACTURER_AMD,
287 .dev_id = AM29LV160DT,
288 .name = "AMD AM29LV160DT",
289 .uaddr = {
290 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
291 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
292 },
293 .DevSize = SIZE_2MiB,
294 .CmdSet = P_ID_AMD_STD,
295 .NumEraseRegions= 4,
296 .regions = {
297 ERASEINFO(0x10000,31),
298 ERASEINFO(0x08000,1),
299 ERASEINFO(0x02000,2),
300 ERASEINFO(0x04000,1)
301 }
302 }, {
303 .mfr_id = MANUFACTURER_AMD,
304 .dev_id = AM29LV160DB,
305 .name = "AMD AM29LV160DB",
306 .uaddr = {
307 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
308 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
309 },
310 .DevSize = SIZE_2MiB,
311 .CmdSet = P_ID_AMD_STD,
312 .NumEraseRegions= 4,
313 .regions = {
314 ERASEINFO(0x04000,1),
315 ERASEINFO(0x02000,2),
316 ERASEINFO(0x08000,1),
317 ERASEINFO(0x10000,31)
318 }
319 }, {
320 .mfr_id = MANUFACTURER_AMD,
321 .dev_id = AM29LV400BB,
322 .name = "AMD AM29LV400BB",
323 .uaddr = {
324 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
325 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
326 },
327 .DevSize = SIZE_512KiB,
328 .CmdSet = P_ID_AMD_STD,
329 .NumEraseRegions= 4,
330 .regions = {
331 ERASEINFO(0x04000,1),
332 ERASEINFO(0x02000,2),
333 ERASEINFO(0x08000,1),
334 ERASEINFO(0x10000,7)
335 }
336 }, {
337 .mfr_id = MANUFACTURER_AMD,
338 .dev_id = AM29LV400BT,
339 .name = "AMD AM29LV400BT",
340 .uaddr = {
341 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
342 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
343 },
344 .DevSize = SIZE_512KiB,
345 .CmdSet = P_ID_AMD_STD,
346 .NumEraseRegions= 4,
347 .regions = {
348 ERASEINFO(0x10000,7),
349 ERASEINFO(0x08000,1),
350 ERASEINFO(0x02000,2),
351 ERASEINFO(0x04000,1)
352 }
353 }, {
354 .mfr_id = MANUFACTURER_AMD,
355 .dev_id = AM29LV800BB,
356 .name = "AMD AM29LV800BB",
357 .uaddr = {
358 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
359 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
360 },
361 .DevSize = SIZE_1MiB,
362 .CmdSet = P_ID_AMD_STD,
363 .NumEraseRegions= 4,
364 .regions = {
365 ERASEINFO(0x04000,1),
366 ERASEINFO(0x02000,2),
367 ERASEINFO(0x08000,1),
368 ERASEINFO(0x10000,15),
369 }
370 }, {
371/* add DL */
372 .mfr_id = MANUFACTURER_AMD,
373 .dev_id = AM29DL800BB,
374 .name = "AMD AM29DL800BB",
375 .uaddr = {
376 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
377 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
378 },
379 .DevSize = SIZE_1MiB,
380 .CmdSet = P_ID_AMD_STD,
381 .NumEraseRegions= 6,
382 .regions = {
383 ERASEINFO(0x04000,1),
384 ERASEINFO(0x08000,1),
385 ERASEINFO(0x02000,4),
386 ERASEINFO(0x08000,1),
387 ERASEINFO(0x04000,1),
388 ERASEINFO(0x10000,14)
389 }
390 }, {
391 .mfr_id = MANUFACTURER_AMD,
392 .dev_id = AM29DL800BT,
393 .name = "AMD AM29DL800BT",
394 .uaddr = {
395 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
396 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
397 },
398 .DevSize = SIZE_1MiB,
399 .CmdSet = P_ID_AMD_STD,
400 .NumEraseRegions= 6,
401 .regions = {
402 ERASEINFO(0x10000,14),
403 ERASEINFO(0x04000,1),
404 ERASEINFO(0x08000,1),
405 ERASEINFO(0x02000,4),
406 ERASEINFO(0x08000,1),
407 ERASEINFO(0x04000,1)
408 }
409 }, {
410 .mfr_id = MANUFACTURER_AMD,
411 .dev_id = AM29F800BB,
412 .name = "AMD AM29F800BB",
413 .uaddr = {
414 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
415 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
416 },
417 .DevSize = SIZE_1MiB,
418 .CmdSet = P_ID_AMD_STD,
419 .NumEraseRegions= 4,
420 .regions = {
421 ERASEINFO(0x04000,1),
422 ERASEINFO(0x02000,2),
423 ERASEINFO(0x08000,1),
424 ERASEINFO(0x10000,15),
425 }
426 }, {
427 .mfr_id = MANUFACTURER_AMD,
428 .dev_id = AM29LV800BT,
429 .name = "AMD AM29LV800BT",
430 .uaddr = {
431 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
432 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
433 },
434 .DevSize = SIZE_1MiB,
435 .CmdSet = P_ID_AMD_STD,
436 .NumEraseRegions= 4,
437 .regions = {
438 ERASEINFO(0x10000,15),
439 ERASEINFO(0x08000,1),
440 ERASEINFO(0x02000,2),
441 ERASEINFO(0x04000,1)
442 }
443 }, {
444 .mfr_id = MANUFACTURER_AMD,
445 .dev_id = AM29F800BT,
446 .name = "AMD AM29F800BT",
447 .uaddr = {
448 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
449 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
450 },
451 .DevSize = SIZE_1MiB,
452 .CmdSet = P_ID_AMD_STD,
453 .NumEraseRegions= 4,
454 .regions = {
455 ERASEINFO(0x10000,15),
456 ERASEINFO(0x08000,1),
457 ERASEINFO(0x02000,2),
458 ERASEINFO(0x04000,1)
459 }
460 }, {
461 .mfr_id = MANUFACTURER_AMD,
462 .dev_id = AM29F017D,
463 .name = "AMD AM29F017D",
464 .uaddr = {
465 [0] = MTD_UADDR_DONT_CARE /* x8 */
466 },
467 .DevSize = SIZE_2MiB,
468 .CmdSet = P_ID_AMD_STD,
469 .NumEraseRegions= 1,
470 .regions = {
471 ERASEINFO(0x10000,32),
472 }
473 }, {
474 .mfr_id = MANUFACTURER_AMD,
475 .dev_id = AM29F016D,
476 .name = "AMD AM29F016D",
477 .uaddr = {
478 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
479 },
480 .DevSize = SIZE_2MiB,
481 .CmdSet = P_ID_AMD_STD,
482 .NumEraseRegions= 1,
483 .regions = {
484 ERASEINFO(0x10000,32),
485 }
486 }, {
487 .mfr_id = MANUFACTURER_AMD,
488 .dev_id = AM29F080,
489 .name = "AMD AM29F080",
490 .uaddr = {
491 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
492 },
493 .DevSize = SIZE_1MiB,
494 .CmdSet = P_ID_AMD_STD,
495 .NumEraseRegions= 1,
496 .regions = {
497 ERASEINFO(0x10000,16),
498 }
499 }, {
500 .mfr_id = MANUFACTURER_AMD,
501 .dev_id = AM29F040,
502 .name = "AMD AM29F040",
503 .uaddr = {
504 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
505 },
506 .DevSize = SIZE_512KiB,
507 .CmdSet = P_ID_AMD_STD,
508 .NumEraseRegions= 1,
509 .regions = {
510 ERASEINFO(0x10000,8),
511 }
512 }, {
513 .mfr_id = MANUFACTURER_AMD,
514 .dev_id = AM29LV040B,
515 .name = "AMD AM29LV040B",
516 .uaddr = {
517 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
518 },
519 .DevSize = SIZE_512KiB,
520 .CmdSet = P_ID_AMD_STD,
521 .NumEraseRegions= 1,
522 .regions = {
523 ERASEINFO(0x10000,8),
524 }
525 }, {
526 .mfr_id = MANUFACTURER_AMD,
527 .dev_id = AM29F002T,
528 .name = "AMD AM29F002T",
529 .uaddr = {
530 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
531 },
532 .DevSize = SIZE_256KiB,
533 .CmdSet = P_ID_AMD_STD,
534 .NumEraseRegions= 4,
535 .regions = {
536 ERASEINFO(0x10000,3),
537 ERASEINFO(0x08000,1),
538 ERASEINFO(0x02000,2),
539 ERASEINFO(0x04000,1),
540 }
541 }, {
542 .mfr_id = MANUFACTURER_ATMEL,
543 .dev_id = AT49BV512,
544 .name = "Atmel AT49BV512",
545 .uaddr = {
546 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
547 },
548 .DevSize = SIZE_64KiB,
549 .CmdSet = P_ID_AMD_STD,
550 .NumEraseRegions= 1,
551 .regions = {
552 ERASEINFO(0x10000,1)
553 }
554 }, {
555 .mfr_id = MANUFACTURER_ATMEL,
556 .dev_id = AT29LV512,
557 .name = "Atmel AT29LV512",
558 .uaddr = {
559 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
560 },
561 .DevSize = SIZE_64KiB,
562 .CmdSet = P_ID_AMD_STD,
563 .NumEraseRegions= 1,
564 .regions = {
565 ERASEINFO(0x80,256),
566 ERASEINFO(0x80,256)
567 }
568 }, {
569 .mfr_id = MANUFACTURER_ATMEL,
570 .dev_id = AT49BV16X,
571 .name = "Atmel AT49BV16X",
572 .uaddr = {
573 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
574 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
575 },
576 .DevSize = SIZE_2MiB,
577 .CmdSet = P_ID_AMD_STD,
578 .NumEraseRegions= 2,
579 .regions = {
580 ERASEINFO(0x02000,8),
581 ERASEINFO(0x10000,31)
582 }
583 }, {
584 .mfr_id = MANUFACTURER_ATMEL,
585 .dev_id = AT49BV16XT,
586 .name = "Atmel AT49BV16XT",
587 .uaddr = {
588 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
589 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
590 },
591 .DevSize = SIZE_2MiB,
592 .CmdSet = P_ID_AMD_STD,
593 .NumEraseRegions= 2,
594 .regions = {
595 ERASEINFO(0x10000,31),
596 ERASEINFO(0x02000,8)
597 }
598 }, {
599 .mfr_id = MANUFACTURER_ATMEL,
600 .dev_id = AT49BV32X,
601 .name = "Atmel AT49BV32X",
602 .uaddr = {
603 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
604 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
605 },
606 .DevSize = SIZE_4MiB,
607 .CmdSet = P_ID_AMD_STD,
608 .NumEraseRegions= 2,
609 .regions = {
610 ERASEINFO(0x02000,8),
611 ERASEINFO(0x10000,63)
612 }
613 }, {
614 .mfr_id = MANUFACTURER_ATMEL,
615 .dev_id = AT49BV32XT,
616 .name = "Atmel AT49BV32XT",
617 .uaddr = {
618 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
619 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
620 },
621 .DevSize = SIZE_4MiB,
622 .CmdSet = P_ID_AMD_STD,
623 .NumEraseRegions= 2,
624 .regions = {
625 ERASEINFO(0x10000,63),
626 ERASEINFO(0x02000,8)
627 }
628 }, {
629 .mfr_id = MANUFACTURER_FUJITSU,
630 .dev_id = MBM29F040C,
631 .name = "Fujitsu MBM29F040C",
632 .uaddr = {
633 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
634 },
635 .DevSize = SIZE_512KiB,
636 .CmdSet = P_ID_AMD_STD,
637 .NumEraseRegions= 1,
638 .regions = {
639 ERASEINFO(0x10000,8)
640 }
641 }, {
642 .mfr_id = MANUFACTURER_FUJITSU,
643 .dev_id = MBM29LV650UE,
644 .name = "Fujitsu MBM29LV650UE",
645 .uaddr = {
646 [0] = MTD_UADDR_DONT_CARE /* x16 */
647 },
648 .DevSize = SIZE_8MiB,
649 .CmdSet = P_ID_AMD_STD,
650 .NumEraseRegions= 1,
651 .regions = {
652 ERASEINFO(0x10000,128)
653 }
654 }, {
655 .mfr_id = MANUFACTURER_FUJITSU,
656 .dev_id = MBM29LV320TE,
657 .name = "Fujitsu MBM29LV320TE",
658 .uaddr = {
659 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
660 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
661 },
662 .DevSize = SIZE_4MiB,
663 .CmdSet = P_ID_AMD_STD,
664 .NumEraseRegions= 2,
665 .regions = {
666 ERASEINFO(0x10000,63),
667 ERASEINFO(0x02000,8)
668 }
669 }, {
670 .mfr_id = MANUFACTURER_FUJITSU,
671 .dev_id = MBM29LV320BE,
672 .name = "Fujitsu MBM29LV320BE",
673 .uaddr = {
674 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
675 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
676 },
677 .DevSize = SIZE_4MiB,
678 .CmdSet = P_ID_AMD_STD,
679 .NumEraseRegions= 2,
680 .regions = {
681 ERASEINFO(0x02000,8),
682 ERASEINFO(0x10000,63)
683 }
684 }, {
685 .mfr_id = MANUFACTURER_FUJITSU,
686 .dev_id = MBM29LV160TE,
687 .name = "Fujitsu MBM29LV160TE",
688 .uaddr = {
689 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
690 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
691 },
692 .DevSize = SIZE_2MiB,
693 .CmdSet = P_ID_AMD_STD,
694 .NumEraseRegions= 4,
695 .regions = {
696 ERASEINFO(0x10000,31),
697 ERASEINFO(0x08000,1),
698 ERASEINFO(0x02000,2),
699 ERASEINFO(0x04000,1)
700 }
701 }, {
702 .mfr_id = MANUFACTURER_FUJITSU,
703 .dev_id = MBM29LV160BE,
704 .name = "Fujitsu MBM29LV160BE",
705 .uaddr = {
706 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
707 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
708 },
709 .DevSize = SIZE_2MiB,
710 .CmdSet = P_ID_AMD_STD,
711 .NumEraseRegions= 4,
712 .regions = {
713 ERASEINFO(0x04000,1),
714 ERASEINFO(0x02000,2),
715 ERASEINFO(0x08000,1),
716 ERASEINFO(0x10000,31)
717 }
718 }, {
719 .mfr_id = MANUFACTURER_FUJITSU,
720 .dev_id = MBM29LV800BA,
721 .name = "Fujitsu MBM29LV800BA",
722 .uaddr = {
723 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
724 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
725 },
726 .DevSize = SIZE_1MiB,
727 .CmdSet = P_ID_AMD_STD,
728 .NumEraseRegions= 4,
729 .regions = {
730 ERASEINFO(0x04000,1),
731 ERASEINFO(0x02000,2),
732 ERASEINFO(0x08000,1),
733 ERASEINFO(0x10000,15)
734 }
735 }, {
736 .mfr_id = MANUFACTURER_FUJITSU,
737 .dev_id = MBM29LV800TA,
738 .name = "Fujitsu MBM29LV800TA",
739 .uaddr = {
740 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
741 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
742 },
743 .DevSize = SIZE_1MiB,
744 .CmdSet = P_ID_AMD_STD,
745 .NumEraseRegions= 4,
746 .regions = {
747 ERASEINFO(0x10000,15),
748 ERASEINFO(0x08000,1),
749 ERASEINFO(0x02000,2),
750 ERASEINFO(0x04000,1)
751 }
752 }, {
753 .mfr_id = MANUFACTURER_FUJITSU,
754 .dev_id = MBM29LV400BC,
755 .name = "Fujitsu MBM29LV400BC",
756 .uaddr = {
757 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
758 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
759 },
760 .DevSize = SIZE_512KiB,
761 .CmdSet = P_ID_AMD_STD,
762 .NumEraseRegions= 4,
763 .regions = {
764 ERASEINFO(0x04000,1),
765 ERASEINFO(0x02000,2),
766 ERASEINFO(0x08000,1),
767 ERASEINFO(0x10000,7)
768 }
769 }, {
770 .mfr_id = MANUFACTURER_FUJITSU,
771 .dev_id = MBM29LV400TC,
772 .name = "Fujitsu MBM29LV400TC",
773 .uaddr = {
774 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
775 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
776 },
777 .DevSize = SIZE_512KiB,
778 .CmdSet = P_ID_AMD_STD,
779 .NumEraseRegions= 4,
780 .regions = {
781 ERASEINFO(0x10000,7),
782 ERASEINFO(0x08000,1),
783 ERASEINFO(0x02000,2),
784 ERASEINFO(0x04000,1)
785 }
786 }, {
787 .mfr_id = MANUFACTURER_HYUNDAI,
788 .dev_id = HY29F002T,
789 .name = "Hyundai HY29F002T",
790 .uaddr = {
791 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
792 },
793 .DevSize = SIZE_256KiB,
794 .CmdSet = P_ID_AMD_STD,
795 .NumEraseRegions= 4,
796 .regions = {
797 ERASEINFO(0x10000,3),
798 ERASEINFO(0x08000,1),
799 ERASEINFO(0x02000,2),
800 ERASEINFO(0x04000,1),
801 }
802 }, {
803 .mfr_id = MANUFACTURER_INTEL,
804 .dev_id = I28F004B3B,
805 .name = "Intel 28F004B3B",
806 .uaddr = {
807 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
808 },
809 .DevSize = SIZE_512KiB,
810 .CmdSet = P_ID_INTEL_STD,
811 .NumEraseRegions= 2,
812 .regions = {
813 ERASEINFO(0x02000, 8),
814 ERASEINFO(0x10000, 7),
815 }
816 }, {
817 .mfr_id = MANUFACTURER_INTEL,
818 .dev_id = I28F004B3T,
819 .name = "Intel 28F004B3T",
820 .uaddr = {
821 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
822 },
823 .DevSize = SIZE_512KiB,
824 .CmdSet = P_ID_INTEL_STD,
825 .NumEraseRegions= 2,
826 .regions = {
827 ERASEINFO(0x10000, 7),
828 ERASEINFO(0x02000, 8),
829 }
830 }, {
831 .mfr_id = MANUFACTURER_INTEL,
832 .dev_id = I28F400B3B,
833 .name = "Intel 28F400B3B",
834 .uaddr = {
835 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
836 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
837 },
838 .DevSize = SIZE_512KiB,
839 .CmdSet = P_ID_INTEL_STD,
840 .NumEraseRegions= 2,
841 .regions = {
842 ERASEINFO(0x02000, 8),
843 ERASEINFO(0x10000, 7),
844 }
845 }, {
846 .mfr_id = MANUFACTURER_INTEL,
847 .dev_id = I28F400B3T,
848 .name = "Intel 28F400B3T",
849 .uaddr = {
850 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
851 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
852 },
853 .DevSize = SIZE_512KiB,
854 .CmdSet = P_ID_INTEL_STD,
855 .NumEraseRegions= 2,
856 .regions = {
857 ERASEINFO(0x10000, 7),
858 ERASEINFO(0x02000, 8),
859 }
860 }, {
861 .mfr_id = MANUFACTURER_INTEL,
862 .dev_id = I28F008B3B,
863 .name = "Intel 28F008B3B",
864 .uaddr = {
865 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
866 },
867 .DevSize = SIZE_1MiB,
868 .CmdSet = P_ID_INTEL_STD,
869 .NumEraseRegions= 2,
870 .regions = {
871 ERASEINFO(0x02000, 8),
872 ERASEINFO(0x10000, 15),
873 }
874 }, {
875 .mfr_id = MANUFACTURER_INTEL,
876 .dev_id = I28F008B3T,
877 .name = "Intel 28F008B3T",
878 .uaddr = {
879 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
880 },
881 .DevSize = SIZE_1MiB,
882 .CmdSet = P_ID_INTEL_STD,
883 .NumEraseRegions= 2,
884 .regions = {
885 ERASEINFO(0x10000, 15),
886 ERASEINFO(0x02000, 8),
887 }
888 }, {
889 .mfr_id = MANUFACTURER_INTEL,
890 .dev_id = I28F008S5,
891 .name = "Intel 28F008S5",
892 .uaddr = {
893 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
894 },
895 .DevSize = SIZE_1MiB,
896 .CmdSet = P_ID_INTEL_EXT,
897 .NumEraseRegions= 1,
898 .regions = {
899 ERASEINFO(0x10000,16),
900 }
901 }, {
902 .mfr_id = MANUFACTURER_INTEL,
903 .dev_id = I28F016S5,
904 .name = "Intel 28F016S5",
905 .uaddr = {
906 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
907 },
908 .DevSize = SIZE_2MiB,
909 .CmdSet = P_ID_INTEL_EXT,
910 .NumEraseRegions= 1,
911 .regions = {
912 ERASEINFO(0x10000,32),
913 }
914 }, {
915 .mfr_id = MANUFACTURER_INTEL,
916 .dev_id = I28F008SA,
917 .name = "Intel 28F008SA",
918 .uaddr = {
919 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
920 },
921 .DevSize = SIZE_1MiB,
922 .CmdSet = P_ID_INTEL_STD,
923 .NumEraseRegions= 1,
924 .regions = {
925 ERASEINFO(0x10000, 16),
926 }
927 }, {
928 .mfr_id = MANUFACTURER_INTEL,
929 .dev_id = I28F800B3B,
930 .name = "Intel 28F800B3B",
931 .uaddr = {
932 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
933 },
934 .DevSize = SIZE_1MiB,
935 .CmdSet = P_ID_INTEL_STD,
936 .NumEraseRegions= 2,
937 .regions = {
938 ERASEINFO(0x02000, 8),
939 ERASEINFO(0x10000, 15),
940 }
941 }, {
942 .mfr_id = MANUFACTURER_INTEL,
943 .dev_id = I28F800B3T,
944 .name = "Intel 28F800B3T",
945 .uaddr = {
946 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
947 },
948 .DevSize = SIZE_1MiB,
949 .CmdSet = P_ID_INTEL_STD,
950 .NumEraseRegions= 2,
951 .regions = {
952 ERASEINFO(0x10000, 15),
953 ERASEINFO(0x02000, 8),
954 }
955 }, {
956 .mfr_id = MANUFACTURER_INTEL,
957 .dev_id = I28F016B3B,
958 .name = "Intel 28F016B3B",
959 .uaddr = {
960 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
961 },
962 .DevSize = SIZE_2MiB,
963 .CmdSet = P_ID_INTEL_STD,
964 .NumEraseRegions= 2,
965 .regions = {
966 ERASEINFO(0x02000, 8),
967 ERASEINFO(0x10000, 31),
968 }
969 }, {
970 .mfr_id = MANUFACTURER_INTEL,
971 .dev_id = I28F016S3,
	972			.name = "Intel 28F016S3",
973 .uaddr = {
974 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
975 },
976 .DevSize = SIZE_2MiB,
977 .CmdSet = P_ID_INTEL_STD,
978 .NumEraseRegions= 1,
979 .regions = {
980 ERASEINFO(0x10000, 32),
981 }
982 }, {
983 .mfr_id = MANUFACTURER_INTEL,
984 .dev_id = I28F016B3T,
985 .name = "Intel 28F016B3T",
986 .uaddr = {
987 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
988 },
989 .DevSize = SIZE_2MiB,
990 .CmdSet = P_ID_INTEL_STD,
991 .NumEraseRegions= 2,
992 .regions = {
993 ERASEINFO(0x10000, 31),
994 ERASEINFO(0x02000, 8),
995 }
996 }, {
997 .mfr_id = MANUFACTURER_INTEL,
998 .dev_id = I28F160B3B,
999 .name = "Intel 28F160B3B",
1000 .uaddr = {
1001 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1002 },
1003 .DevSize = SIZE_2MiB,
1004 .CmdSet = P_ID_INTEL_STD,
1005 .NumEraseRegions= 2,
1006 .regions = {
1007 ERASEINFO(0x02000, 8),
1008 ERASEINFO(0x10000, 31),
1009 }
1010 }, {
1011 .mfr_id = MANUFACTURER_INTEL,
1012 .dev_id = I28F160B3T,
1013 .name = "Intel 28F160B3T",
1014 .uaddr = {
1015 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1016 },
1017 .DevSize = SIZE_2MiB,
1018 .CmdSet = P_ID_INTEL_STD,
1019 .NumEraseRegions= 2,
1020 .regions = {
1021 ERASEINFO(0x10000, 31),
1022 ERASEINFO(0x02000, 8),
1023 }
1024 }, {
1025 .mfr_id = MANUFACTURER_INTEL,
1026 .dev_id = I28F320B3B,
1027 .name = "Intel 28F320B3B",
1028 .uaddr = {
1029 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1030 },
1031 .DevSize = SIZE_4MiB,
1032 .CmdSet = P_ID_INTEL_STD,
1033 .NumEraseRegions= 2,
1034 .regions = {
1035 ERASEINFO(0x02000, 8),
1036 ERASEINFO(0x10000, 63),
1037 }
1038 }, {
1039 .mfr_id = MANUFACTURER_INTEL,
1040 .dev_id = I28F320B3T,
1041 .name = "Intel 28F320B3T",
1042 .uaddr = {
1043 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1044 },
1045 .DevSize = SIZE_4MiB,
1046 .CmdSet = P_ID_INTEL_STD,
1047 .NumEraseRegions= 2,
1048 .regions = {
1049 ERASEINFO(0x10000, 63),
1050 ERASEINFO(0x02000, 8),
1051 }
1052 }, {
1053 .mfr_id = MANUFACTURER_INTEL,
1054 .dev_id = I28F640B3B,
1055 .name = "Intel 28F640B3B",
1056 .uaddr = {
1057 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1058 },
1059 .DevSize = SIZE_8MiB,
1060 .CmdSet = P_ID_INTEL_STD,
1061 .NumEraseRegions= 2,
1062 .regions = {
1063 ERASEINFO(0x02000, 8),
1064 ERASEINFO(0x10000, 127),
1065 }
1066 }, {
1067 .mfr_id = MANUFACTURER_INTEL,
1068 .dev_id = I28F640B3T,
1069 .name = "Intel 28F640B3T",
1070 .uaddr = {
1071 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1072 },
1073 .DevSize = SIZE_8MiB,
1074 .CmdSet = P_ID_INTEL_STD,
1075 .NumEraseRegions= 2,
1076 .regions = {
1077 ERASEINFO(0x10000, 127),
1078 ERASEINFO(0x02000, 8),
1079 }
1080 }, {
1081 .mfr_id = MANUFACTURER_INTEL,
1082 .dev_id = I82802AB,
1083 .name = "Intel 82802AB",
1084 .uaddr = {
1085 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1086 },
1087 .DevSize = SIZE_512KiB,
1088 .CmdSet = P_ID_INTEL_EXT,
1089 .NumEraseRegions= 1,
1090 .regions = {
1091 ERASEINFO(0x10000,8),
1092 }
1093 }, {
1094 .mfr_id = MANUFACTURER_INTEL,
1095 .dev_id = I82802AC,
1096 .name = "Intel 82802AC",
1097 .uaddr = {
1098 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1099 },
1100 .DevSize = SIZE_1MiB,
1101 .CmdSet = P_ID_INTEL_EXT,
1102 .NumEraseRegions= 1,
1103 .regions = {
1104 ERASEINFO(0x10000,16),
1105 }
1106 }, {
1107 .mfr_id = MANUFACTURER_MACRONIX,
1108 .dev_id = MX29LV040C,
1109 .name = "Macronix MX29LV040C",
1110 .uaddr = {
1111 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1112 },
1113 .DevSize = SIZE_512KiB,
1114 .CmdSet = P_ID_AMD_STD,
1115 .NumEraseRegions= 1,
1116 .regions = {
1117 ERASEINFO(0x10000,8),
1118 }
1119 }, {
1120 .mfr_id = MANUFACTURER_MACRONIX,
1121 .dev_id = MX29LV160T,
1122 .name = "MXIC MX29LV160T",
1123 .uaddr = {
1124 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1125 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1126 },
1127 .DevSize = SIZE_2MiB,
1128 .CmdSet = P_ID_AMD_STD,
1129 .NumEraseRegions= 4,
1130 .regions = {
1131 ERASEINFO(0x10000,31),
1132 ERASEINFO(0x08000,1),
1133 ERASEINFO(0x02000,2),
1134 ERASEINFO(0x04000,1)
1135 }
1136 }, {
1137 .mfr_id = MANUFACTURER_NEC,
1138 .dev_id = UPD29F064115,
1139 .name = "NEC uPD29F064115",
1140 .uaddr = {
1141 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1142 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1143 },
1144 .DevSize = SIZE_8MiB,
1145 .CmdSet = P_ID_AMD_STD,
1146 .NumEraseRegions= 3,
1147 .regions = {
1148 ERASEINFO(0x2000,8),
1149 ERASEINFO(0x10000,126),
1150 ERASEINFO(0x2000,8),
1151 }
1152 }, {
1153 .mfr_id = MANUFACTURER_MACRONIX,
1154 .dev_id = MX29LV160B,
1155 .name = "MXIC MX29LV160B",
1156 .uaddr = {
1157 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1158 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1159 },
1160 .DevSize = SIZE_2MiB,
1161 .CmdSet = P_ID_AMD_STD,
1162 .NumEraseRegions= 4,
1163 .regions = {
1164 ERASEINFO(0x04000,1),
1165 ERASEINFO(0x02000,2),
1166 ERASEINFO(0x08000,1),
1167 ERASEINFO(0x10000,31)
1168 }
1169 }, {
1170 .mfr_id = MANUFACTURER_MACRONIX,
1171 .dev_id = MX29F016,
1172 .name = "Macronix MX29F016",
1173 .uaddr = {
1174 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1175 },
1176 .DevSize = SIZE_2MiB,
1177 .CmdSet = P_ID_AMD_STD,
1178 .NumEraseRegions= 1,
1179 .regions = {
1180 ERASEINFO(0x10000,32),
1181 }
1182 }, {
1183 .mfr_id = MANUFACTURER_MACRONIX,
1184 .dev_id = MX29F004T,
1185 .name = "Macronix MX29F004T",
1186 .uaddr = {
1187 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1188 },
1189 .DevSize = SIZE_512KiB,
1190 .CmdSet = P_ID_AMD_STD,
1191 .NumEraseRegions= 4,
1192 .regions = {
1193 ERASEINFO(0x10000,7),
1194 ERASEINFO(0x08000,1),
1195 ERASEINFO(0x02000,2),
1196 ERASEINFO(0x04000,1),
1197 }
1198 }, {
1199 .mfr_id = MANUFACTURER_MACRONIX,
1200 .dev_id = MX29F004B,
1201 .name = "Macronix MX29F004B",
1202 .uaddr = {
1203 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1204 },
1205 .DevSize = SIZE_512KiB,
1206 .CmdSet = P_ID_AMD_STD,
1207 .NumEraseRegions= 4,
1208 .regions = {
1209 ERASEINFO(0x04000,1),
1210 ERASEINFO(0x02000,2),
1211 ERASEINFO(0x08000,1),
1212 ERASEINFO(0x10000,7),
1213 }
1214 }, {
1215 .mfr_id = MANUFACTURER_MACRONIX,
1216 .dev_id = MX29F002T,
1217 .name = "Macronix MX29F002T",
1218 .uaddr = {
1219 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1220 },
1221 .DevSize = SIZE_256KiB,
1222 .CmdSet = P_ID_AMD_STD,
1223 .NumEraseRegions= 4,
1224 .regions = {
1225 ERASEINFO(0x10000,3),
1226 ERASEINFO(0x08000,1),
1227 ERASEINFO(0x02000,2),
1228 ERASEINFO(0x04000,1),
1229 }
1230 }, {
1231 .mfr_id = MANUFACTURER_PMC,
1232 .dev_id = PM49FL002,
1233 .name = "PMC Pm49FL002",
1234 .uaddr = {
1235 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1236 },
1237 .DevSize = SIZE_256KiB,
1238 .CmdSet = P_ID_AMD_STD,
1239 .NumEraseRegions= 1,
1240 .regions = {
1241 ERASEINFO( 0x01000, 64 )
1242 }
1243 }, {
1244 .mfr_id = MANUFACTURER_PMC,
1245 .dev_id = PM49FL004,
1246 .name = "PMC Pm49FL004",
1247 .uaddr = {
1248 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1249 },
1250 .DevSize = SIZE_512KiB,
1251 .CmdSet = P_ID_AMD_STD,
1252 .NumEraseRegions= 1,
1253 .regions = {
1254 ERASEINFO( 0x01000, 128 )
1255 }
1256 }, {
1257 .mfr_id = MANUFACTURER_PMC,
1258 .dev_id = PM49FL008,
1259 .name = "PMC Pm49FL008",
1260 .uaddr = {
1261 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1262 },
1263 .DevSize = SIZE_1MiB,
1264 .CmdSet = P_ID_AMD_STD,
1265 .NumEraseRegions= 1,
1266 .regions = {
1267 ERASEINFO( 0x01000, 256 )
1268 }
1269 }, {
1270 .mfr_id = MANUFACTURER_SST,
1271 .dev_id = SST39LF512,
1272 .name = "SST 39LF512",
1273 .uaddr = {
1274 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1275 },
1276 .DevSize = SIZE_64KiB,
1277 .CmdSet = P_ID_AMD_STD,
1278 .NumEraseRegions= 1,
1279 .regions = {
1280 ERASEINFO(0x01000,16),
1281 }
1282 }, {
1283 .mfr_id = MANUFACTURER_SST,
1284 .dev_id = SST39LF010,
1285 .name = "SST 39LF010",
1286 .uaddr = {
1287 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1288 },
1289 .DevSize = SIZE_128KiB,
1290 .CmdSet = P_ID_AMD_STD,
1291 .NumEraseRegions= 1,
1292 .regions = {
1293 ERASEINFO(0x01000,32),
1294 }
1295 }, {
1296 .mfr_id = MANUFACTURER_SST,
1297 .dev_id = SST29EE020,
1298 .name = "SST 29EE020",
1299 .uaddr = {
1300 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1301 },
1302 .DevSize = SIZE_256KiB,
1303 .CmdSet = P_ID_SST_PAGE,
1304 .NumEraseRegions= 1,
1305 .regions = {ERASEINFO(0x01000,64),
1306 }
1307 }, {
1308 .mfr_id = MANUFACTURER_SST,
1309 .dev_id = SST29LE020,
1310 .name = "SST 29LE020",
1311 .uaddr = {
1312 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1313 },
1314 .DevSize = SIZE_256KiB,
1315 .CmdSet = P_ID_SST_PAGE,
1316 .NumEraseRegions= 1,
1317 .regions = {ERASEINFO(0x01000,64),
1318 }
1319 }, {
1320 .mfr_id = MANUFACTURER_SST,
1321 .dev_id = SST39LF020,
1322 .name = "SST 39LF020",
1323 .uaddr = {
1324 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1325 },
1326 .DevSize = SIZE_256KiB,
1327 .CmdSet = P_ID_AMD_STD,
1328 .NumEraseRegions= 1,
1329 .regions = {
1330 ERASEINFO(0x01000,64),
1331 }
1332 }, {
1333 .mfr_id = MANUFACTURER_SST,
1334 .dev_id = SST39LF040,
1335 .name = "SST 39LF040",
1336 .uaddr = {
1337 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1338 },
1339 .DevSize = SIZE_512KiB,
1340 .CmdSet = P_ID_AMD_STD,
1341 .NumEraseRegions= 1,
1342 .regions = {
1343 ERASEINFO(0x01000,128),
1344 }
1345 }, {
1346 .mfr_id = MANUFACTURER_SST,
1347 .dev_id = SST39SF010A,
1348 .name = "SST 39SF010A",
1349 .uaddr = {
1350 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1351 },
1352 .DevSize = SIZE_128KiB,
1353 .CmdSet = P_ID_AMD_STD,
1354 .NumEraseRegions= 1,
1355 .regions = {
1356 ERASEINFO(0x01000,32),
1357 }
1358 }, {
1359 .mfr_id = MANUFACTURER_SST,
1360 .dev_id = SST39SF020A,
1361 .name = "SST 39SF020A",
1362 .uaddr = {
1363 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1364 },
1365 .DevSize = SIZE_256KiB,
1366 .CmdSet = P_ID_AMD_STD,
1367 .NumEraseRegions= 1,
1368 .regions = {
1369 ERASEINFO(0x01000,64),
1370 }
1371 }, {
1372 .mfr_id = MANUFACTURER_SST,
1373 .dev_id = SST49LF004B,
1374 .name = "SST 49LF004B",
1375 .uaddr = {
1376 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1377 },
1378 .DevSize = SIZE_512KiB,
1379 .CmdSet = P_ID_AMD_STD,
1380 .NumEraseRegions= 1,
1381 .regions = {
1382 ERASEINFO(0x01000,128),
1383 }
1384 }, {
1385 .mfr_id = MANUFACTURER_SST,
1386 .dev_id = SST49LF008A,
1387 .name = "SST 49LF008A",
1388 .uaddr = {
1389 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1390 },
1391 .DevSize = SIZE_1MiB,
1392 .CmdSet = P_ID_AMD_STD,
1393 .NumEraseRegions= 1,
1394 .regions = {
1395 ERASEINFO(0x01000,256),
1396 }
1397 }, {
1398 .mfr_id = MANUFACTURER_SST,
1399 .dev_id = SST49LF030A,
1400 .name = "SST 49LF030A",
1401 .uaddr = {
1402 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1403 },
1404 .DevSize = SIZE_512KiB,
1405 .CmdSet = P_ID_AMD_STD,
1406 .NumEraseRegions= 1,
1407 .regions = {
1408 ERASEINFO(0x01000,96),
1409 }
1410 }, {
1411 .mfr_id = MANUFACTURER_SST,
1412 .dev_id = SST49LF040A,
1413 .name = "SST 49LF040A",
1414 .uaddr = {
1415 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1416 },
1417 .DevSize = SIZE_512KiB,
1418 .CmdSet = P_ID_AMD_STD,
1419 .NumEraseRegions= 1,
1420 .regions = {
1421 ERASEINFO(0x01000,128),
1422 }
1423 }, {
1424 .mfr_id = MANUFACTURER_SST,
1425 .dev_id = SST49LF080A,
1426 .name = "SST 49LF080A",
1427 .uaddr = {
1428 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1429 },
1430 .DevSize = SIZE_1MiB,
1431 .CmdSet = P_ID_AMD_STD,
1432 .NumEraseRegions= 1,
1433 .regions = {
1434 ERASEINFO(0x01000,256),
1435 }
1436 }, {
1437 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1438 .dev_id = SST39LF160,
1439 .name = "SST 39LF160",
1440 .uaddr = {
1441 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1442 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1443 },
1444 .DevSize = SIZE_2MiB,
1445 .CmdSet = P_ID_AMD_STD,
1446 .NumEraseRegions= 2,
1447 .regions = {
1448 ERASEINFO(0x1000,256),
1449 ERASEINFO(0x1000,256)
1450 }
1451
1452 }, {
1453 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1454 .dev_id = M29W800DT,
1455 .name = "ST M29W800DT",
1456 .uaddr = {
1457 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1458 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1459 },
1460 .DevSize = SIZE_1MiB,
1461 .CmdSet = P_ID_AMD_STD,
1462 .NumEraseRegions= 4,
1463 .regions = {
1464 ERASEINFO(0x10000,15),
1465 ERASEINFO(0x08000,1),
1466 ERASEINFO(0x02000,2),
1467 ERASEINFO(0x04000,1)
1468 }
1469 }, {
1470 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1471 .dev_id = M29W800DB,
1472 .name = "ST M29W800DB",
1473 .uaddr = {
1474 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1475 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1476 },
1477 .DevSize = SIZE_1MiB,
1478 .CmdSet = P_ID_AMD_STD,
1479 .NumEraseRegions= 4,
1480 .regions = {
1481 ERASEINFO(0x04000,1),
1482 ERASEINFO(0x02000,2),
1483 ERASEINFO(0x08000,1),
1484 ERASEINFO(0x10000,15)
1485 }
1486 }, {
1487 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1488 .dev_id = M29W160DT,
1489 .name = "ST M29W160DT",
1490 .uaddr = {
1491 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1492 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1493 },
1494 .DevSize = SIZE_2MiB,
1495 .CmdSet = P_ID_AMD_STD,
1496 .NumEraseRegions= 4,
1497 .regions = {
1498 ERASEINFO(0x10000,31),
1499 ERASEINFO(0x08000,1),
1500 ERASEINFO(0x02000,2),
1501 ERASEINFO(0x04000,1)
1502 }
1503 }, {
1504 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1505 .dev_id = M29W160DB,
1506 .name = "ST M29W160DB",
1507 .uaddr = {
1508 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1509 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1510 },
1511 .DevSize = SIZE_2MiB,
1512 .CmdSet = P_ID_AMD_STD,
1513 .NumEraseRegions= 4,
1514 .regions = {
1515 ERASEINFO(0x04000,1),
1516 ERASEINFO(0x02000,2),
1517 ERASEINFO(0x08000,1),
1518 ERASEINFO(0x10000,31)
1519 }
1520 }, {
1521 .mfr_id = MANUFACTURER_ST,
1522 .dev_id = M29W040B,
1523 .name = "ST M29W040B",
1524 .uaddr = {
1525 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1526 },
1527 .DevSize = SIZE_512KiB,
1528 .CmdSet = P_ID_AMD_STD,
1529 .NumEraseRegions= 1,
1530 .regions = {
1531 ERASEINFO(0x10000,8),
1532 }
1533 }, {
1534 .mfr_id = MANUFACTURER_ST,
1535 .dev_id = M50FW040,
1536 .name = "ST M50FW040",
1537 .uaddr = {
1538 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1539 },
1540 .DevSize = SIZE_512KiB,
1541 .CmdSet = P_ID_INTEL_EXT,
1542 .NumEraseRegions= 1,
1543 .regions = {
1544 ERASEINFO(0x10000,8),
1545 }
1546 }, {
1547 .mfr_id = MANUFACTURER_ST,
1548 .dev_id = M50FW080,
1549 .name = "ST M50FW080",
1550 .uaddr = {
1551 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1552 },
1553 .DevSize = SIZE_1MiB,
1554 .CmdSet = P_ID_INTEL_EXT,
1555 .NumEraseRegions= 1,
1556 .regions = {
1557 ERASEINFO(0x10000,16),
1558 }
1559 }, {
1560 .mfr_id = MANUFACTURER_ST,
1561 .dev_id = M50FW016,
1562 .name = "ST M50FW016",
1563 .uaddr = {
1564 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1565 },
1566 .DevSize = SIZE_2MiB,
1567 .CmdSet = P_ID_INTEL_EXT,
1568 .NumEraseRegions= 1,
1569 .regions = {
1570 ERASEINFO(0x10000,32),
1571 }
1572 }, {
1573 .mfr_id = MANUFACTURER_ST,
1574 .dev_id = M50LPW080,
1575 .name = "ST M50LPW080",
1576 .uaddr = {
1577 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1578 },
1579 .DevSize = SIZE_1MiB,
1580 .CmdSet = P_ID_INTEL_EXT,
1581 .NumEraseRegions= 1,
1582 .regions = {
1583 ERASEINFO(0x10000,16),
1584 }
1585 }, {
1586 .mfr_id = MANUFACTURER_TOSHIBA,
1587 .dev_id = TC58FVT160,
1588 .name = "Toshiba TC58FVT160",
1589 .uaddr = {
1590 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1591 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1592 },
1593 .DevSize = SIZE_2MiB,
1594 .CmdSet = P_ID_AMD_STD,
1595 .NumEraseRegions= 4,
1596 .regions = {
1597 ERASEINFO(0x10000,31),
1598 ERASEINFO(0x08000,1),
1599 ERASEINFO(0x02000,2),
1600 ERASEINFO(0x04000,1)
1601 }
1602 }, {
1603 .mfr_id = MANUFACTURER_TOSHIBA,
1604 .dev_id = TC58FVB160,
1605 .name = "Toshiba TC58FVB160",
1606 .uaddr = {
1607 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1608 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1609 },
1610 .DevSize = SIZE_2MiB,
1611 .CmdSet = P_ID_AMD_STD,
1612 .NumEraseRegions= 4,
1613 .regions = {
1614 ERASEINFO(0x04000,1),
1615 ERASEINFO(0x02000,2),
1616 ERASEINFO(0x08000,1),
1617 ERASEINFO(0x10000,31)
1618 }
1619 }, {
1620 .mfr_id = MANUFACTURER_TOSHIBA,
1621 .dev_id = TC58FVB321,
1622 .name = "Toshiba TC58FVB321",
1623 .uaddr = {
1624 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1625 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1626 },
1627 .DevSize = SIZE_4MiB,
1628 .CmdSet = P_ID_AMD_STD,
1629 .NumEraseRegions= 2,
1630 .regions = {
1631 ERASEINFO(0x02000,8),
1632 ERASEINFO(0x10000,63)
1633 }
1634 }, {
1635 .mfr_id = MANUFACTURER_TOSHIBA,
1636 .dev_id = TC58FVT321,
1637 .name = "Toshiba TC58FVT321",
1638 .uaddr = {
1639 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1640 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1641 },
1642 .DevSize = SIZE_4MiB,
1643 .CmdSet = P_ID_AMD_STD,
1644 .NumEraseRegions= 2,
1645 .regions = {
1646 ERASEINFO(0x10000,63),
1647 ERASEINFO(0x02000,8)
1648 }
1649 }, {
1650 .mfr_id = MANUFACTURER_TOSHIBA,
1651 .dev_id = TC58FVB641,
1652 .name = "Toshiba TC58FVB641",
1653 .uaddr = {
1654 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1655 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1656 },
1657 .DevSize = SIZE_8MiB,
1658 .CmdSet = P_ID_AMD_STD,
1659 .NumEraseRegions= 2,
1660 .regions = {
1661 ERASEINFO(0x02000,8),
1662 ERASEINFO(0x10000,127)
1663 }
1664 }, {
1665 .mfr_id = MANUFACTURER_TOSHIBA,
1666 .dev_id = TC58FVT641,
1667 .name = "Toshiba TC58FVT641",
1668 .uaddr = {
1669 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1670 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1671 },
1672 .DevSize = SIZE_8MiB,
1673 .CmdSet = P_ID_AMD_STD,
1674 .NumEraseRegions= 2,
1675 .regions = {
1676 ERASEINFO(0x10000,127),
1677 ERASEINFO(0x02000,8)
1678 }
1679 }, {
1680 .mfr_id = MANUFACTURER_WINBOND,
1681 .dev_id = W49V002A,
1682 .name = "Winbond W49V002A",
1683 .uaddr = {
1684 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1685 },
1686 .DevSize = SIZE_256KiB,
1687 .CmdSet = P_ID_AMD_STD,
1688 .NumEraseRegions= 4,
1689 .regions = {
1690 ERASEINFO(0x10000, 3),
1691 ERASEINFO(0x08000, 1),
1692 ERASEINFO(0x02000, 2),
1693 ERASEINFO(0x04000, 1),
1694 }
1695 }
1696};
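/*
 * Sanity check (hand arithmetic, illustrative only -- not driver code):
 * the erase regions of each entry must sum to its device size, which
 * jedec_match() below computes as 1 << DevSize. For the Fujitsu
 * MBM29LV400BC entry above:
 *
 *	1*0x04000 + 2*0x02000 + 1*0x08000 + 7*0x10000 = 0x80000 = 512KiB
 *
 * matching .DevSize = SIZE_512KiB. Top-boot ("T") parts list the same
 * regions in reverse order, with the small boot sectors last.
 */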
1697
1698
1699static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
1700
1701static int jedec_probe_chip(struct map_info *map, __u32 base,
1702 unsigned long *chip_map, struct cfi_private *cfi);
1703
1704static struct mtd_info *jedec_probe(struct map_info *map);
1705
1706static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1707 struct cfi_private *cfi)
1708{
1709 map_word result;
1710 unsigned long mask;
1711 u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type);
1712 mask = (1 << (cfi->device_type * 8)) -1;
1713 result = map_read(map, base + ofs);
1714 return result.x[0] & mask;
1715}
1716
1717static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1718 struct cfi_private *cfi)
1719{
1720 map_word result;
1721 unsigned long mask;
1722 u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
1723 mask = (1 << (cfi->device_type * 8)) -1;
1724 result = map_read(map, base + ofs);
1725 return result.x[0] & mask;
1726}
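/*
 * Worked example (illustrative; assumes CFI_DEVICETYPE_X16 == 2, i.e.
 * the device width in bytes): mask = (1 << (2 * 8)) - 1 = 0xffff, so
 * only the low 16 bits of the interleaved map_word -- a single chip's
 * manufacturer or device ID -- are returned. cfi_build_cmd_addr()
 * scales offsets 0 (manufacturer) and 1 (device ID) by the interleave
 * and device width to form the bus address actually read.
 */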
1727
1728static inline void jedec_reset(u32 base, struct map_info *map,
1729 struct cfi_private *cfi)
1730{
1731 /* Reset */
1732
	1733	/* After checking the datasheets for SST, MACRONIX and ATMEL
	1734	 * (oh, and incidentally the JEDEC spec - 3.5.3.3), the reset
	1735	 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
	1736	 * 0x2aaa, then 0xF0 at 0x5555.  This will not affect the AMD
	1737	 * chips, as they will ignore the writes and don't care what
	1738	 * address the F0 is written to */
1739 if(cfi->addr_unlock1) {
1740 DEBUG( MTD_DEBUG_LEVEL3,
1741 "reset unlock called %x %x \n",
1742 cfi->addr_unlock1,cfi->addr_unlock2);
1743 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1744 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1745 }
1746
1747 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
	1748	/* Some misdesigned Intel chips do not respond to 0xF0 for a reset,
1749 * so ensure we're in read mode. Send both the Intel and the AMD command
1750 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
1751 * this should be safe.
1752 */
1753 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
1754 /* FIXME - should have reset delay before continuing */
1755}
1756
1757
1758static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
1759{
1760 int uaddr_idx;
1761 __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
1762
1763 switch ( device_type ) {
1764 case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
1765 case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
1766 case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
1767 default:
1768 printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
1769 __func__, device_type);
1770 goto uaddr_done;
1771 }
1772
1773 uaddr = finfo->uaddr[uaddr_idx];
1774
1775 if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
1776 /* ASSERT("The unlock addresses for non-8-bit mode
1777 are bollocks. We don't really need an array."); */
1778 uaddr = finfo->uaddr[0];
1779 }
1780
1781 uaddr_done:
1782 return uaddr;
1783}
1784
1785
1786static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1787{
1788 int i,num_erase_regions;
1789 __u8 uaddr;
1790
	1791	printk(KERN_INFO "Found: %s\n", jedec_table[index].name);
1792
1793 num_erase_regions = jedec_table[index].NumEraseRegions;
1794
1795 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1796 if (!p_cfi->cfiq) {
1797 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1798 return 0;
1799 }
1800
1801 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1802
1803 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
1804 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
1805 p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
1806 p_cfi->cfi_mode = CFI_MODE_JEDEC;
1807
1808 for (i=0; i<num_erase_regions; i++){
1809 p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
1810 }
1811 p_cfi->cmdset_priv = NULL;
1812
1813 /* This may be redundant for some cases, but it doesn't hurt */
1814 p_cfi->mfr = jedec_table[index].mfr_id;
1815 p_cfi->id = jedec_table[index].dev_id;
1816
1817 uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
1818 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1819 kfree( p_cfi->cfiq );
1820 return 0;
1821 }
1822
1823 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
1824 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
1825
1826 return 1; /* ok */
1827}
1828
1829
1830/*
	1831 * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
	1832 * the mapped address, unlock addresses, and proper chip ID.  This function
	1833 * attempts to minimize errors.  It is doubtful that this probe will ever
1834 * be perfect - consequently there should be some module parameters that
1835 * could be manually specified to force the chip info.
1836 */
1837static inline int jedec_match( __u32 base,
1838 struct map_info *map,
1839 struct cfi_private *cfi,
1840 const struct amd_flash_info *finfo )
1841{
1842 int rc = 0; /* failure until all tests pass */
1843 u32 mfr, id;
1844 __u8 uaddr;
1845
1846 /*
1847 * The IDs must match. For X16 and X32 devices operating in
	1848	 * a lower width ( X8 or X16 ), the device IDs are usually just
	1849	 * the lower byte(s) of the larger device ID for wider mode.  If
	1850	 * a part is found that doesn't fit this assumption (device ID for
	1851	 * smaller width mode is completely unrelated to full-width mode)
	1852	 * then jedec_table[] will have to be augmented with the IDs
1853 * for different widths.
1854 */
1855 switch (cfi->device_type) {
1856 case CFI_DEVICETYPE_X8:
1857 mfr = (__u8)finfo->mfr_id;
1858 id = (__u8)finfo->dev_id;
1859 break;
1860 case CFI_DEVICETYPE_X16:
1861 mfr = (__u16)finfo->mfr_id;
1862 id = (__u16)finfo->dev_id;
1863 break;
1864 case CFI_DEVICETYPE_X32:
1865 mfr = (__u16)finfo->mfr_id;
1866 id = (__u32)finfo->dev_id;
1867 break;
1868 default:
1869 printk(KERN_WARNING
1870 "MTD %s(): Unsupported device type %d\n",
1871 __func__, cfi->device_type);
1872 goto match_done;
1873 }
1874 if ( cfi->mfr != mfr || cfi->id != id ) {
1875 goto match_done;
1876 }
1877
1878 /* the part size must fit in the memory window */
1879 DEBUG( MTD_DEBUG_LEVEL3,
1880 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
1881 __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
1882 if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
1883 DEBUG( MTD_DEBUG_LEVEL3,
1884 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
1885 __func__, finfo->mfr_id, finfo->dev_id,
1886 1 << finfo->DevSize );
1887 goto match_done;
1888 }
1889
1890 uaddr = finfo_uaddr(finfo, cfi->device_type);
1891 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1892 goto match_done;
1893 }
1894
1895 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
1896 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
1897 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
1898 && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
1899 unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
1900 DEBUG( MTD_DEBUG_LEVEL3,
1901 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
1902 __func__,
1903 unlock_addrs[uaddr].addr1,
1904 unlock_addrs[uaddr].addr2);
1905 goto match_done;
1906 }
1907
	1908	 * Make sure the IDs disappear when the device is taken out of
	1909	 * ID mode.  The only time this check fails when it should succeed
	1910	 * is when the IDs are written as data to the same
	1911	 * addresses.  For this rare and unfortunate case the chip
1912 * addresses. For this rare and unfortunate case the chip
1913 * cannot be probed correctly.
1914 * FIXME - write a driver that takes all of the chip info as
1915 * module parameters, doesn't probe but forces a load.
1916 */
1917 DEBUG( MTD_DEBUG_LEVEL3,
	1918	       "MTD %s(): check IDs disappear when not in ID mode\n",
1919 __func__ );
1920 jedec_reset( base, map, cfi );
1921 mfr = jedec_read_mfr( map, base, cfi );
1922 id = jedec_read_id( map, base, cfi );
1923 if ( mfr == cfi->mfr && id == cfi->id ) {
1924 DEBUG( MTD_DEBUG_LEVEL3,
1925 "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
1926 "You might need to manually specify JEDEC parameters.\n",
1927 __func__, cfi->mfr, cfi->id );
1928 goto match_done;
1929 }
1930
1931 /* all tests passed - mark as success */
1932 rc = 1;
1933
1934 /*
1935 * Put the device back in ID mode - only need to do this if we
1936 * were truly frobbing a real device.
1937 */
1938 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
1939 if(cfi->addr_unlock1) {
1940 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1941 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1942 }
1943 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1944 /* FIXME - should have a delay before continuing */
1945
1946 match_done:
1947 return rc;
1948}
1949
1950
1951static int jedec_probe_chip(struct map_info *map, __u32 base,
1952 unsigned long *chip_map, struct cfi_private *cfi)
1953{
1954 int i;
1955 enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
1956 u32 probe_offset1, probe_offset2;
1957
1958 retry:
1959 if (!cfi->numchips) {
1960 uaddr_idx++;
1961
1962 if (MTD_UADDR_UNNECESSARY == uaddr_idx)
1963 return 0;
1964
1965 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
1966 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
1967 }
1968
1969 /* Make certain we aren't probing past the end of map */
1970 if (base >= map->size) {
1971 printk(KERN_NOTICE
1972 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
1973 base, map->size -1);
1974 return 0;
1975
1976 }
1977 /* Ensure the unlock addresses we try stay inside the map */
1978 probe_offset1 = cfi_build_cmd_addr(
1979 cfi->addr_unlock1,
1980 cfi_interleave(cfi),
1981 cfi->device_type);
1982 probe_offset2 = cfi_build_cmd_addr(
	1983				cfi->addr_unlock2,
1984 cfi_interleave(cfi),
1985 cfi->device_type);
1986 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
1987 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
1988 {
1989 goto retry;
1990 }
1991
1992 /* Reset */
1993 jedec_reset(base, map, cfi);
1994
1995 /* Autoselect Mode */
1996 if(cfi->addr_unlock1) {
1997 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1998 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1999 }
2000 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2001 /* FIXME - should have a delay before continuing */
2002
2003 if (!cfi->numchips) {
2004 /* This is the first time we're called. Set up the CFI
2005 stuff accordingly and return */
2006
2007 cfi->mfr = jedec_read_mfr(map, base, cfi);
2008 cfi->id = jedec_read_id(map, base, cfi);
2009 DEBUG(MTD_DEBUG_LEVEL3,
2010 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2011 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2012 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
2013 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
2014 DEBUG( MTD_DEBUG_LEVEL3,
2015 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2016 __func__, cfi->mfr, cfi->id,
2017 cfi->addr_unlock1, cfi->addr_unlock2 );
2018 if (!cfi_jedec_setup(cfi, i))
2019 return 0;
2020 goto ok_out;
2021 }
2022 }
2023 goto retry;
2024 } else {
2025 __u16 mfr;
2026 __u16 id;
2027
2028 /* Make sure it is a chip of the same manufacturer and id */
2029 mfr = jedec_read_mfr(map, base, cfi);
2030 id = jedec_read_id(map, base, cfi);
2031
2032 if ((mfr != cfi->mfr) || (id != cfi->id)) {
2033 printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
2034 map->name, mfr, id, base);
2035 jedec_reset(base, map, cfi);
2036 return 0;
2037 }
2038 }
2039
	2040	/* Check each previous chip location to see if it's an alias */
2041 for (i=0; i < (base >> cfi->chipshift); i++) {
2042 unsigned long start;
2043 if(!test_bit(i, chip_map)) {
2044 continue; /* Skip location; no valid chip at this address */
2045 }
2046 start = i << cfi->chipshift;
2047 if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
2048 jedec_read_id(map, start, cfi) == cfi->id) {
2049 /* Eep. This chip also looks like it's in autoselect mode.
2050 Is it an alias for the new one? */
2051 jedec_reset(start, map, cfi);
2052
2053 /* If the device IDs go away, it's an alias */
2054 if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
2055 jedec_read_id(map, base, cfi) != cfi->id) {
2056 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
2057 map->name, base, start);
2058 return 0;
2059 }
2060
2061 /* Yes, it's actually got the device IDs as data. Most
2062 * unfortunate. Stick the new chip in read mode
2063 * too and if it's the same, assume it's an alias. */
2064 /* FIXME: Use other modes to do a proper check */
2065 jedec_reset(base, map, cfi);
2066 if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
2067 jedec_read_id(map, base, cfi) == cfi->id) {
2068 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
2069 map->name, base, start);
2070 return 0;
2071 }
2072 }
2073 }
2074
2075 /* OK, if we got to here, then none of the previous chips appear to
2076 be aliases for the current one. */
2077 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
2078 cfi->numchips++;
2079
2080ok_out:
2081 /* Put it back into Read Mode */
2082 jedec_reset(base, map, cfi);
2083
2084 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
2085 map->name, cfi_interleave(cfi), cfi->device_type*8, base,
2086 map->bankwidth*8);
2087
2088 return 1;
2089}
2090
2091static struct chip_probe jedec_chip_probe = {
2092 .name = "JEDEC",
2093 .probe_chip = jedec_probe_chip
2094};
2095
2096static struct mtd_info *jedec_probe(struct map_info *map)
2097{
2098 /*
2099 * Just use the generic probe stuff to call our CFI-specific
2100 * chip_probe routine in all the possible permutations, etc.
2101 */
2102 return mtd_do_chip_probe(map, &jedec_chip_probe);
2103}
2104
2105static struct mtd_chip_driver jedec_chipdrv = {
2106 .probe = jedec_probe,
2107 .name = "jedec_probe",
2108 .module = THIS_MODULE
2109};
2110
2111static int __init jedec_probe_init(void)
2112{
2113 register_mtd_chip_driver(&jedec_chipdrv);
2114 return 0;
2115}
2116
2117static void __exit jedec_probe_exit(void)
2118{
2119 unregister_mtd_chip_driver(&jedec_chipdrv);
2120}
2121
2122module_init(jedec_probe_init);
2123module_exit(jedec_probe_exit);
2124
2125MODULE_LICENSE("GPL");
2126MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
2127MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
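/*
 * Usage sketch (illustrative, not part of this file): a board map
 * driver reaches this probe through do_map_probe() by the name
 * registered above, typically as a fallback after a CFI probe --
 * the same pattern documented in map_absent.c below. "my_map" is a
 * hypothetical, already-initialised struct map_info.
 *
 *	struct mtd_info *mtd;
 *
 *	mtd = do_map_probe("cfi", &my_map);
 *	if (!mtd)
 *		mtd = do_map_probe("jedec_probe", &my_map);
 */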
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
new file mode 100644
index 000000000000..c6c83833cc32
--- /dev/null
+++ b/drivers/mtd/chips/map_absent.c
@@ -0,0 +1,117 @@
1/*
2 * Common code to handle absent "placeholder" devices
3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
4 * $Id: map_absent.c,v 1.5 2004/11/16 18:29:00 dwmw2 Exp $
5 *
6 * This map driver is used to allocate "placeholder" MTD
7 * devices on systems that have socketed/removable media.
8 * Use of this driver as a fallback preserves the expected
9 * registration of MTD device nodes regardless of probe outcome.
10 * A usage example is as follows:
11 *
12 * my_dev[i] = do_map_probe("cfi", &my_map[i]);
13 * if(NULL == my_dev[i]) {
14 * my_dev[i] = do_map_probe("map_absent", &my_map[i]);
15 * }
16 *
17 * Any device 'probed' with this driver will return -ENODEV
18 * upon open.
19 */
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/map.h>
29#include <linux/mtd/compatmac.h>
30
31static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
32static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
33static int map_absent_erase (struct mtd_info *, struct erase_info *);
34static void map_absent_sync (struct mtd_info *);
35static struct mtd_info *map_absent_probe(struct map_info *map);
36static void map_absent_destroy (struct mtd_info *);
37
38
39static struct mtd_chip_driver map_absent_chipdrv = {
40 .probe = map_absent_probe,
41 .destroy = map_absent_destroy,
42 .name = "map_absent",
43 .module = THIS_MODULE
44};
45
46static struct mtd_info *map_absent_probe(struct map_info *map)
47{
48 struct mtd_info *mtd;
49
50 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
51 if (!mtd) {
52 return NULL;
53 }
54
55 memset(mtd, 0, sizeof(*mtd));
56
57 map->fldrv = &map_absent_chipdrv;
58 mtd->priv = map;
59 mtd->name = map->name;
60 mtd->type = MTD_ABSENT;
61 mtd->size = map->size;
62 mtd->erase = map_absent_erase;
63 mtd->read = map_absent_read;
64 mtd->write = map_absent_write;
65 mtd->sync = map_absent_sync;
66 mtd->flags = 0;
67 mtd->erasesize = PAGE_SIZE;
68
69 __module_get(THIS_MODULE);
70 return mtd;
71}
72
73
74static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
75{
76 *retlen = 0;
77 return -ENODEV;
78}
79
80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
81{
82 *retlen = 0;
83 return -ENODEV;
84}
85
86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
87{
88 return -ENODEV;
89}
90
91static void map_absent_sync(struct mtd_info *mtd)
92{
93 /* nop */
94}
95
96static void map_absent_destroy(struct mtd_info *mtd)
97{
98 /* nop */
99}
100
101static int __init map_absent_init(void)
102{
103 register_mtd_chip_driver(&map_absent_chipdrv);
104 return 0;
105}
106
107static void __exit map_absent_exit(void)
108{
109 unregister_mtd_chip_driver(&map_absent_chipdrv);
110}
111
112module_init(map_absent_init);
113module_exit(map_absent_exit);
114
115MODULE_LICENSE("GPL");
116MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>");
117MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
new file mode 100644
index 000000000000..bd2e876a814b
--- /dev/null
+++ b/drivers/mtd/chips/map_ram.c
@@ -0,0 +1,143 @@
1/*
2 * Common code to handle map devices which are simple RAM
3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <asm/io.h>
11#include <asm/byteorder.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/compatmac.h>
18
19
20static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
21static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
22static int mapram_erase (struct mtd_info *, struct erase_info *);
23static void mapram_nop (struct mtd_info *);
24static struct mtd_info *map_ram_probe(struct map_info *map);
25
26
27static struct mtd_chip_driver mapram_chipdrv = {
28 .probe = map_ram_probe,
29 .name = "map_ram",
30 .module = THIS_MODULE
31};
32
33static struct mtd_info *map_ram_probe(struct map_info *map)
34{
35 struct mtd_info *mtd;
36
37 /* Check the first byte is RAM */
38#if 0
39 map_write8(map, 0x55, 0);
40 if (map_read8(map, 0) != 0x55)
41 return NULL;
42
43 map_write8(map, 0xAA, 0);
44 if (map_read8(map, 0) != 0xAA)
45 return NULL;
46
47 /* Check the last byte is RAM */
48 map_write8(map, 0x55, map->size-1);
49 if (map_read8(map, map->size-1) != 0x55)
50 return NULL;
51
52 map_write8(map, 0xAA, map->size-1);
53 if (map_read8(map, map->size-1) != 0xAA)
54 return NULL;
55#endif
56 /* OK. It seems to be RAM. */
57
58 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
59 if (!mtd)
60 return NULL;
61
62 memset(mtd, 0, sizeof(*mtd));
63
64 map->fldrv = &mapram_chipdrv;
65 mtd->priv = map;
66 mtd->name = map->name;
67 mtd->type = MTD_RAM;
68 mtd->size = map->size;
69 mtd->erase = mapram_erase;
70 mtd->read = mapram_read;
71 mtd->write = mapram_write;
72 mtd->sync = mapram_nop;
73 mtd->flags = MTD_CAP_RAM | MTD_VOLATILE;
74
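	/* Shrink the nominal erase size from PAGE_SIZE until it evenly divides the RAM size. */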
75 mtd->erasesize = PAGE_SIZE;
76 while(mtd->size & (mtd->erasesize - 1))
77 mtd->erasesize >>= 1;
78
79 __module_get(THIS_MODULE);
80 return mtd;
81}
82
83
84static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
85{
86 struct map_info *map = mtd->priv;
87
88 map_copy_from(map, buf, from, len);
89 *retlen = len;
90 return 0;
91}
92
93static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
94{
95 struct map_info *map = mtd->priv;
96
97 map_copy_to(map, to, buf, len);
98 *retlen = len;
99 return 0;
100}
101
102static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
103{
104 /* Yeah, it's inefficient. Who cares? It's faster than a _real_
105 flash erase. */
106 struct map_info *map = mtd->priv;
107 map_word allff;
108 unsigned long i;
109
110 allff = map_word_ff(map);
111
112 for (i=0; i<instr->len; i += map_bankwidth(map))
113 map_write(map, allff, instr->addr + i);
114
115 instr->state = MTD_ERASE_DONE;
116
117 mtd_erase_callback(instr);
118
119 return 0;
120}
121
122static void mapram_nop(struct mtd_info *mtd)
123{
124 /* Nothing to see here */
125}
126
127static int __init map_ram_init(void)
128{
129 register_mtd_chip_driver(&mapram_chipdrv);
130 return 0;
131}
132
133static void __exit map_ram_exit(void)
134{
135 unregister_mtd_chip_driver(&mapram_chipdrv);
136}
137
138module_init(map_ram_init);
139module_exit(map_ram_exit);
140
141MODULE_LICENSE("GPL");
142MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
143MODULE_DESCRIPTION("MTD chip driver for RAM chips");
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
new file mode 100644
index 000000000000..624c12c232c8
--- /dev/null
+++ b/drivers/mtd/chips/map_rom.c
@@ -0,0 +1,94 @@
1/*
2 * Common code to handle map devices which are simple ROM
3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <asm/io.h>
11#include <asm/byteorder.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/compatmac.h>
18
19static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
20static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
21static void maprom_nop (struct mtd_info *);
22static struct mtd_info *map_rom_probe(struct map_info *map);
23
24static struct mtd_chip_driver maprom_chipdrv = {
25 .probe = map_rom_probe,
26 .name = "map_rom",
27 .module = THIS_MODULE
28};
29
30static struct mtd_info *map_rom_probe(struct map_info *map)
31{
32 struct mtd_info *mtd;
33
34 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
35 if (!mtd)
36 return NULL;
37
38 memset(mtd, 0, sizeof(*mtd));
39
40 map->fldrv = &maprom_chipdrv;
41 mtd->priv = map;
42 mtd->name = map->name;
43 mtd->type = MTD_ROM;
44 mtd->size = map->size;
45 mtd->read = maprom_read;
46 mtd->write = maprom_write;
47 mtd->sync = maprom_nop;
48 mtd->flags = MTD_CAP_ROM;
49 mtd->erasesize = 131072;
50 while(mtd->size & (mtd->erasesize - 1))
51 mtd->erasesize >>= 1;
52
53 __module_get(THIS_MODULE);
54 return mtd;
55}
56
57
58static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
59{
60 struct map_info *map = mtd->priv;
61
62 map_copy_from(map, buf, from, len);
63 *retlen = len;
64 return 0;
65}
66
67static void maprom_nop(struct mtd_info *mtd)
68{
69 /* Nothing to see here */
70}
71
72static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
73{
74 printk(KERN_NOTICE "maprom_write called\n");
75 return -EIO;
76}
77
78static int __init map_rom_init(void)
79{
80 register_mtd_chip_driver(&maprom_chipdrv);
81 return 0;
82}
83
84static void __exit map_rom_exit(void)
85{
86 unregister_mtd_chip_driver(&maprom_chipdrv);
87}
88
89module_init(map_rom_init);
90module_exit(map_rom_exit);
91
92MODULE_LICENSE("GPL");
93MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
94MODULE_DESCRIPTION("MTD chip driver for ROM chips");
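/*
 * Usage sketch (illustrative): map_rom is the usual last resort when
 * the flash probes fail, short of the map_absent placeholder -- see
 * the pattern documented in map_absent.c above. "my_map" is a
 * hypothetical, already-initialised struct map_info.
 *
 *	mtd = do_map_probe("map_rom", &my_map);
 *	if (!mtd)
 *		mtd = do_map_probe("map_absent", &my_map);
 */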
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
new file mode 100644
index 000000000000..c3cf0f63bc93
--- /dev/null
+++ b/drivers/mtd/chips/sharp.c
@@ -0,0 +1,596 @@
1/*
2 * MTD chip driver for pre-CFI Sharp flash chips
3 *
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc.
6 *
7 * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $
8 *
9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
11 * LH28F008SCT Symmetrical block flash memory, 1Mx8
12 *
13 * Documentation:
14 * http://www.sharpmeg.com/datasheets/memic/flashcmp/
15 * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf
17 *
18 * Limitations:
19 * This driver only supports 4x1 arrangement of chips.
20 * Not tested on anything but PowerPC.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/mtd/map.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/cfi.h>
32#include <linux/delay.h>
33#include <linux/init.h>
34
35#define CMD_RESET 0xffffffff
36#define CMD_READ_ID 0x90909090
37#define CMD_READ_STATUS 0x70707070
38#define CMD_CLEAR_STATUS 0x50505050
39#define CMD_BLOCK_ERASE_1 0x20202020
40#define CMD_BLOCK_ERASE_2 0xd0d0d0d0
41#define CMD_BYTE_WRITE 0x40404040
42#define CMD_SUSPEND 0xb0b0b0b0
43#define CMD_RESUME 0xd0d0d0d0
44#define CMD_SET_BLOCK_LOCK_1 0x60606060
45#define CMD_SET_BLOCK_LOCK_2 0x01010101
46#define CMD_SET_MASTER_LOCK_1 0x60606060
47#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1
48#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060
49#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0
50
51#define SR_READY 0x80808080 // 1 = ready
52#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended
53#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits
54#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit
55#define SR_VPP 0x08080808 // 1 = Vpp is low
56#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended
57#define SR_PROTECT 0x02020202 // 1 = lock bit set
58#define SR_RESERVED 0x01010101
59
60#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT)
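/*
 * Every command and status mask repeats one byte in all four lanes:
 * the driver assumes four x8 chips in parallel on a 32-bit bus (the
 * "4x1 arrangement" noted in the limitations above), so each chip
 * sees the same command and contributes one byte of status.
 */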
61
62/* Configuration options */
63
64#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
65
66struct mtd_info *sharp_probe(struct map_info *);
67
68static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
69
70static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
71 size_t *retlen, u_char *buf);
72static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len,
73 size_t *retlen, const u_char *buf);
74static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr);
75static void sharp_sync(struct mtd_info *mtd);
76static int sharp_suspend(struct mtd_info *mtd);
77static void sharp_resume(struct mtd_info *mtd);
78static void sharp_destroy(struct mtd_info *mtd);
79
80static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
81 unsigned long adr, __u32 datum);
82static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
83 unsigned long adr);
84#ifdef AUTOUNLOCK
85static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
86 unsigned long adr);
87#endif
88
89
90struct sharp_info{
91 struct flchip *chip;
92 int bogus;
93 int chipshift;
94 int numchips;
95 struct flchip chips[1];
96};
97
98struct mtd_info *sharp_probe(struct map_info *map);
99static void sharp_destroy(struct mtd_info *mtd);
100
101static struct mtd_chip_driver sharp_chipdrv = {
102 .probe = sharp_probe,
103 .destroy = sharp_destroy,
104 .name = "sharp",
105 .module = THIS_MODULE
106};
107
108
109struct mtd_info *sharp_probe(struct map_info *map)
110{
111 struct mtd_info *mtd = NULL;
112 struct sharp_info *sharp = NULL;
113 int width;
114
115 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
116 if(!mtd)
117 return NULL;
118
119 sharp = kmalloc(sizeof(*sharp), GFP_KERNEL);
120 if(!sharp) {
121 kfree(mtd);
122 return NULL;
123 }
124
125 memset(mtd, 0, sizeof(*mtd));
126
127 width = sharp_probe_map(map,mtd);
128 if(!width){
129 kfree(mtd);
130 kfree(sharp);
131 return NULL;
132 }
133
134 mtd->priv = map;
135 mtd->type = MTD_NORFLASH;
136 mtd->erase = sharp_erase;
137 mtd->read = sharp_read;
138 mtd->write = sharp_write;
139 mtd->sync = sharp_sync;
140 mtd->suspend = sharp_suspend;
141 mtd->resume = sharp_resume;
142 mtd->flags = MTD_CAP_NORFLASH;
143 mtd->name = map->name;
144
145 memset(sharp, 0, sizeof(*sharp));
146 sharp->chipshift = 23;
147 sharp->numchips = 1;
148 sharp->chips[0].start = 0;
149 sharp->chips[0].state = FL_READY;
150 sharp->chips[0].mutex = &sharp->chips[0]._spinlock;
151 sharp->chips[0].word_write_time = 0;
152 init_waitqueue_head(&sharp->chips[0].wq);
153 spin_lock_init(&sharp->chips[0]._spinlock);
154
155 map->fldrv = &sharp_chipdrv;
156 map->fldrv_priv = sharp;
157
158 __module_get(THIS_MODULE);
159 return mtd;
160}
161
162static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
163{
164 unsigned long tmp;
165 unsigned long base = 0;
166 u32 read0, read4;
167 int width = 4;
168
169 tmp = map_read32(map, base+0);
170
171 map_write32(map, CMD_READ_ID, base+0);
172
173 read0=map_read32(map, base+0);
174 read4=map_read32(map, base+4);
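	/* Four x8 chips in parallel: each chip returns its ID byte in its own lane of the 32-bit read. */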
175 if(read0 == 0x89898989){
176 printk("Looks like sharp flash\n");
177 switch(read4){
178 case 0xaaaaaaaa:
179 case 0xa0a0a0a0:
180 /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
181 /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/
182 mtd->erasesize = 0x10000 * width;
183 mtd->size = 0x200000 * width;
184 return width;
185 case 0xa6a6a6a6:
186 /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/
187 /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/
188 mtd->erasesize = 0x10000 * width;
189 mtd->size = 0x100000 * width;
190 return width;
191#if 0
192 case 0x00000000: /* unknown */
193 /* XX - LH28F004SCT 512kx8, 8 64k blocks*/
194 mtd->erasesize = 0x10000 * width;
195 mtd->size = 0x80000 * width;
196 return width;
197#endif
198 default:
199 printk("Sort-of looks like sharp flash, 0x%08x 0x%08x\n",
200 read0,read4);
201 }
202 }else if((map_read32(map, base+0) == CMD_READ_ID)){
203 /* RAM, probably */
204 printk("Looks like RAM\n");
205 map_write32(map, tmp, base+0);
206 }else{
207 printk("Doesn't look like sharp flash, 0x%08x 0x%08x\n",
208 read0,read4);
209 }
210
211 return 0;
212}
213
214/* This function returns with the chip->mutex lock held. */
215static int sharp_wait(struct map_info *map, struct flchip *chip)
216{
	217	__u32 status;	/* must be 32-bit: the SR_* masks cover all four byte lanes */
218 unsigned long timeo = jiffies + HZ;
219 DECLARE_WAITQUEUE(wait, current);
220 int adr = 0;
221
222retry:
223 spin_lock_bh(chip->mutex);
224
225 switch(chip->state){
226 case FL_READY:
227 map_write32(map,CMD_READ_STATUS,adr);
228 chip->state = FL_STATUS;
229 case FL_STATUS:
230 status = map_read32(map,adr);
231//printk("status=%08x\n",status);
232
233 udelay(100);
234 if((status & SR_READY)!=SR_READY){
235//printk(".status=%08x\n",status);
236 udelay(100);
237 }
238 break;
239 default:
240 printk("Waiting for chip\n");
241
242 set_current_state(TASK_INTERRUPTIBLE);
243 add_wait_queue(&chip->wq, &wait);
244
245 spin_unlock_bh(chip->mutex);
246
247 schedule();
248 remove_wait_queue(&chip->wq, &wait);
249
250 if(signal_pending(current))
251 return -EINTR;
252
253 timeo = jiffies + HZ;
254
255 goto retry;
256 }
257
258 map_write32(map,CMD_RESET, adr);
259
260 chip->state = FL_READY;
261
262 return 0;
263}
264
265static void sharp_release(struct flchip *chip)
266{
267 wake_up(&chip->wq);
268 spin_unlock_bh(chip->mutex);
269}
270
271static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
272 size_t *retlen, u_char *buf)
273{
274 struct map_info *map = mtd->priv;
275 struct sharp_info *sharp = map->fldrv_priv;
276 int chipnum;
277 int ret = 0;
278 int ofs = 0;
279
280 chipnum = (from >> sharp->chipshift);
281 ofs = from & ((1 << sharp->chipshift)-1);
282
283 *retlen = 0;
284
285 while(len){
286 unsigned long thislen;
287
288 if(chipnum>=sharp->numchips)
289 break;
290
291 thislen = len;
292 if(ofs+thislen >= (1<<sharp->chipshift))
293 thislen = (1<<sharp->chipshift) - ofs;
294
295 ret = sharp_wait(map,&sharp->chips[chipnum]);
296 if(ret<0)
297 break;
298
299 map_copy_from(map,buf,ofs,thislen);
300
301 sharp_release(&sharp->chips[chipnum]);
302
303 *retlen += thislen;
304 len -= thislen;
305 buf += thislen;
306
307 ofs = 0;
308 chipnum++;
309 }
310 return ret;
311}
312
313static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len,
314 size_t *retlen, const u_char *buf)
315{
316 struct map_info *map = mtd->priv;
317 struct sharp_info *sharp = map->fldrv_priv;
318 int ret = 0;
319 int i,j;
320 int chipnum;
321 unsigned long ofs;
322 union { u32 l; unsigned char uc[4]; } tbuf;
323
324 *retlen = 0;
325
326 while(len){
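		/* Start from all ones: programming 0xff leaves the untouched bytes of a partial word unchanged. */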
327 tbuf.l = 0xffffffff;
328 chipnum = to >> sharp->chipshift;
329 ofs = to & ((1<<sharp->chipshift)-1);
330
331 j=0;
332 for(i=ofs&3;i<4 && len;i++){
333 tbuf.uc[i] = *buf;
334 buf++;
335 to++;
336 len--;
337 j++;
338 }
	339		ret = sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l);
340 if(ret<0)
341 return ret;
342 (*retlen)+=j;
343 }
344
345 return 0;
346}
347
348static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
349 unsigned long adr, __u32 datum)
350{
351 int ret;
352 int timeo;
353 int try;
354 int i;
355 int status = 0;
356
357 ret = sharp_wait(map,chip);
358
359 for(try=0;try<10;try++){
360 map_write32(map,CMD_BYTE_WRITE,adr);
361 /* cpu_to_le32 -> hack to fix the writel be->le conversion */
362 map_write32(map,cpu_to_le32(datum),adr);
363
364 chip->state = FL_WRITING;
365
366 timeo = jiffies + (HZ/2);
367
368 map_write32(map,CMD_READ_STATUS,adr);
369 for(i=0;i<100;i++){
370 status = map_read32(map,adr);
371 if((status & SR_READY)==SR_READY)
372 break;
373 }
374 if(i==100){
375 printk("sharp: timed out writing\n");
376 }
377
378 if(!(status&SR_ERRORS))
379 break;
380
381 printk("sharp: error writing byte at addr=%08lx status=%08x\n",adr,status);
382
383 map_write32(map,CMD_CLEAR_STATUS,adr);
384 }
385 map_write32(map,CMD_RESET,adr);
386 chip->state = FL_READY;
387
388 wake_up(&chip->wq);
389 spin_unlock_bh(chip->mutex);
390
391 return 0;
392}
393
394static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
395{
396 struct map_info *map = mtd->priv;
397 struct sharp_info *sharp = map->fldrv_priv;
398 unsigned long adr,len;
399 int chipnum, ret=0;
400
401//printk("sharp_erase()\n");
402 if(instr->addr & (mtd->erasesize - 1))
403 return -EINVAL;
404 if(instr->len & (mtd->erasesize - 1))
405 return -EINVAL;
406 if(instr->len + instr->addr > mtd->size)
407 return -EINVAL;
408
409 chipnum = instr->addr >> sharp->chipshift;
410 adr = instr->addr & ((1<<sharp->chipshift)-1);
411 len = instr->len;
412
413 while(len){
414 ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr);
415 if(ret)return ret;
416
417 adr += mtd->erasesize;
418 len -= mtd->erasesize;
419 if(adr >> sharp->chipshift){
420 adr = 0;
421 chipnum++;
422 if(chipnum>=sharp->numchips)
423 break;
424 }
425 }
426
427 instr->state = MTD_ERASE_DONE;
428 mtd_erase_callback(instr);
429
430 return 0;
431}
432
433static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
434 unsigned long adr)
435{
436 int ret;
437 unsigned long timeo;
438 int status;
439 DECLARE_WAITQUEUE(wait, current);
440
441 map_write32(map,CMD_READ_STATUS,adr);
442 status = map_read32(map,adr);
443
444 timeo = jiffies + HZ;
445
446 while(time_before(jiffies, timeo)){
447 map_write32(map,CMD_READ_STATUS,adr);
448 status = map_read32(map,adr);
449 if((status & SR_READY)==SR_READY){
450 ret = 0;
451 goto out;
452 }
453 set_current_state(TASK_INTERRUPTIBLE);
454 add_wait_queue(&chip->wq, &wait);
455
456 //spin_unlock_bh(chip->mutex);
457
458 schedule_timeout(1);
459 schedule();
460 remove_wait_queue(&chip->wq, &wait);
461
462 //spin_lock_bh(chip->mutex);
463
464 if (signal_pending(current)){
465 ret = -EINTR;
466 goto out;
467 }
468
469 }
470 ret = -ETIME;
471out:
472 return ret;
473}
474
475static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
476 unsigned long adr)
477{
478 int ret;
479 //int timeo;
480 int status;
481 //int i;
482
483//printk("sharp_erase_oneblock()\n");
484
485#ifdef AUTOUNLOCK
486 /* This seems like a good place to do an unlock */
487 sharp_unlock_oneblock(map,chip,adr);
488#endif
489
490 map_write32(map,CMD_BLOCK_ERASE_1,adr);
491 map_write32(map,CMD_BLOCK_ERASE_2,adr);
492
493 chip->state = FL_ERASING;
494
495 ret = sharp_do_wait_for_ready(map,chip,adr);
496 if(ret<0)return ret;
497
498 map_write32(map,CMD_READ_STATUS,adr);
499 status = map_read32(map,adr);
500
501 if(!(status&SR_ERRORS)){
502 map_write32(map,CMD_RESET,adr);
503 chip->state = FL_READY;
504 //spin_unlock_bh(chip->mutex);
505 return 0;
506 }
507
508 printk("sharp: error erasing block at addr=%08lx status=%08x\n",adr,status);
509 map_write32(map,CMD_CLEAR_STATUS,adr);
510
511 //spin_unlock_bh(chip->mutex);
512
513 return -EIO;
514}
515
516#ifdef AUTOUNLOCK
517static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
518 unsigned long adr)
519{
520 int i;
521 int status;
522
523 map_write32(map,CMD_CLEAR_BLOCK_LOCKS_1,adr);
524 map_write32(map,CMD_CLEAR_BLOCK_LOCKS_2,adr);
525
526 udelay(100);
527
528 status = map_read32(map,adr);
529 printk("status=%08x\n",status);
530
531 for(i=0;i<1000;i++){
532 //map_write32(map,CMD_READ_STATUS,adr);
533 status = map_read32(map,adr);
534 if((status & SR_READY)==SR_READY)
535 break;
536 udelay(100);
537 }
538 if(i==1000){
539 printk("sharp: timed out unlocking block\n");
540 }
541
542 if(!(status&SR_ERRORS)){
543 map_write32(map,CMD_RESET,adr);
544 chip->state = FL_READY;
545 return;
546 }
547
548 printk("sharp: error unlocking block at addr=%08lx status=%08x\n",adr,status);
549 map_write32(map,CMD_CLEAR_STATUS,adr);
550}
551#endif
552
553static void sharp_sync(struct mtd_info *mtd)
554{
555 //printk("sharp_sync()\n");
556}
557
558static int sharp_suspend(struct mtd_info *mtd)
559{
560 printk("sharp_suspend()\n");
561 return -EINVAL;
562}
563
564static void sharp_resume(struct mtd_info *mtd)
565{
566 printk("sharp_resume()\n");
567
568}
569
570static void sharp_destroy(struct mtd_info *mtd)
571{
572 printk("sharp_destroy()\n");
573
574}
575
576int __init sharp_probe_init(void)
577{
578 printk("MTD Sharp chip driver <ds@lineo.com>\n");
579
580 register_mtd_chip_driver(&sharp_chipdrv);
581
582 return 0;
583}
584
585static void __exit sharp_probe_exit(void)
586{
587 unregister_mtd_chip_driver(&sharp_chipdrv);
588}
589
590module_init(sharp_probe_init);
591module_exit(sharp_probe_exit);
592
593
594MODULE_LICENSE("GPL");
595MODULE_AUTHOR("David Schleef <ds@schleef.org>");
596MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");