path: root/drivers/mtd
author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig  265
-rw-r--r--  drivers/mtd/Makefile  27
-rw-r--r--  drivers/mtd/afs.c  296
-rw-r--r--  drivers/mtd/chips/Kconfig  286
-rw-r--r--  drivers/mtd/chips/Makefile  26
-rw-r--r--  drivers/mtd/chips/amd_flash.c  1415
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  2160
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  1515
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  1418
-rw-r--r--  drivers/mtd/chips/cfi_probe.c  445
-rw-r--r--  drivers/mtd/chips/cfi_util.c  196
-rw-r--r--  drivers/mtd/chips/chipreg.c  111
-rw-r--r--  drivers/mtd/chips/fwh_lock.h  107
-rw-r--r--  drivers/mtd/chips/gen_probe.c  255
-rw-r--r--  drivers/mtd/chips/jedec.c  934
-rw-r--r--  drivers/mtd/chips/jedec_probe.c  2127
-rw-r--r--  drivers/mtd/chips/map_absent.c  117
-rw-r--r--  drivers/mtd/chips/map_ram.c  143
-rw-r--r--  drivers/mtd/chips/map_rom.c  94
-rw-r--r--  drivers/mtd/chips/sharp.c  596
-rw-r--r--  drivers/mtd/cmdlinepart.c  367
-rw-r--r--  drivers/mtd/devices/Kconfig  259
-rw-r--r--  drivers/mtd/devices/Makefile  25
-rw-r--r--  drivers/mtd/devices/blkmtd.c  823
-rw-r--r--  drivers/mtd/devices/block2mtd.c  495
-rw-r--r--  drivers/mtd/devices/doc2000.c  1309
-rw-r--r--  drivers/mtd/devices/doc2001.c  888
-rw-r--r--  drivers/mtd/devices/doc2001plus.c  1154
-rw-r--r--  drivers/mtd/devices/docecc.c  526
-rw-r--r--  drivers/mtd/devices/docprobe.c  355
-rw-r--r--  drivers/mtd/devices/lart.c  711
-rw-r--r--  drivers/mtd/devices/ms02-nv.c  326
-rw-r--r--  drivers/mtd/devices/ms02-nv.h  107
-rw-r--r--  drivers/mtd/devices/mtdram.c  235
-rw-r--r--  drivers/mtd/devices/phram.c  285
-rw-r--r--  drivers/mtd/devices/pmc551.c  843
-rw-r--r--  drivers/mtd/devices/slram.c  357
-rw-r--r--  drivers/mtd/ftl.c  1115
-rw-r--r--  drivers/mtd/inftlcore.c  912
-rw-r--r--  drivers/mtd/inftlmount.c  804
-rw-r--r--  drivers/mtd/maps/Kconfig  663
-rw-r--r--  drivers/mtd/maps/Makefile  73
-rw-r--r--  drivers/mtd/maps/amd76xrom.c  332
-rw-r--r--  drivers/mtd/maps/arctic-mtd.c  135
-rw-r--r--  drivers/mtd/maps/autcpu12-nvram.c  127
-rw-r--r--  drivers/mtd/maps/bast-flash.c  227
-rw-r--r--  drivers/mtd/maps/beech-mtd.c  112
-rw-r--r--  drivers/mtd/maps/cdb89712.c  268
-rw-r--r--  drivers/mtd/maps/ceiva.c  350
-rw-r--r--  drivers/mtd/maps/cfi_flagadm.c  139
-rw-r--r--  drivers/mtd/maps/cstm_mips_ixx.c  270
-rw-r--r--  drivers/mtd/maps/db1550-flash.c  187
-rw-r--r--  drivers/mtd/maps/db1x00-flash.c  226
-rw-r--r--  drivers/mtd/maps/dbox2-flash.c  126
-rw-r--r--  drivers/mtd/maps/dc21285.c  253
-rw-r--r--  drivers/mtd/maps/dilnetpc.c  495
-rw-r--r--  drivers/mtd/maps/dmv182.c  149
-rw-r--r--  drivers/mtd/maps/ebony.c  163
-rw-r--r--  drivers/mtd/maps/edb7312.c  147
-rw-r--r--  drivers/mtd/maps/elan-104nc.c  228
-rw-r--r--  drivers/mtd/maps/epxa10db-flash.c  176
-rw-r--r--  drivers/mtd/maps/fortunet.c  271
-rw-r--r--  drivers/mtd/maps/h720x-flash.c  144
-rw-r--r--  drivers/mtd/maps/ichxrom.c  383
-rw-r--r--  drivers/mtd/maps/impa7.c  161
-rw-r--r--  drivers/mtd/maps/integrator-flash.c  217
-rw-r--r--  drivers/mtd/maps/ipaq-flash.c  464
-rw-r--r--  drivers/mtd/maps/iq80310.c  119
-rw-r--r--  drivers/mtd/maps/ixp2000.c  280
-rw-r--r--  drivers/mtd/maps/ixp4xx.c  259
-rw-r--r--  drivers/mtd/maps/l440gx.c  157
-rw-r--r--  drivers/mtd/maps/lasat.c  102
-rw-r--r--  drivers/mtd/maps/lubbock-flash.c  168
-rw-r--r--  drivers/mtd/maps/map_funcs.c  44
-rw-r--r--  drivers/mtd/maps/mbx860.c  100
-rw-r--r--  drivers/mtd/maps/mpc1211.c  81
-rw-r--r--  drivers/mtd/maps/netsc520.c  140
-rw-r--r--  drivers/mtd/maps/nettel.c  496
-rw-r--r--  drivers/mtd/maps/ocelot.c  175
-rw-r--r--  drivers/mtd/maps/ocotea.c  154
-rw-r--r--  drivers/mtd/maps/octagon-5066.c  248
-rw-r--r--  drivers/mtd/maps/omap-toto-flash.c  137
-rw-r--r--  drivers/mtd/maps/pb1550-flash.c  203
-rw-r--r--  drivers/mtd/maps/pb1xxx-flash.c  178
-rw-r--r--  drivers/mtd/maps/pci.c  388
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c  860
-rw-r--r--  drivers/mtd/maps/physmap.c  125
-rw-r--r--  drivers/mtd/maps/pnc2000.c  93
-rw-r--r--  drivers/mtd/maps/redwood.c  169
-rw-r--r--  drivers/mtd/maps/rpxlite.c  66
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c  453
-rw-r--r--  drivers/mtd/maps/sbc8240.c  247
-rw-r--r--  drivers/mtd/maps/sbc_gxx.c  239
-rw-r--r--  drivers/mtd/maps/sc520cdp.c  304
-rw-r--r--  drivers/mtd/maps/scb2_flash.c  256
-rw-r--r--  drivers/mtd/maps/scx200_docflash.c  233
-rw-r--r--  drivers/mtd/maps/sharpsl-flash.c  101
-rw-r--r--  drivers/mtd/maps/solutionengine.c  137
-rw-r--r--  drivers/mtd/maps/sun_uflash.c  177
-rw-r--r--  drivers/mtd/maps/tqm8xxl.c  263
-rw-r--r--  drivers/mtd/maps/ts5500_flash.c  141
-rw-r--r--  drivers/mtd/maps/tsunami_flash.c  108
-rw-r--r--  drivers/mtd/maps/uclinux.c  127
-rw-r--r--  drivers/mtd/maps/vmax301.c  198
-rw-r--r--  drivers/mtd/maps/walnut.c  122
-rw-r--r--  drivers/mtd/maps/wr_sbc82xx_flash.c  181
-rw-r--r--  drivers/mtd/mtd_blkdevs.c  478
-rw-r--r--  drivers/mtd/mtdblock.c  394
-rw-r--r--  drivers/mtd/mtdblock_ro.c  87
-rw-r--r--  drivers/mtd/mtdchar.c  562
-rw-r--r--  drivers/mtd/mtdconcat.c  897
-rw-r--r--  drivers/mtd/mtdcore.c  419
-rw-r--r--  drivers/mtd/mtdpart.c  599
-rw-r--r--  drivers/mtd/nand/Kconfig  207
-rw-r--r--  drivers/mtd/nand/Makefile  24
-rw-r--r--  drivers/mtd/nand/au1550nd.c  477
-rw-r--r--  drivers/mtd/nand/autcpu12.c  225
-rw-r--r--  drivers/mtd/nand/diskonchip.c  1782
-rw-r--r--  drivers/mtd/nand/edb7312.c  218
-rw-r--r--  drivers/mtd/nand/h1910.c  208
-rw-r--r--  drivers/mtd/nand/nand_base.c  2563
-rw-r--r--  drivers/mtd/nand/nand_bbt.c  1056
-rw-r--r--  drivers/mtd/nand/nand_ecc.c  250
-rw-r--r--  drivers/mtd/nand/nand_ids.c  129
-rw-r--r--  drivers/mtd/nand/nandsim.c  1613
-rw-r--r--  drivers/mtd/nand/ppchameleonevb.c  420
-rw-r--r--  drivers/mtd/nand/rtc_from4.c  559
-rw-r--r--  drivers/mtd/nand/s3c2410.c  704
-rwxr-xr-x  drivers/mtd/nand/sharpsl.c  260
-rw-r--r--  drivers/mtd/nand/spia.c  173
-rw-r--r--  drivers/mtd/nand/toto.c  205
-rw-r--r--  drivers/mtd/nand/tx4925ndfmc.c  416
-rw-r--r--  drivers/mtd/nand/tx4938ndfmc.c  406
-rw-r--r--  drivers/mtd/nftlcore.c  767
-rw-r--r--  drivers/mtd/nftlmount.c  770
-rw-r--r--  drivers/mtd/redboot.c  235
136 files changed, 56047 insertions, 0 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
new file mode 100644
index 000000000000..027054dea032
--- /dev/null
+++ b/drivers/mtd/Kconfig
@@ -0,0 +1,265 @@
1# $Id: Kconfig,v 1.7 2004/11/22 11:33:56 ijc Exp $
2
3menu "Memory Technology Devices (MTD)"
4
5config MTD
6 tristate "Memory Technology Device (MTD) support"
7 help
8 Memory Technology Devices are flash, RAM and similar chips, often
9 used for solid state file systems on embedded devices. This option
10 will provide the generic support for MTD drivers to register
11 themselves with the kernel and for potential users of MTD devices
12 to enumerate the devices which are present and obtain a handle on
13 them. It will also allow you to select individual drivers for
14 particular hardware and users of MTD devices. If unsure, say N.
15
16config MTD_DEBUG
17 bool "Debugging"
18 depends on MTD
19 help
20 This turns on low-level debugging for the entire MTD sub-system.
21 Normally, you should say 'N'.
22
23config MTD_DEBUG_VERBOSE
24 int "Debugging verbosity (0 = quiet, 3 = noisy)"
25 depends on MTD_DEBUG
26 default "0"
27 help
28 Determines the verbosity level of the MTD debugging messages.
29
30config MTD_CONCAT
31 tristate "MTD concatenating support"
32 depends on MTD
33 help
34 Support for concatenating several MTD devices into a single
35 (virtual) one. This allows you to have -for example- a JFFS(2)
36 file system spanning multiple physical flash chips. If unsure,
37 say 'Y'.
38
39config MTD_PARTITIONS
40 bool "MTD partitioning support"
41 depends on MTD
42 help
43 If you have a device which needs to divide its flash chip(s) up
44 into multiple 'partitions', each of which appears to the user as
45 a separate MTD device, you require this option to be enabled. If
46 unsure, say 'Y'.
47
48 Note, however, that you don't need this option for the DiskOnChip
49 devices. Partitioning on NFTL 'devices' is a different matter - that's the
50 'normal' form of partitioning used on a block device.
51
52config MTD_REDBOOT_PARTS
53 tristate "RedBoot partition table parsing"
54 depends on MTD_PARTITIONS
55 ---help---
56 RedBoot is a ROM monitor and bootloader which deals with multiple
57 'images' in flash devices by putting a table in one of the erase
58 blocks on the device, similar to a partition table, which gives
59 the offsets, lengths and names of all the images stored in the
60 flash.
61
62 If you need code which can detect and parse this table, and register
63 MTD 'partitions' corresponding to each image in the table, enable
64 this option.
65
66 You will still need the parsing functions to be called by the driver
67 for your particular device. It won't happen automatically. The
68 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
69 example.
70
71config MTD_REDBOOT_DIRECTORY_BLOCK
72 int "Location of RedBoot partition table"
73 depends on MTD_REDBOOT_PARTS
74 default "-1"
75 ---help---
76 This option is the Linux counterpart to the
77 CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
78 option.
79
80 The option specifies which flash sector holds the RedBoot
81 partition table. A zero or positive value gives an absolute
82 erase block number. A negative value specifies a number of
83 sectors before the end of the device.
84
85 For example "2" means block number 2, "-1" means the last
86 block and "-2" means the penultimate block.
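   As a rough sketch (the names below are illustrative, not part of the
   option), a negative value resolves relative to the end of the device,
   i.e. with nblocks = device_size / erase_size:

       abs_block = (block >= 0) ? block : nblocks + block;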
87
88config MTD_REDBOOT_PARTS_UNALLOCATED
89 bool " Include unallocated flash regions"
90 depends on MTD_REDBOOT_PARTS
91 help
92 If you need to register each unallocated flash region as a MTD
93 'partition', enable this option.
94
95config MTD_REDBOOT_PARTS_READONLY
96 bool " Force read-only for RedBoot system images"
97 depends on MTD_REDBOOT_PARTS
98 help
99 If you need to force read-only for 'RedBoot', 'RedBoot Config' and
100 'FIS directory' images, enable this option.
101
102config MTD_CMDLINE_PARTS
103 bool "Command line partition table parsing"
104 depends on MTD_PARTITIONS = "y"
105 ---help---
106 Allow generic configuration of the MTD partition tables via the kernel
107 command line. Multiple flash resources are supported for hardware where
108 different kinds of flash memory are available.
109
110 You will still need the parsing functions to be called by the driver
111 for your particular device. It won't happen automatically. The
112 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
113 example.
114
115 The format for the command line is as follows:
116
117 mtdparts=<mtddef>[;<mtddef>]
118 <mtddef> := <mtd-id>:<partdef>[,<partdef>]
119 <partdef> := <size>[@offset][<name>][ro]
120 <mtd-id> := unique id used in mapping driver/device
121 <size> := standard linux memsize OR "-" to denote all
122 remaining space
123 <name> := (NAME)
124
125 Due to the way Linux handles the command line, no spaces are
126 allowed in the partition definition, including mtd id's and partition
127 names.
128
129 Examples:
130
131 1 flash resource (mtd-id "sa1100"), with a single writable partition:
132 mtdparts=sa1100:-
133
134 Same flash, but 2 named partitions, the first one being read-only:
135 mtdparts=sa1100:256k(ARMboot)ro,-(root)
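   A hypothetical example with two flash resources separated by ';' (the
   second mtd-id "physmap" is made up here purely for illustration):
       mtdparts=sa1100:256k(ARMboot)ro,-(root);physmap:1m(kernel),-(data)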
136
137 If unsure, say 'N'.
138
139config MTD_AFS_PARTS
140 tristate "ARM Firmware Suite partition parsing"
141 depends on ARM && MTD_PARTITIONS
142 ---help---
143 The ARM Firmware Suite allows the user to divide flash devices into
144 multiple 'images'. Each such image has a header containing its name
145 and offset/size etc.
146
147 If you need code which can detect and parse these tables, and
148 register MTD 'partitions' corresponding to each image detected,
149 enable this option.
150
151 You will still need the parsing functions to be called by the driver
152 for your particular device. It won't happen automatically. The
153 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
154
155comment "User Modules And Translation Layers"
156 depends on MTD
157
158config MTD_CHAR
159 tristate "Direct char device access to MTD devices"
160 depends on MTD
161 help
162 This provides a character device for each MTD device present in
163 the system, allowing the user to read and write directly to the
164 memory chips, and also use ioctl() to obtain information about
165 the device, or to erase parts of it.
166
167config MTD_BLOCK
168 tristate "Caching block device access to MTD devices"
169 depends on MTD
170 ---help---
171 Although most flash chips have an erase size too large to be useful
172 as block devices, it is possible to use MTD devices which are based
173 on RAM chips in this manner. This block device is a user of MTD
174 devices performing that function.
175
176 At the moment, it is also required for the Journalling Flash File
177 System(s) to obtain a handle on the MTD device when it's mounted
178 (although JFFS and JFFS2 don't actually use any of the functionality
179 of the mtdblock device).
180
181 Later, it may be extended to perform read/erase/modify/write cycles
182 on flash chips to emulate a smaller block size. Needless to say,
183 this is very unsafe, but could be useful for file systems which are
184 almost never written to.
185
186 You do not need this option for use with the DiskOnChip devices. For
187 those, enable NFTL support (CONFIG_NFTL) instead.
188
189config MTD_BLOCK_RO
190 tristate "Readonly block device access to MTD devices"
191 depends on MTD_BLOCK!=y && MTD
192 help
193 This allows you to mount read-only file systems (such as cramfs)
194 from an MTD device, without the overhead (and danger) of the caching
195 driver.
196
197 You do not need this option for use with the DiskOnChip devices. For
198 those, enable NFTL support (CONFIG_NFTL) instead.
199
200config FTL
201 tristate "FTL (Flash Translation Layer) support"
202 depends on MTD
203 ---help---
204 This provides support for the original Flash Translation Layer which
205 is part of the PCMCIA specification. It uses a kind of pseudo-
206 file system on a flash device to emulate a block device with
207 512-byte sectors, on top of which you put a 'normal' file system.
208
209 You may find that the algorithms used in this code are patented
210 unless you live in the Free World where software patents aren't
211 legal - in the USA you are only permitted to use this on PCMCIA
212 hardware, although under the terms of the GPL you're obviously
213 permitted to copy, modify and distribute the code as you wish. Just
214 not use it.
215
216config NFTL
217 tristate "NFTL (NAND Flash Translation Layer) support"
218 depends on MTD
219 ---help---
220 This provides support for the NAND Flash Translation Layer which is
221 used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
222 file system on a flash device to emulate a block device with
223 512-byte sectors, on top of which you put a 'normal' file system.
224
225 You may find that the algorithms used in this code are patented
226 unless you live in the Free World where software patents aren't
227 legal - in the USA you are only permitted to use this on DiskOnChip
228 hardware, although under the terms of the GPL you're obviously
229 permitted to copy, modify and distribute the code as you wish. Just
230 not use it.
231
232config NFTL_RW
233 bool "Write support for NFTL"
234 depends on NFTL
235 help
236 Support for writing to the NAND Flash Translation Layer, as used
237 on the DiskOnChip.
238
239config INFTL
240 tristate "INFTL (Inverse NAND Flash Translation Layer) support"
241 depends on MTD
242 ---help---
243 This provides support for the Inverse NAND Flash Translation
244 Layer which is used on M-Systems' newer DiskOnChip devices. It
245 uses a kind of pseudo-file system on a flash device to emulate
246 a block device with 512-byte sectors, on top of which you put
247 a 'normal' file system.
248
249 You may find that the algorithms used in this code are patented
250 unless you live in the Free World where software patents aren't
251 legal - in the USA you are only permitted to use this on DiskOnChip
252 hardware, although under the terms of the GPL you're obviously
253 permitted to copy, modify and distribute the code as you wish. Just
254 not use it.
255
256source "drivers/mtd/chips/Kconfig"
257
258source "drivers/mtd/maps/Kconfig"
259
260source "drivers/mtd/devices/Kconfig"
261
262source "drivers/mtd/nand/Kconfig"
263
264endmenu
265
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
new file mode 100644
index 000000000000..e4ad588327f7
--- /dev/null
+++ b/drivers/mtd/Makefile
@@ -0,0 +1,27 @@
1#
2# Makefile for the memory technology device drivers.
3#
4# $Id: Makefile.common,v 1.5 2004/08/10 20:51:49 dwmw2 Exp $
5
6# Core functionality.
7mtd-y := mtdcore.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9obj-$(CONFIG_MTD) += $(mtd-y)
10
11obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
12obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
13obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
14obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
15
16# 'Users' - code which presents functionality to userspace.
17obj-$(CONFIG_MTD_CHAR) += mtdchar.o
18obj-$(CONFIG_MTD_BLOCK) += mtdblock.o mtd_blkdevs.o
19obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o mtd_blkdevs.o
20obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o
21obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o
22obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o
23
24nftl-objs := nftlcore.o nftlmount.o
25inftl-objs := inftlcore.o inftlmount.o
26
27obj-y += chips/ maps/ devices/ nand/
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
new file mode 100644
index 000000000000..801e6c7d0892
--- /dev/null
+++ b/drivers/mtd/afs.c
@@ -0,0 +1,296 @@
1/*======================================================================
2
3 drivers/mtd/afs.c: ARM Flash Layout/Partitioning
4
5 Copyright (C) 2000 ARM Limited
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
21 This is access code for flashes using ARM's flash partitioning
22 standards.
23
24 $Id: afs.c,v 1.13 2004/02/27 22:09:59 rmk Exp $
25
26======================================================================*/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/init.h>
34
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/map.h>
37#include <linux/mtd/partitions.h>
38
39struct footer_struct {
40 u32 image_info_base; /* Address of first word of ImageFooter */
41 u32 image_start; /* Start of area reserved by this footer */
42 u32 signature; /* 'Magic' number proves it's a footer */
43 u32 type; /* Area type: ARM Image, SIB, customer */
44 u32 checksum; /* Just this structure */
45};
46
47struct image_info_struct {
48 u32 bootFlags; /* Boot flags, compression etc. */
49 u32 imageNumber; /* Unique number, selects for boot etc. */
50 u32 loadAddress; /* Address program should be loaded to */
51 u32 length; /* Actual size of image */
52 u32 address; /* Image is executed from here */
53 char name[16]; /* Null terminated */
54 u32 headerBase; /* Flash Address of any stripped header */
55 u32 header_length; /* Length of header in memory */
56 u32 headerType; /* AIF, RLF, s-record etc. */
57 u32 checksum; /* Image checksum (inc. this struct) */
58};
59
60static u32 word_sum(void *words, int num)
61{
62 u32 *p = words;
63 u32 sum = 0;
64
65 while (num--)
66 sum += *p++;
67
68 return sum;
69}
70
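/*
 * Illustrative sketch (afs_make_footer_checksum is a hypothetical helper,
 * not used by this driver): afs_read_footer() below accepts a footer only
 * if the 32-bit word sum of the whole structure, including the checksum
 * field, comes out as 0xffffffff, so a footer writer would choose the
 * checksum as follows.
 */
static u32 afs_make_footer_checksum(struct footer_struct *fs)
{
	fs->checksum = 0;
	return 0xffffffff - word_sum(fs, sizeof(*fs) / sizeof(u32));
}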
71static int
72afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
73 u_int off, u_int mask)
74{
75 struct footer_struct fs;
76 u_int ptr = off + mtd->erasesize - sizeof(fs);
77 size_t sz;
78 int ret;
79
80 ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
81 if (ret >= 0 && sz != sizeof(fs))
82 ret = -EINVAL;
83
84 if (ret < 0) {
85 printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
86 ptr, ret);
87 return ret;
88 }
89
90 ret = 1;
91
92 /*
93 * Does it contain the magic number?
94 */
95 if (fs.signature != 0xa0ffff9f)
96 ret = 0;
97
98 /*
99 * Check the checksum.
100 */
101 if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
102 ret = 0;
103
104 /*
105 * Don't touch the SIB.
106 */
107 if (fs.type == 2)
108 ret = 0;
109
110 *iis_start = fs.image_info_base & mask;
111 *img_start = fs.image_start & mask;
112
113 /*
114 * Check the image info base. This can not
115 * be located after the footer structure.
116 */
117 if (*iis_start >= ptr)
118 ret = 0;
119
120 /*
121 * Check the start of this image. The image
122 * data can not be located after this block.
123 */
124 if (*img_start > off)
125 ret = 0;
126
127 return ret;
128}
129
130static int
131afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
132{
133 size_t sz;
134 int ret, i;
135
136 memset(iis, 0, sizeof(*iis));
137 ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
138 if (ret < 0)
139 goto failed;
140
141 if (sz != sizeof(*iis)) {
142 ret = -EINVAL;
143 goto failed;
144 }
145
146 ret = 0;
147
148 /*
149 * Validate the name - it must be NUL terminated.
150 */
151 for (i = 0; i < sizeof(iis->name); i++)
152 if (iis->name[i] == '\0')
153 break;
154
155 if (i < sizeof(iis->name))
156 ret = 1;
157
158 return ret;
159
160 failed:
161 printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
162 ptr, ret);
163 return ret;
164}
165
166static int parse_afs_partitions(struct mtd_info *mtd,
167 struct mtd_partition **pparts,
168 unsigned long origin)
169{
170 struct mtd_partition *parts;
171 u_int mask, off, idx, sz;
172 int ret = 0;
173 char *str;
174
175 /*
176 * This is the address mask; we use this to mask off out of
177 * range address bits.
178 */
179 mask = mtd->size - 1;
180
181 /*
182 * First, calculate the size of the array we need for the
183 * partition information. We include in this the size of
184 * the strings.
185 */
186 for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
187 struct image_info_struct iis;
188 u_int iis_ptr, img_ptr;
189
190 ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
191 if (ret < 0)
192 break;
193 if (ret == 0)
194 continue;
195
196 ret = afs_read_iis(mtd, &iis, iis_ptr);
197 if (ret < 0)
198 break;
199 if (ret == 0)
200 continue;
201
202 sz += sizeof(struct mtd_partition);
203 sz += strlen(iis.name) + 1;
204 idx += 1;
205 }
206
207 if (!sz)
208 return ret;
209
210 parts = kmalloc(sz, GFP_KERNEL);
211 if (!parts)
212 return -ENOMEM;
213
214 memset(parts, 0, sz);
215 str = (char *)(parts + idx);
216
217 /*
218 * Identify the partitions
219 */
220 for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
221 struct image_info_struct iis;
222 u_int iis_ptr, img_ptr, size;
223
224 /* Read the footer. */
225 ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
226 if (ret < 0)
227 break;
228 if (ret == 0)
229 continue;
230
231 /* Read the image info block */
232 ret = afs_read_iis(mtd, &iis, iis_ptr);
233 if (ret < 0)
234 break;
235 if (ret == 0)
236 continue;
237
238 strcpy(str, iis.name);
239 size = mtd->erasesize + off - img_ptr;
240
241 /*
242 * In order to support JFFS2 partitions on this layout,
243 * we must lie to MTD about the real size of JFFS2
244 * partitions; this ensures that the AFS flash footer
245 * won't be erased by JFFS2. Please ensure that your
246 * JFFS2 partitions are given image numbers between
247 * 1000 and 2000 inclusive.
248 */
249 if (iis.imageNumber >= 1000 && iis.imageNumber < 2000)
250 size -= mtd->erasesize;
251
252 parts[idx].name = str;
253 parts[idx].size = size;
254 parts[idx].offset = img_ptr;
255 parts[idx].mask_flags = 0;
256
257 printk(" mtd%d: at 0x%08x, %5dKB, %8u, %s\n",
258 idx, img_ptr, parts[idx].size / 1024,
259 iis.imageNumber, str);
260
261 idx += 1;
262 str = str + strlen(iis.name) + 1;
263 }
264
265 if (!idx) {
266 kfree(parts);
267 parts = NULL;
268 }
269
270 *pparts = parts;
271 return idx ? idx : ret;
272}
273
274static struct mtd_part_parser afs_parser = {
275 .owner = THIS_MODULE,
276 .parse_fn = parse_afs_partitions,
277 .name = "afs",
278};
279
280static int __init afs_parser_init(void)
281{
282 return register_mtd_parser(&afs_parser);
283}
284
285static void __exit afs_parser_exit(void)
286{
287 deregister_mtd_parser(&afs_parser);
288}
289
290module_init(afs_parser_init);
291module_exit(afs_parser_exit);
292
293
294MODULE_AUTHOR("ARM Ltd");
295MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
296MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
new file mode 100644
index 000000000000..d682dbc8157e
--- /dev/null
+++ b/drivers/mtd/chips/Kconfig
@@ -0,0 +1,286 @@
1# drivers/mtd/chips/Kconfig
2# $Id: Kconfig,v 1.13 2004/12/01 15:49:10 nico Exp $
3
4menu "RAM/ROM/Flash chip drivers"
5 depends on MTD!=n
6
7config MTD_CFI
8 tristate "Detect flash chips by Common Flash Interface (CFI) probe"
9 depends on MTD
10 select MTD_GEN_PROBE
11 help
12 The Common Flash Interface specification was developed by Intel,
13 AMD and other flash manufacturers, and provides a universal method
14 for probing the capabilities of flash devices. If you wish to
15 support any device that is CFI-compliant, you need to enable this
16 option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
17 for more information on CFI.
18
19config MTD_JEDECPROBE
20 tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
21 depends on MTD
22 select MTD_GEN_PROBE
23 help
24 This option enables JEDEC-style probing of flash chips which are not
25 compatible with the Common Flash Interface, but will use the common
26 CFI-targeted flash drivers for any identified chips which are in fact
27 compatible in all but the probe method. This actually covers most
28 AMD/Fujitsu-compatible chips, and will also shortly cover non-CFI
29 Intel chips (that code is in MTD CVS and should shortly be sent for
30 inclusion in Linus' tree).
31
32config MTD_GEN_PROBE
33 tristate
34
35config MTD_CFI_ADV_OPTIONS
36 bool "Flash chip driver advanced configuration options"
37 depends on MTD_GEN_PROBE
38 help
39 If you need to specify a specific endianness for access to flash
40 chips, or if you wish to reduce the size of the kernel by including
41 support for only specific arrangements of flash chips, say 'Y'. This
42 option does not directly affect the code, but will enable other
43 configuration options which allow you to do so.
44
45 If unsure, say 'N'.
46
47choice
48 prompt "Flash cmd/query data swapping"
49 depends on MTD_CFI_ADV_OPTIONS
50 default MTD_CFI_NOSWAP
51
52config MTD_CFI_NOSWAP
53 bool "NO"
54 ---help---
55 This option defines the way in which the CPU attempts to arrange
56 data bits when writing the 'magic' commands to the chips. Saying
57 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
58 enabled, means that the CPU will not do any swapping; the chips
59 are expected to be wired to the CPU in 'host-endian' form.
60 Specific arrangements are possible with the BIG_ENDIAN_BYTE and
61 LITTLE_ENDIAN_BYTE options, if the bytes are reversed.
62
63 If you have a LART, on which the data (and address) lines were
64 connected in a fashion which ensured that the nets were as short
65 as possible, resulting in a bit-shuffling which seems utterly
66 random to the untrained eye, you need the LART_ENDIAN_BYTE option.
67
68 Yes, there really exists something sicker than PDP-endian :)
69
70config MTD_CFI_BE_BYTE_SWAP
71 bool "BIG_ENDIAN_BYTE"
72
73config MTD_CFI_LE_BYTE_SWAP
74 bool "LITTLE_ENDIAN_BYTE"
75
76endchoice
77
78config MTD_CFI_GEOMETRY
79 bool "Specific CFI Flash geometry selection"
80 depends on MTD_CFI_ADV_OPTIONS
81 help
82 This option does not affect the code directly, but will enable
83 some other configuration options which would allow you to reduce
84 the size of the kernel by including support for only certain
85 arrangements of CFI chips. If unsure, say 'N' and all options
86 which are supported by the current code will be enabled.
87
88config MTD_MAP_BANK_WIDTH_1
89 bool "Support 8-bit buswidth" if MTD_CFI_GEOMETRY
90 default y
91 help
92 If you wish to support CFI devices on a physical bus which is
93 8 bits wide, say 'Y'.
94
95config MTD_MAP_BANK_WIDTH_2
96 bool "Support 16-bit buswidth" if MTD_CFI_GEOMETRY
97 default y
98 help
99 If you wish to support CFI devices on a physical bus which is
100 16 bits wide, say 'Y'.
101
102config MTD_MAP_BANK_WIDTH_4
103 bool "Support 32-bit buswidth" if MTD_CFI_GEOMETRY
104 default y
105 help
106 If you wish to support CFI devices on a physical bus which is
107 32 bits wide, say 'Y'.
108
109config MTD_MAP_BANK_WIDTH_8
110 bool "Support 64-bit buswidth" if MTD_CFI_GEOMETRY
111 default n
112 help
113 If you wish to support CFI devices on a physical bus which is
114 64 bits wide, say 'Y'.
115
116config MTD_MAP_BANK_WIDTH_16
117 bool "Support 128-bit buswidth" if MTD_CFI_GEOMETRY
118 default n
119 help
120 If you wish to support CFI devices on a physical bus which is
121 128 bits wide, say 'Y'.
122
123config MTD_MAP_BANK_WIDTH_32
124 bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
125 default n
126 help
127 If you wish to support CFI devices on a physical bus which is
128 256 bits wide, say 'Y'.
129
130config MTD_CFI_I1
131 bool "Support 1-chip flash interleave" if MTD_CFI_GEOMETRY
132 default y
133 help
134 If your flash chips are not interleaved - i.e. you only have one
135 flash chip addressed by each bus cycle, then say 'Y'.
136
137config MTD_CFI_I2
138 bool "Support 2-chip flash interleave" if MTD_CFI_GEOMETRY
139 default y
140 help
141 If your flash chips are interleaved in pairs - i.e. you have two
142 flash chips addressed by each bus cycle, then say 'Y'.
143
144config MTD_CFI_I4
145 bool "Support 4-chip flash interleave" if MTD_CFI_GEOMETRY
146 default n
147 help
148 If your flash chips are interleaved in fours - i.e. you have four
149 flash chips addressed by each bus cycle, then say 'Y'.
150
151config MTD_CFI_I8
152 bool "Support 8-chip flash interleave" if MTD_CFI_GEOMETRY
153 default n
154 help
155 If your flash chips are interleaved in eights - i.e. you have eight
156 flash chips addressed by each bus cycle, then say 'Y'.
157
158config MTD_CFI_INTELEXT
159 tristate "Support for Intel/Sharp flash chips"
160 depends on MTD_GEN_PROBE
161 select MTD_CFI_UTIL
162 help
163 The Common Flash Interface defines a number of different command
164 sets which a CFI-compliant chip may claim to implement. This code
165 provides support for one of those command sets, used on Intel
166 StrataFlash and other parts.
167
168config MTD_CFI_AMDSTD
169 tristate "Support for AMD/Fujitsu flash chips"
170 depends on MTD_GEN_PROBE
171 select MTD_CFI_UTIL
172 help
173 The Common Flash Interface defines a number of different command
174 sets which a CFI-compliant chip may claim to implement. This code
175 provides support for one of those command sets, used on chips
176 including the AMD Am29LV320.
177
178config MTD_CFI_AMDSTD_RETRY
179 int "Retry failed commands (erase/program)"
180 depends on MTD_CFI_AMDSTD
181 default "0"
182 help
183 Some chips, when attached to a shared bus, don't properly filter
184 bus traffic that is destined to other devices. This broken
185 behavior causes erase and program sequences to be aborted when
186 the sequences are mixed with traffic for other devices.
187
188 SST49LF040 (and related) chips are known to be broken.
189
190config MTD_CFI_AMDSTD_RETRY_MAX
191 int "Max retries of failed commands (erase/program)"
192 depends on MTD_CFI_AMDSTD_RETRY
193 default "0"
194 help
195 If you have an SST49LF040 (or related chip) then this value should
196 be set to at least 1. This can also be adjusted at driver load
197 time with the retry_cmd_max module parameter.
198
199config MTD_CFI_STAA
200 tristate "Support for ST (Advanced Architecture) flash chips"
201 depends on MTD_GEN_PROBE
202 select MTD_CFI_UTIL
203 help
204 The Common Flash Interface defines a number of different command
205 sets which a CFI-compliant chip may claim to implement. This code
206 provides support for one of those command sets.
207
208config MTD_CFI_UTIL
209 tristate
210
211config MTD_RAM
212 tristate "Support for RAM chips in bus mapping"
213 depends on MTD
214 help
215 This option enables basic support for RAM chips accessed through
216 a bus mapping driver.
217
218config MTD_ROM
219 tristate "Support for ROM chips in bus mapping"
220 depends on MTD
221 help
222 This option enables basic support for ROM chips accessed through
223 a bus mapping driver.
224
225config MTD_ABSENT
226 tristate "Support for absent chips in bus mapping"
227 depends on MTD
228 help
229 This option enables support for a dummy probing driver used to
230 allocate placeholder MTD devices on systems that have socketed
231 or removable media. Use of this driver as a fallback chip probe
232 preserves the expected registration order of MTD device nodes on
233 the system regardless of media presence. Device nodes created
234 with this driver will return -ENODEV upon access.
235
236config MTD_OBSOLETE_CHIPS
237 depends on MTD && BROKEN
238 bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
239 help
240 This option does not enable any code directly, but will allow you to
241 select some other chip drivers which are now considered obsolete,
242 because the generic CONFIG_JEDECPROBE code above should now detect
243 the chips which are supported by these drivers, and allow the generic
244 CFI-compatible drivers to drive the chips. Say 'N' here unless you have
245 already tried the CONFIG_JEDECPROBE method and reported its failure
246 to the MTD mailing list at <linux-mtd@lists.infradead.org>.
247
248config MTD_AMDSTD
249 tristate "AMD compatible flash chip support (non-CFI)"
250 depends on MTD && MTD_OBSOLETE_CHIPS
251 help
252 This option enables support for flash chips using AMD-compatible
253 commands, including some which are not CFI-compatible and hence
254 cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
255
256 It also works on AMD compatible chips that do conform to CFI.
257
258config MTD_SHARP
259 tristate "pre-CFI Sharp chip support"
260 depends on MTD && MTD_OBSOLETE_CHIPS
261 help
262 This option enables support for flash chips using Sharp-compatible
263 commands, including some which are not CFI-compatible and hence
264 cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
265
266config MTD_JEDEC
267 tristate "JEDEC device support"
268 depends on MTD && MTD_OBSOLETE_CHIPS
269 help
270 Enable older JEDEC flash interface devices for self-
271 programming flash. It is commonly used in older AMD chips. It is
272 only called JEDEC because the JEDEC association
273 <http://www.jedec.org/> distributes the identification codes for the
274 chips.
275
276config MTD_XIP
277 bool "XIP aware MTD support"
278 depends on !SMP && MTD_CFI_INTELEXT && EXPERIMENTAL
279 default y if XIP_KERNEL
280 help
281 This allows MTD support to work with flash memory which is also
282 used for XIP purposes. If you're not sure what this is all about
283 then say N.
284
285endmenu
286
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
new file mode 100644
index 000000000000..6830489828c6
--- /dev/null
+++ b/drivers/mtd/chips/Makefile
@@ -0,0 +1,26 @@
1#
2# linux/drivers/mtd/chips/Makefile
3#
4# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
5
6# *** BIG UGLY NOTE ***
7#
8# The removal of get_module_symbol() and replacement with
9# inter_module_register() et al has introduced a link order dependency
10# here where previously there was none. We now have to ensure that
11# the CFI command set drivers are linked before gen_probe.o
12
13obj-$(CONFIG_MTD) += chipreg.o
14obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
15obj-$(CONFIG_MTD_CFI) += cfi_probe.o
16obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
17obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
18obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
19obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
20obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
21obj-$(CONFIG_MTD_JEDEC) += jedec.o
22obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
23obj-$(CONFIG_MTD_RAM) += map_ram.o
24obj-$(CONFIG_MTD_ROM) += map_rom.o
25obj-$(CONFIG_MTD_SHARP) += sharp.o
26obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
new file mode 100644
index 000000000000..41e2e3e31603
--- /dev/null
+++ b/drivers/mtd/chips/amd_flash.c
@@ -0,0 +1,1415 @@
1/*
2 * MTD map driver for AMD compatible flash chips (non-CFI)
3 *
4 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5 *
6 * $Id: amd_flash.c,v 1.26 2004/11/20 12:49:04 dwmw2 Exp $
7 *
8 * Copyright (c) 2001 Axis Communications AB
9 *
10 * This file is under GPL.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/flashchip.h>
26
27/* There's no limit. It exists only to avoid realloc. */
28#define MAX_AMD_CHIPS 8
29
30#define DEVICE_TYPE_X8 (8 / 8)
31#define DEVICE_TYPE_X16 (16 / 8)
32#define DEVICE_TYPE_X32 (32 / 8)
33
34/* Addresses */
35#define ADDR_MANUFACTURER 0x0000
36#define ADDR_DEVICE_ID 0x0001
37#define ADDR_SECTOR_LOCK 0x0002
38#define ADDR_HANDSHAKE 0x0003
39#define ADDR_UNLOCK_1 0x0555
40#define ADDR_UNLOCK_2 0x02AA
41
42/* Commands */
43#define CMD_UNLOCK_DATA_1 0x00AA
44#define CMD_UNLOCK_DATA_2 0x0055
45#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
46#define CMD_UNLOCK_BYPASS_MODE 0x0020
47#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
48#define CMD_RESET_DATA 0x00F0
49#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
50#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
51
52#define CMD_UNLOCK_SECTOR 0x0060
53
54/* Manufacturers */
55#define MANUFACTURER_AMD 0x0001
56#define MANUFACTURER_ATMEL 0x001F
57#define MANUFACTURER_FUJITSU 0x0004
58#define MANUFACTURER_ST 0x0020
59#define MANUFACTURER_SST 0x00BF
60#define MANUFACTURER_TOSHIBA 0x0098
61
62/* AMD */
63#define AM29F800BB 0x2258
64#define AM29F800BT 0x22D6
65#define AM29LV800BB 0x225B
66#define AM29LV800BT 0x22DA
67#define AM29LV160DT 0x22C4
68#define AM29LV160DB 0x2249
69#define AM29BDS323D 0x22D1
70#define AM29BDS643D 0x227E
71
72/* Atmel */
73#define AT49xV16x 0x00C0
74#define AT49xV16xT 0x00C2
75
76/* Fujitsu */
77#define MBM29LV160TE 0x22C4
78#define MBM29LV160BE 0x2249
79#define MBM29LV800BB 0x225B
80
81/* ST - www.st.com */
82#define M29W800T 0x00D7
83#define M29W160DT 0x22C4
84#define M29W160DB 0x2249
85
86/* SST */
87#define SST39LF800 0x2781
88#define SST39LF160 0x2782
89
90/* Toshiba */
91#define TC58FVT160 0x00C2
92#define TC58FVB160 0x0043
93
94#define D6_MASK 0x40
95
96struct amd_flash_private {
97 int device_type;
98 int interleave;
99 int numchips;
100 unsigned long chipshift;
101// const char *im_name;
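	/* Per-chip state; the array is allocated together with this
	 * structure in amd_flash_probe(). */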
102 struct flchip chips[0];
103};
104
105struct amd_flash_info {
106 const __u16 mfr_id;
107 const __u16 dev_id;
108 const char *name;
109 const u_long size;
110 const int numeraseregions;
111 const struct mtd_erase_region_info regions[4];
112};
113
114
115
116static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
117 u_char *);
118static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
119 const u_char *);
120static int amd_flash_erase(struct mtd_info *, struct erase_info *);
121static void amd_flash_sync(struct mtd_info *);
122static int amd_flash_suspend(struct mtd_info *);
123static void amd_flash_resume(struct mtd_info *);
124static void amd_flash_destroy(struct mtd_info *);
125static struct mtd_info *amd_flash_probe(struct map_info *map);
126
127
128static struct mtd_chip_driver amd_flash_chipdrv = {
129 .probe = amd_flash_probe,
130 .destroy = amd_flash_destroy,
131 .name = "amd_flash",
132 .module = THIS_MODULE
133};
134
135
136
137static const char im_name[] = "amd_flash";
138
139
140
141static inline __u32 wide_read(struct map_info *map, __u32 addr)
142{
143 if (map->buswidth == 1) {
144 return map_read8(map, addr);
145 } else if (map->buswidth == 2) {
146 return map_read16(map, addr);
147 } else if (map->buswidth == 4) {
148 return map_read32(map, addr);
149 }
150
151 return 0;
152}
153
154static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
155{
156 if (map->buswidth == 1) {
157 map_write8(map, val, addr);
158 } else if (map->buswidth == 2) {
159 map_write16(map, val, addr);
160 } else if (map->buswidth == 4) {
161 map_write32(map, val, addr);
162 }
163}
164
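/*
 * With two x16 chips interleaved on a 32-bit bus, the same command value
 * must reach both chips in one bus cycle, so the command is duplicated
 * into the upper 16 bits of the bus word.
 */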
165static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
166{
167 const struct amd_flash_private *private = map->fldrv_priv;
168 if ((private->interleave == 2) &&
169 (private->device_type == DEVICE_TYPE_X16)) {
170 cmd |= (cmd << 16);
171 }
172
173 return cmd;
174}
175
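/*
 * Standard AMD-style two-cycle unlock sequence: 0xAA to unlock address 1
 * (0x555) and 0x55 to unlock address 2 (0x2AA), each scaled by the bus
 * width.  The values are pre-duplicated into the upper half of the word
 * to cover the interleaved case.
 */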
176static inline void send_unlock(struct map_info *map, unsigned long base)
177{
178 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
179 base + (map->buswidth * ADDR_UNLOCK_1));
180 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
181 base + (map->buswidth * ADDR_UNLOCK_2));
182}
183
184static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
185{
186 send_unlock(map, base);
187 wide_write(map, make_cmd(map, cmd),
188 base + (map->buswidth * ADDR_UNLOCK_1));
189}
190
191static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
192 __u32 cmd, unsigned long addr)
193{
194 send_unlock(map, base);
195 wide_write(map, make_cmd(map, cmd), addr);
196}
197
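/*
 * AMD-style chips toggle status bit DQ6 (D6_MASK) on successive reads
 * while an embedded erase/program operation is in progress; two reads
 * returning the same DQ6 value mean the operation has completed.  For
 * two x16 chips interleaved on a 32-bit bus, each half of the bus word
 * is checked separately.
 */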
198static inline int flash_is_busy(struct map_info *map, unsigned long addr,
199 int interleave)
200{
201
202 if ((interleave == 2) && (map->buswidth == 4)) {
203 __u32 read1, read2;
204
205 read1 = wide_read(map, addr);
206 read2 = wide_read(map, addr);
207
208 return (((read1 >> 16) & D6_MASK) !=
209 ((read2 >> 16) & D6_MASK)) ||
210 (((read1 & 0xffff) & D6_MASK) !=
211 ((read2 & 0xffff) & D6_MASK));
212 }
213
214 return ((wide_read(map, addr) & D6_MASK) !=
215 (wide_read(map, addr) & D6_MASK));
216}
217
218static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
219 int unlock)
220{
221 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
222 int SLA = unlock ?
223 (sect_addr | (0x40 * map->buswidth)) :
224 (sect_addr & ~(0x40 * map->buswidth)) ;
225
226 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
227
228 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
229 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
230 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
231 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
232}
233
234static inline int is_sector_locked(struct map_info *map,
235 unsigned long sect_addr)
236{
237 int status;
238
239 wide_write(map, CMD_RESET_DATA, 0);
240 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
241
242 /* status is 0x0000 for unlocked and 0x0001 for locked */
243 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
244 wide_write(map, CMD_RESET_DATA, 0);
245 return status;
246}
247
248static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
249 int is_unlock)
250{
251 struct map_info *map;
252 struct mtd_erase_region_info *merip;
253 int eraseoffset, erasesize, eraseblocks;
254 int i;
255 int retval = 0;
256 int lock_status;
257
258 map = mtd->priv;
259
260 /* Pass the whole chip through sector by sector and check for each
261 sector if the sector and the given interval overlap */
262 for(i = 0; i < mtd->numeraseregions; i++) {
263 merip = &mtd->eraseregions[i];
264
265 eraseoffset = merip->offset;
266 erasesize = merip->erasesize;
267 eraseblocks = merip->numblocks;
268
269 if (ofs > eraseoffset + erasesize)
270 continue;
271
272 while (eraseblocks > 0) {
273 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
274 unlock_sector(map, eraseoffset, is_unlock);
275
276 lock_status = is_sector_locked(map, eraseoffset);
277
278 if (is_unlock && lock_status) {
279 printk("Cannot unlock sector at address %x length %xx\n",
280 eraseoffset, merip->erasesize);
281 retval = -1;
282 } else if (!is_unlock && !lock_status) {
283 printk("Cannot lock sector at address %x length %x\n",
284 eraseoffset, merip->erasesize);
285 retval = -1;
286 }
287 }
288 eraseoffset += erasesize;
289 eraseblocks --;
290 }
291 }
292 return retval;
293}
294
295static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
296{
297 return amd_flash_do_unlock(mtd, ofs, len, 1);
298}
299
300static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
301{
302 return amd_flash_do_unlock(mtd, ofs, len, 0);
303}
304
305
306/*
307 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
308 * matching table entry (-1 if not found or alias for already found chip).
309 */
310static int probe_new_chip(struct mtd_info *mtd, __u32 base,
311 struct flchip *chips,
312 struct amd_flash_private *private,
313 const struct amd_flash_info *table, int table_size)
314{
315 __u32 mfr_id;
316 __u32 dev_id;
317 struct map_info *map = mtd->priv;
318 struct amd_flash_private temp;
319 int i;
320
321 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
322 temp.interleave = 2;
323 map->fldrv_priv = &temp;
324
325 /* Enter autoselect mode. */
326 send_cmd(map, base, CMD_RESET_DATA);
327 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
328
329 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
330 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
331
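	/*
	 * On a 32-bit bus, two interleaved x16 chips answer the autoselect
	 * query identically in both halves of the word; if the halves
	 * match, keep the assumed 2-way interleave, otherwise treat it as
	 * a single chip.
	 */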
332 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
333 ((dev_id >> 16) == (dev_id & 0xffff))) {
334 mfr_id &= 0xffff;
335 dev_id &= 0xffff;
336 } else {
337 temp.interleave = 1;
338 }
339
340 for (i = 0; i < table_size; i++) {
341 if ((mfr_id == table[i].mfr_id) &&
342 (dev_id == table[i].dev_id)) {
343 if (chips) {
344 int j;
345
346 /* Is this an alias for an already found chip?
347 * In that case that chip should be in
348 * autoselect mode now.
349 */
350 for (j = 0; j < private->numchips; j++) {
351 __u32 mfr_id_other;
352 __u32 dev_id_other;
353
354 mfr_id_other =
355 wide_read(map, chips[j].start +
356 (map->buswidth *
357 ADDR_MANUFACTURER
358 ));
359 dev_id_other =
360 wide_read(map, chips[j].start +
361 (map->buswidth *
362 ADDR_DEVICE_ID));
363 if (temp.interleave == 2) {
364 mfr_id_other &= 0xffff;
365 dev_id_other &= 0xffff;
366 }
367 if ((mfr_id_other == mfr_id) &&
368 (dev_id_other == dev_id)) {
369
370 /* Exit autoselect mode. */
371 send_cmd(map, base,
372 CMD_RESET_DATA);
373
374 return -1;
375 }
376 }
377
378 if (private->numchips == MAX_AMD_CHIPS) {
379 printk(KERN_WARNING
380 "%s: Too many flash chips "
381 "detected. Increase "
382 "MAX_AMD_CHIPS from %d.\n",
383 map->name, MAX_AMD_CHIPS);
384
385 return -1;
386 }
387
388 chips[private->numchips].start = base;
389 chips[private->numchips].state = FL_READY;
390 chips[private->numchips].mutex =
391 &chips[private->numchips]._spinlock;
392 private->numchips++;
393 }
394
395 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
396 temp.interleave, (table[i].size)/(1024*1024),
397 table[i].name, base);
398
399 mtd->size += table[i].size * temp.interleave;
400 mtd->numeraseregions += table[i].numeraseregions;
401
402 break;
403 }
404 }
405
406 /* Exit autoselect mode. */
407 send_cmd(map, base, CMD_RESET_DATA);
408
409 if (i == table_size) {
410 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
411 "mfr id 0x%x, dev id 0x%x\n", map->name,
412 base, mfr_id, dev_id);
413 map->fldrv_priv = NULL;
414
415 return -1;
416 }
417
418 private->device_type = temp.device_type;
419 private->interleave = temp.interleave;
420
421 return i;
422}
423
424
425
426static struct mtd_info *amd_flash_probe(struct map_info *map)
427{
428 static const struct amd_flash_info table[] = {
429 {
430 .mfr_id = MANUFACTURER_AMD,
431 .dev_id = AM29LV160DT,
432 .name = "AMD AM29LV160DT",
433 .size = 0x00200000,
434 .numeraseregions = 4,
435 .regions = {
436 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
437 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
438 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
439 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
440 }
441 }, {
442 .mfr_id = MANUFACTURER_AMD,
443 .dev_id = AM29LV160DB,
444 .name = "AMD AM29LV160DB",
445 .size = 0x00200000,
446 .numeraseregions = 4,
447 .regions = {
448 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
449 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
450 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
451 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
452 }
453 }, {
454 .mfr_id = MANUFACTURER_TOSHIBA,
455 .dev_id = TC58FVT160,
456 .name = "Toshiba TC58FVT160",
457 .size = 0x00200000,
458 .numeraseregions = 4,
459 .regions = {
460 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
461 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
462 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
463 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
464 }
465 }, {
466 .mfr_id = MANUFACTURER_FUJITSU,
467 .dev_id = MBM29LV160TE,
468 .name = "Fujitsu MBM29LV160TE",
469 .size = 0x00200000,
470 .numeraseregions = 4,
471 .regions = {
472 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
473 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
474 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
475 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
476 }
477 }, {
478 .mfr_id = MANUFACTURER_TOSHIBA,
479 .dev_id = TC58FVB160,
480 .name = "Toshiba TC58FVB160",
481 .size = 0x00200000,
482 .numeraseregions = 4,
483 .regions = {
484 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
485 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
486 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
487 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
488 }
489 }, {
490 .mfr_id = MANUFACTURER_FUJITSU,
491 .dev_id = MBM29LV160BE,
492 .name = "Fujitsu MBM29LV160BE",
493 .size = 0x00200000,
494 .numeraseregions = 4,
495 .regions = {
496 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
497 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
498 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
499 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
500 }
501 }, {
502 .mfr_id = MANUFACTURER_AMD,
503 .dev_id = AM29LV800BB,
504 .name = "AMD AM29LV800BB",
505 .size = 0x00100000,
506 .numeraseregions = 4,
507 .regions = {
508 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
509 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
510 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
511 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
512 }
513 }, {
514 .mfr_id = MANUFACTURER_AMD,
515 .dev_id = AM29F800BB,
516 .name = "AMD AM29F800BB",
517 .size = 0x00100000,
518 .numeraseregions = 4,
519 .regions = {
520 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
521 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
522 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
523 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
524 }
525 }, {
526 .mfr_id = MANUFACTURER_AMD,
527 .dev_id = AM29LV800BT,
528 .name = "AMD AM29LV800BT",
529 .size = 0x00100000,
530 .numeraseregions = 4,
531 .regions = {
532 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
533 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
534 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
535 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
536 }
537 }, {
538 .mfr_id = MANUFACTURER_AMD,
539 .dev_id = AM29F800BT,
540 .name = "AMD AM29F800BT",
541 .size = 0x00100000,
542 .numeraseregions = 4,
543 .regions = {
544 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
545 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
546 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
547 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
548 }
549 }, {
550 .mfr_id = MANUFACTURER_AMD,
551 .dev_id = AM29LV800BB,
552 .name = "AMD AM29LV800BB",
553 .size = 0x00100000,
554 .numeraseregions = 4,
555 .regions = {
556 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
557 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
558 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
559 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
560 }
561 }, {
562 .mfr_id = MANUFACTURER_FUJITSU,
563 .dev_id = MBM29LV800BB,
564 .name = "Fujitsu MBM29LV800BB",
565 .size = 0x00100000,
566 .numeraseregions = 4,
567 .regions = {
568 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
569 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
570 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
571 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
572 }
573 }, {
574 .mfr_id = MANUFACTURER_ST,
575 .dev_id = M29W800T,
576 .name = "ST M29W800T",
577 .size = 0x00100000,
578 .numeraseregions = 4,
579 .regions = {
580 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
581 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
582 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
583 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
584 }
585 }, {
586 .mfr_id = MANUFACTURER_ST,
587 .dev_id = M29W160DT,
588 .name = "ST M29W160DT",
589 .size = 0x00200000,
590 .numeraseregions = 4,
591 .regions = {
592 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
593 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
594 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
595 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
596 }
597 }, {
598 .mfr_id = MANUFACTURER_ST,
599 .dev_id = M29W160DB,
600 .name = "ST M29W160DB",
601 .size = 0x00200000,
602 .numeraseregions = 4,
603 .regions = {
604 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
605 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
606 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
607 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
608 }
609 }, {
610 .mfr_id = MANUFACTURER_AMD,
611 .dev_id = AM29BDS323D,
612 .name = "AMD AM29BDS323D",
613 .size = 0x00400000,
614 .numeraseregions = 3,
615 .regions = {
616 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
617 { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
618 { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
619 }
620 }, {
621 .mfr_id = MANUFACTURER_AMD,
622 .dev_id = AM29BDS643D,
623 .name = "AMD AM29BDS643D",
624 .size = 0x00800000,
625 .numeraseregions = 3,
626 .regions = {
627 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 96 },
628 { .offset = 0x600000, .erasesize = 0x10000, .numblocks = 31 },
629 { .offset = 0x7f0000, .erasesize = 0x02000, .numblocks = 8 },
630 }
631 }, {
632 .mfr_id = MANUFACTURER_ATMEL,
633 .dev_id = AT49xV16x,
634 .name = "Atmel AT49xV16x",
635 .size = 0x00200000,
636 .numeraseregions = 2,
637 .regions = {
638 { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
639 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
640 }
641 }, {
642 .mfr_id = MANUFACTURER_ATMEL,
643 .dev_id = AT49xV16xT,
644 .name = "Atmel AT49xV16xT",
645 .size = 0x00200000,
646 .numeraseregions = 2,
647 .regions = {
648 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
649 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
650 }
651 }
652 };
653
654 struct mtd_info *mtd;
655 struct flchip chips[MAX_AMD_CHIPS];
656 int table_pos[MAX_AMD_CHIPS];
657 struct amd_flash_private temp;
658 struct amd_flash_private *private;
659 u_long size;
660 unsigned long base;
661 int i;
662 int reg_idx;
663 int offset;
664
665	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
666 if (!mtd) {
667 printk(KERN_WARNING
668 "%s: kmalloc failed for info structure\n", map->name);
669 return NULL;
670 }
671 memset(mtd, 0, sizeof(*mtd));
672 mtd->priv = map;
673
674 memset(&temp, 0, sizeof(temp));
675
676	printk(KERN_INFO "%s: Probing for AMD compatible flash...\n", map->name);
677
678 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
679 sizeof(table)/sizeof(table[0])))
680 == -1) {
681 printk(KERN_WARNING
682 "%s: Found no AMD compatible device at location zero\n",
683 map->name);
684 kfree(mtd);
685
686 return NULL;
687 }
688
689 chips[0].start = 0;
690 chips[0].state = FL_READY;
691 chips[0].mutex = &chips[0]._spinlock;
692 temp.numchips = 1;
693 for (size = mtd->size; size > 1; size >>= 1) {
694 temp.chipshift++;
695 }
696 switch (temp.interleave) {
697 case 2:
698 temp.chipshift += 1;
699 break;
700 case 4:
701 temp.chipshift += 2;
702 break;
703 }
704
705 /* Find out if there are any more chips in the map. */
706 for (base = (1 << temp.chipshift);
707 base < map->size;
708 base += (1 << temp.chipshift)) {
709 int numchips = temp.numchips;
710 table_pos[numchips] = probe_new_chip(mtd, base, chips,
711 &temp, table, sizeof(table)/sizeof(table[0]));
712 }
713
714 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
715 mtd->numeraseregions, GFP_KERNEL);
716 if (!mtd->eraseregions) {
717 printk(KERN_WARNING "%s: Failed to allocate "
718 "memory for MTD erase region info\n", map->name);
719 kfree(mtd);
720 map->fldrv_priv = NULL;
721 return NULL;
722 }
723
724 reg_idx = 0;
725 offset = 0;
726 for (i = 0; i < temp.numchips; i++) {
727 int dev_size;
728 int j;
729
730 dev_size = 0;
731 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
732 mtd->eraseregions[reg_idx].offset = offset +
733 (table[table_pos[i]].regions[j].offset *
734 temp.interleave);
735 mtd->eraseregions[reg_idx].erasesize =
736 table[table_pos[i]].regions[j].erasesize *
737 temp.interleave;
738 mtd->eraseregions[reg_idx].numblocks =
739 table[table_pos[i]].regions[j].numblocks;
740 if (mtd->erasesize <
741 mtd->eraseregions[reg_idx].erasesize) {
742 mtd->erasesize =
743 mtd->eraseregions[reg_idx].erasesize;
744 }
745 dev_size += mtd->eraseregions[reg_idx].erasesize *
746 mtd->eraseregions[reg_idx].numblocks;
747 reg_idx++;
748 }
749 offset += dev_size;
750 }
751 mtd->type = MTD_NORFLASH;
752 mtd->flags = MTD_CAP_NORFLASH;
753 mtd->name = map->name;
754 mtd->erase = amd_flash_erase;
755 mtd->read = amd_flash_read;
756 mtd->write = amd_flash_write;
757 mtd->sync = amd_flash_sync;
758 mtd->suspend = amd_flash_suspend;
759 mtd->resume = amd_flash_resume;
760 mtd->lock = amd_flash_lock;
761 mtd->unlock = amd_flash_unlock;
762
763 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
764 temp.numchips), GFP_KERNEL);
765 if (!private) {
766 printk(KERN_WARNING
767 "%s: kmalloc failed for private structure\n", map->name);
768 kfree(mtd);
769 map->fldrv_priv = NULL;
770 return NULL;
771 }
772 memcpy(private, &temp, sizeof(temp));
773 memcpy(private->chips, chips,
774 sizeof(struct flchip) * private->numchips);
775 for (i = 0; i < private->numchips; i++) {
776 init_waitqueue_head(&private->chips[i].wq);
777 spin_lock_init(&private->chips[i]._spinlock);
778 }
779
780 map->fldrv_priv = private;
781
782 map->fldrv = &amd_flash_chipdrv;
783
784 __module_get(THIS_MODULE);
785 return mtd;
786}
787
788
789
790static inline int read_one_chip(struct map_info *map, struct flchip *chip,
791 loff_t adr, size_t len, u_char *buf)
792{
793 DECLARE_WAITQUEUE(wait, current);
794 unsigned long timeo = jiffies + HZ;
795
796retry:
797 spin_lock_bh(chip->mutex);
798
799 if (chip->state != FL_READY){
800 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
801 map->name, chip->state);
802 set_current_state(TASK_UNINTERRUPTIBLE);
803 add_wait_queue(&chip->wq, &wait);
804
805 spin_unlock_bh(chip->mutex);
806
807 schedule();
808 remove_wait_queue(&chip->wq, &wait);
809
810 if(signal_pending(current)) {
811 return -EINTR;
812 }
813
814 timeo = jiffies + HZ;
815
816 goto retry;
817 }
818
819 adr += chip->start;
820
821 chip->state = FL_READY;
822
823 map_copy_from(map, buf, adr, len);
824
825 wake_up(&chip->wq);
826 spin_unlock_bh(chip->mutex);
827
828 return 0;
829}
830
831
832
833static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
834 size_t *retlen, u_char *buf)
835{
836 struct map_info *map = mtd->priv;
837 struct amd_flash_private *private = map->fldrv_priv;
838 unsigned long ofs;
839 int chipnum;
840 int ret = 0;
841
842 if ((from + len) > mtd->size) {
843 printk(KERN_WARNING "%s: read request past end of device "
844 "(0x%lx)\n", map->name, (unsigned long)from + len);
845
846 return -EINVAL;
847 }
848
849	/* Offset within the first chip at which the first read should start. */
850 chipnum = (from >> private->chipshift);
851 ofs = from - (chipnum << private->chipshift);
852
853 *retlen = 0;
854
855 while (len) {
856 unsigned long this_len;
857
858 if (chipnum >= private->numchips) {
859 break;
860 }
861
862 if ((len + ofs - 1) >> private->chipshift) {
863 this_len = (1 << private->chipshift) - ofs;
864 } else {
865 this_len = len;
866 }
867
868 ret = read_one_chip(map, &private->chips[chipnum], ofs,
869 this_len, buf);
870 if (ret) {
871 break;
872 }
873
874 *retlen += this_len;
875 len -= this_len;
876 buf += this_len;
877
878 ofs = 0;
879 chipnum++;
880 }
881
882 return ret;
883}
884
885
886
887static int write_one_word(struct map_info *map, struct flchip *chip,
888 unsigned long adr, __u32 datum)
889{
890 unsigned long timeo = jiffies + HZ;
891 struct amd_flash_private *private = map->fldrv_priv;
892 DECLARE_WAITQUEUE(wait, current);
893 int ret = 0;
894 int times_left;
895
896retry:
897 spin_lock_bh(chip->mutex);
898
899 if (chip->state != FL_READY){
900		printk(KERN_INFO "%s: waiting for chip to write, state = %d\n",
901 map->name, chip->state);
902 set_current_state(TASK_UNINTERRUPTIBLE);
903 add_wait_queue(&chip->wq, &wait);
904
905 spin_unlock_bh(chip->mutex);
906
907 schedule();
908 remove_wait_queue(&chip->wq, &wait);
909 printk(KERN_INFO "%s: woke up to write\n", map->name);
910 if(signal_pending(current))
911 return -EINTR;
912
913 timeo = jiffies + HZ;
914
915 goto retry;
916 }
917
918 chip->state = FL_WRITING;
919
920 adr += chip->start;
921 ENABLE_VPP(map);
922 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
923 wide_write(map, datum, adr);
924
925 times_left = 500000;
926 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
927 if (need_resched()) {
928 spin_unlock_bh(chip->mutex);
929 schedule();
930 spin_lock_bh(chip->mutex);
931 }
932 }
933
934 if (!times_left) {
935 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
936 map->name, adr);
937 ret = -EIO;
938 } else {
939 __u32 verify;
940 if ((verify = wide_read(map, adr)) != datum) {
941 printk(KERN_WARNING "%s: write to 0x%lx failed. "
942 "datum = %x, verify = %x\n",
943 map->name, adr, datum, verify);
944 ret = -EIO;
945 }
946 }
947
948 DISABLE_VPP(map);
949 chip->state = FL_READY;
950 wake_up(&chip->wq);
951 spin_unlock_bh(chip->mutex);
952
953 return ret;
954}
955
956
957
958static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
959 size_t *retlen, const u_char *buf)
960{
961 struct map_info *map = mtd->priv;
962 struct amd_flash_private *private = map->fldrv_priv;
963 int ret = 0;
964 int chipnum;
965 unsigned long ofs;
966 unsigned long chipstart;
967
968 *retlen = 0;
969 if (!len) {
970 return 0;
971 }
972
973 chipnum = to >> private->chipshift;
974 ofs = to - (chipnum << private->chipshift);
975 chipstart = private->chips[chipnum].start;
976
977 /* If it's not bus-aligned, do the first byte write. */
978 if (ofs & (map->buswidth - 1)) {
979 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
980 int i = ofs - bus_ofs;
981 int n = 0;
982 u_char tmp_buf[4];
983 __u32 datum;
984
985 map_copy_from(map, tmp_buf,
986 bus_ofs + private->chips[chipnum].start,
987 map->buswidth);
988 while (len && i < map->buswidth)
989 tmp_buf[i++] = buf[n++], len--;
990
991 if (map->buswidth == 2) {
992 datum = *(__u16*)tmp_buf;
993 } else if (map->buswidth == 4) {
994 datum = *(__u32*)tmp_buf;
995 } else {
996 return -EINVAL; /* should never happen, but be safe */
997 }
998
999 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
1000 datum);
1001 if (ret) {
1002 return ret;
1003 }
1004
1005 ofs += n;
1006 buf += n;
1007 (*retlen) += n;
1008
1009 if (ofs >> private->chipshift) {
1010 chipnum++;
1011 ofs = 0;
1012 if (chipnum == private->numchips) {
1013 return 0;
1014 }
1015 }
1016 }
1017
1018 /* We are now aligned, write as much as possible. */
1019 while(len >= map->buswidth) {
1020 __u32 datum;
1021
1022 if (map->buswidth == 1) {
1023 datum = *(__u8*)buf;
1024 } else if (map->buswidth == 2) {
1025 datum = *(__u16*)buf;
1026 } else if (map->buswidth == 4) {
1027 datum = *(__u32*)buf;
1028 } else {
1029 return -EINVAL;
1030 }
1031
1032 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1033
1034 if (ret) {
1035 return ret;
1036 }
1037
1038 ofs += map->buswidth;
1039 buf += map->buswidth;
1040 (*retlen) += map->buswidth;
1041 len -= map->buswidth;
1042
1043 if (ofs >> private->chipshift) {
1044 chipnum++;
1045 ofs = 0;
1046 if (chipnum == private->numchips) {
1047 return 0;
1048 }
1049 chipstart = private->chips[chipnum].start;
1050 }
1051 }
1052
1053 if (len & (map->buswidth - 1)) {
1054 int i = 0, n = 0;
1055		u_char tmp_buf[4];	/* must hold a full bus word; [2] overflows when buswidth == 4 */
1056 __u32 datum;
1057
1058 map_copy_from(map, tmp_buf,
1059 ofs + private->chips[chipnum].start,
1060 map->buswidth);
1061 while (len--) {
1062 tmp_buf[i++] = buf[n++];
1063 }
1064
1065 if (map->buswidth == 2) {
1066 datum = *(__u16*)tmp_buf;
1067 } else if (map->buswidth == 4) {
1068 datum = *(__u32*)tmp_buf;
1069 } else {
1070 return -EINVAL; /* should never happen, but be safe */
1071 }
1072
1073 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1074
1075 if (ret) {
1076 return ret;
1077 }
1078
1079 (*retlen) += n;
1080 }
1081
1082 return 0;
1083}
1084
1085
1086
1087static inline int erase_one_block(struct map_info *map, struct flchip *chip,
1088 unsigned long adr, u_long size)
1089{
1090 unsigned long timeo = jiffies + HZ;
1091 struct amd_flash_private *private = map->fldrv_priv;
1092 DECLARE_WAITQUEUE(wait, current);
1093
1094retry:
1095 spin_lock_bh(chip->mutex);
1096
1097 if (chip->state != FL_READY){
1098 set_current_state(TASK_UNINTERRUPTIBLE);
1099 add_wait_queue(&chip->wq, &wait);
1100
1101 spin_unlock_bh(chip->mutex);
1102
1103 schedule();
1104 remove_wait_queue(&chip->wq, &wait);
1105
1106 if (signal_pending(current)) {
1107 return -EINTR;
1108 }
1109
1110 timeo = jiffies + HZ;
1111
1112 goto retry;
1113 }
1114
1115 chip->state = FL_ERASING;
1116
1117 adr += chip->start;
1118 ENABLE_VPP(map);
1119 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1120 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1121
1122 timeo = jiffies + (HZ * 20);
1123
1124 spin_unlock_bh(chip->mutex);
1125 msleep(1000);
1126 spin_lock_bh(chip->mutex);
1127
1128 while (flash_is_busy(map, adr, private->interleave)) {
1129
1130 if (chip->state != FL_ERASING) {
1131 /* Someone's suspended the erase. Sleep */
1132 set_current_state(TASK_UNINTERRUPTIBLE);
1133 add_wait_queue(&chip->wq, &wait);
1134
1135 spin_unlock_bh(chip->mutex);
1136 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1137 map->name);
1138 schedule();
1139 remove_wait_queue(&chip->wq, &wait);
1140
1141 if (signal_pending(current)) {
1142 return -EINTR;
1143 }
1144
1145 timeo = jiffies + (HZ*2); /* FIXME */
1146 spin_lock_bh(chip->mutex);
1147 continue;
1148 }
1149
1150 /* OK Still waiting */
1151 if (time_after(jiffies, timeo)) {
1152 chip->state = FL_READY;
1153 spin_unlock_bh(chip->mutex);
1154 printk(KERN_WARNING "%s: waiting for erase to complete "
1155 "timed out.\n", map->name);
1156 DISABLE_VPP(map);
1157
1158 return -EIO;
1159 }
1160
1161 /* Latency issues. Drop the lock, wait a while and retry */
1162 spin_unlock_bh(chip->mutex);
1163
1164 if (need_resched())
1165 schedule();
1166 else
1167 udelay(1);
1168
1169 spin_lock_bh(chip->mutex);
1170 }
1171
1172 /* Verify every single word */
1173 {
1174		unsigned long address;
1175 int error = 0;
1176 __u8 verify;
1177
1178 for (address = adr; address < (adr + size); address++) {
1179 if ((verify = map_read8(map, address)) != 0xFF) {
1180 error = 1;
1181 break;
1182 }
1183 }
1184 if (error) {
1185 chip->state = FL_READY;
1186 spin_unlock_bh(chip->mutex);
1187 printk(KERN_WARNING
1188		       "%s: verify error at 0x%lx, size %lu.\n",
1189 map->name, address, size);
1190 DISABLE_VPP(map);
1191
1192 return -EIO;
1193 }
1194 }
1195
1196 DISABLE_VPP(map);
1197 chip->state = FL_READY;
1198 wake_up(&chip->wq);
1199 spin_unlock_bh(chip->mutex);
1200
1201 return 0;
1202}
1203
1204
1205
1206static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1207{
1208 struct map_info *map = mtd->priv;
1209 struct amd_flash_private *private = map->fldrv_priv;
1210 unsigned long adr, len;
1211 int chipnum;
1212 int ret = 0;
1213 int i;
1214 int first;
1215 struct mtd_erase_region_info *regions = mtd->eraseregions;
1216
1217 if (instr->addr > mtd->size) {
1218 return -EINVAL;
1219 }
1220
1221 if ((instr->len + instr->addr) > mtd->size) {
1222 return -EINVAL;
1223 }
1224
1225 /* Check that both start and end of the requested erase are
1226 * aligned with the erasesize at the appropriate addresses.
1227 */
1228
1229 i = 0;
1230
1231	/* Skip all erase regions which end before the start of
1232 the requested erase. Actually, to save on the calculations,
1233 we skip to the first erase region which starts after the
1234 start of the requested erase, and then go back one.
1235 */
1236
1237 while ((i < mtd->numeraseregions) &&
1238 (instr->addr >= regions[i].offset)) {
1239 i++;
1240 }
1241 i--;
1242
1243 /* OK, now i is pointing at the erase region in which this
1244 * erase request starts. Check the start of the requested
1245 * erase range is aligned with the erase size which is in
1246 * effect here.
1247 */
1248
1249 if (instr->addr & (regions[i].erasesize-1)) {
1250 return -EINVAL;
1251 }
1252
1253 /* Remember the erase region we start on. */
1254
1255 first = i;
1256
1257 /* Next, check that the end of the requested erase is aligned
1258 * with the erase region at that address.
1259 */
1260
1261 while ((i < mtd->numeraseregions) &&
1262 ((instr->addr + instr->len) >= regions[i].offset)) {
1263 i++;
1264 }
1265
1266 /* As before, drop back one to point at the region in which
1267 * the address actually falls.
1268 */
1269
1270 i--;
1271
1272 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
1273 return -EINVAL;
1274 }
1275
1276 chipnum = instr->addr >> private->chipshift;
1277 adr = instr->addr - (chipnum << private->chipshift);
1278 len = instr->len;
1279
1280 i = first;
1281
1282 while (len) {
1283 ret = erase_one_block(map, &private->chips[chipnum], adr,
1284 regions[i].erasesize);
1285
1286 if (ret) {
1287 return ret;
1288 }
1289
1290 adr += regions[i].erasesize;
1291 len -= regions[i].erasesize;
1292
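		/*
		 * Illustrative note (not in the original source): the check
		 * below advances to the next erase region once adr reaches
		 * the end of region i within the current chip.  E.g. erasing
		 * a single, non-interleaved "ST M29W160DT" from offset 0:
		 * after the 31st 64KiB block, adr == 0x1F0000 ==
		 * regions[0].offset + 31 * 0x10000, so i moves on to the
		 * 32KiB region that follows.
		 */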
1293 if ((adr % (1 << private->chipshift)) ==
1294 ((regions[i].offset + (regions[i].erasesize *
1295 regions[i].numblocks))
1296 % (1 << private->chipshift))) {
1297 i++;
1298 }
1299
1300 if (adr >> private->chipshift) {
1301 adr = 0;
1302 chipnum++;
1303 if (chipnum >= private->numchips) {
1304 break;
1305 }
1306 }
1307 }
1308
1309 instr->state = MTD_ERASE_DONE;
1310 mtd_erase_callback(instr);
1311
1312 return 0;
1313}
1314
1315
1316
1317static void amd_flash_sync(struct mtd_info *mtd)
1318{
1319 struct map_info *map = mtd->priv;
1320 struct amd_flash_private *private = map->fldrv_priv;
1321 int i;
1322 struct flchip *chip;
1323 int ret = 0;
1324 DECLARE_WAITQUEUE(wait, current);
1325
1326 for (i = 0; !ret && (i < private->numchips); i++) {
1327 chip = &private->chips[i];
1328
1329 retry:
1330 spin_lock_bh(chip->mutex);
1331
1332 switch(chip->state) {
1333 case FL_READY:
1334 case FL_STATUS:
1335 case FL_CFI_QUERY:
1336 case FL_JEDEC_QUERY:
1337 chip->oldstate = chip->state;
1338 chip->state = FL_SYNCING;
1339 /* No need to wake_up() on this state change -
1340 * as the whole point is that nobody can do anything
1341 * with the chip now anyway.
1342 */
1343 case FL_SYNCING:
1344 spin_unlock_bh(chip->mutex);
1345 break;
1346
1347 default:
1348 /* Not an idle state */
1349 add_wait_queue(&chip->wq, &wait);
1350
1351 spin_unlock_bh(chip->mutex);
1352
1353 schedule();
1354
1355 remove_wait_queue(&chip->wq, &wait);
1356
1357 goto retry;
1358 }
1359 }
1360
1361 /* Unlock the chips again */
1362 for (i--; i >= 0; i--) {
1363 chip = &private->chips[i];
1364
1365 spin_lock_bh(chip->mutex);
1366
1367 if (chip->state == FL_SYNCING) {
1368 chip->state = chip->oldstate;
1369 wake_up(&chip->wq);
1370 }
1371 spin_unlock_bh(chip->mutex);
1372 }
1373}
1374
1375
1376
1377static int amd_flash_suspend(struct mtd_info *mtd)
1378{
1379	printk(KERN_WARNING "amd_flash_suspend(): not implemented!\n");
1380 return -EINVAL;
1381}
1382
1383
1384
1385static void amd_flash_resume(struct mtd_info *mtd)
1386{
1387	printk(KERN_WARNING "amd_flash_resume(): not implemented!\n");
1388}
1389
1390
1391
1392static void amd_flash_destroy(struct mtd_info *mtd)
1393{
1394 struct map_info *map = mtd->priv;
1395 struct amd_flash_private *private = map->fldrv_priv;
1396 kfree(private);
1397}
1398
1399int __init amd_flash_init(void)
1400{
1401 register_mtd_chip_driver(&amd_flash_chipdrv);
1402 return 0;
1403}
1404
1405void __exit amd_flash_exit(void)
1406{
1407 unregister_mtd_chip_driver(&amd_flash_chipdrv);
1408}
1409
1410module_init(amd_flash_init);
1411module_exit(amd_flash_exit);
1412
1413MODULE_LICENSE("GPL");
1414MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1415MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
new file mode 100644
index 000000000000..c268bcd71720
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -0,0 +1,2160 @@
1/*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
7 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
8 *
9 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <asm/io.h>
26#include <asm/byteorder.h>
27
28#include <linux/errno.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/interrupt.h>
32#include <linux/mtd/xip.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/compatmac.h>
36#include <linux/mtd/cfi.h>
37
38/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
40
41// debugging, turns off buffer write mode if set to 1
42#define FORCE_WORD_WRITE 0
43
44#define MANUFACTURER_INTEL 0x0089
45#define I82802AB 0x00ad
46#define I82802AC 0x00ac
47#define MANUFACTURER_ST 0x0020
48#define M50LPW080 0x002F
49
50static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
51//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
56static void cfi_intelext_sync (struct mtd_info *);
57static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
58static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
59static int cfi_intelext_suspend (struct mtd_info *);
60static void cfi_intelext_resume (struct mtd_info *);
61
62static void cfi_intelext_destroy(struct mtd_info *);
63
64struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
65
66static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
67static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
68
69static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
70 size_t *retlen, u_char **mtdbuf);
71static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
72 size_t len);
73
74static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
75static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
76#include "fwh_lock.h"
77
78
79
80/*
81 * *********** SETUP AND PROBE BITS ***********
82 */
83
84static struct mtd_chip_driver cfi_intelext_chipdrv = {
85 .probe = NULL, /* Not usable directly */
86 .destroy = cfi_intelext_destroy,
87 .name = "cfi_cmdset_0001",
88 .module = THIS_MODULE
89};
90
91/* #define DEBUG_LOCK_BITS */
92/* #define DEBUG_CFI_FEATURES */
93
94#ifdef DEBUG_CFI_FEATURES
95static void cfi_tell_features(struct cfi_pri_intelext *extp)
96{
97 int i;
98 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
99 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
100 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
101 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
102 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
103 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
104 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
105 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
106 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
107 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
108 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
109 for (i=10; i<32; i++) {
110 if (extp->FeatureSupport & (1<<i))
111 printk(" - Unknown Bit %X: supported\n", i);
112 }
113
114 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
115 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
116 for (i=1; i<8; i++) {
117 if (extp->SuspendCmdSupport & (1<<i))
118 printk(" - Unknown Bit %X: supported\n", i);
119 }
120
121 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
122 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
123 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
124 for (i=2; i<16; i++) {
125 if (extp->BlkStatusRegMask & (1<<i))
126 printk(" - Unknown Bit %X Active: yes\n",i);
127 }
128
129 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
130 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
131 if (extp->VppOptimal)
132 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
133 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
134}
135#endif
136
137#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
138/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
139static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
140{
141 struct map_info *map = mtd->priv;
142 struct cfi_private *cfi = map->fldrv_priv;
143 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
144
145 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
146 "erase on write disabled.\n");
147 extp->SuspendCmdSupport &= ~1;
148}
149#endif
150
151#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
152static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
153{
154 struct map_info *map = mtd->priv;
155 struct cfi_private *cfi = map->fldrv_priv;
156 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
157
158 if (cfip && (cfip->FeatureSupport&4)) {
159 cfip->FeatureSupport &= ~4;
160 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
161 }
162}
163#endif
164
165static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
166{
167 struct map_info *map = mtd->priv;
168 struct cfi_private *cfi = map->fldrv_priv;
169
170 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
171 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
172}
173
174static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
175{
176 struct map_info *map = mtd->priv;
177 struct cfi_private *cfi = map->fldrv_priv;
178
179 /* Note this is done after the region info is endian swapped */
180 cfi->cfiq->EraseRegionInfo[1] =
181 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
182}
183
184static void fixup_use_point(struct mtd_info *mtd, void *param)
185{
186 struct map_info *map = mtd->priv;
187 if (!mtd->point && map_is_linear(map)) {
188 mtd->point = cfi_intelext_point;
189 mtd->unpoint = cfi_intelext_unpoint;
190 }
191}
192
193static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
194{
195 struct map_info *map = mtd->priv;
196 struct cfi_private *cfi = map->fldrv_priv;
197 if (cfi->cfiq->BufWriteTimeoutTyp) {
198 printk(KERN_INFO "Using buffer write method\n" );
199 mtd->write = cfi_intelext_write_buffers;
200 }
201}
202
203static struct cfi_fixup cfi_fixup_table[] = {
204#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
205 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
206#endif
207#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
208 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
209#endif
210#if !FORCE_WORD_WRITE
211 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
212#endif
213 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
214 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
215 { 0, 0, NULL, NULL }
216};
217
218static struct cfi_fixup jedec_fixup_table[] = {
219 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
220 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
221 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
222 { 0, 0, NULL, NULL }
223};
224static struct cfi_fixup fixup_table[] = {
225	/* The CFI vendor IDs and the JEDEC vendor IDs appear
226	 * to be common.  It looks like the device IDs are as
227	 * well.  This table picks out all the cases where we
228	 * know that is the case.
229	 */
230 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
231 { 0, 0, NULL, NULL }
232};
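/*
 * Illustrative note (not in the original source): cfi_fixup() is expected to
 * walk a table like the ones above and call each entry's fixup hook for chips
 * whose manufacturer/device IDs match, with CFI_MFR_ANY / CFI_ID_ANY acting
 * as wildcards; the { 0, 0, NULL, NULL } entry terminates the table.
 */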
233
234static inline struct cfi_pri_intelext *
235read_pri_intelext(struct map_info *map, __u16 adr)
236{
237 struct cfi_pri_intelext *extp;
238 unsigned int extp_size = sizeof(*extp);
239
240 again:
241 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
242 if (!extp)
243 return NULL;
244
245 /* Do some byteswapping if necessary */
246 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
247 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
248 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
249
250 if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
251 unsigned int extra_size = 0;
252 int nb_parts, i;
253
254 /* Protection Register info */
255 extra_size += (extp->NumProtectionFields - 1) * (4 + 6);
256
257 /* Burst Read info */
258 extra_size += 6;
259
260 /* Number of hardware-partitions */
261 extra_size += 1;
262 if (extp_size < sizeof(*extp) + extra_size)
263 goto need_more;
264 nb_parts = extp->extra[extra_size - 1];
265
266 for (i = 0; i < nb_parts; i++) {
267 struct cfi_intelext_regioninfo *rinfo;
268 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
269 extra_size += sizeof(*rinfo);
270 if (extp_size < sizeof(*extp) + extra_size)
271 goto need_more;
272 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
273 extra_size += (rinfo->NumBlockTypes - 1)
274 * sizeof(struct cfi_intelext_blockinfo);
275 }
276
277 if (extp_size < sizeof(*extp) + extra_size) {
278 need_more:
279 extp_size = sizeof(*extp) + extra_size;
280 kfree(extp);
281 if (extp_size > 4096) {
282 printk(KERN_ERR
283 "%s: cfi_pri_intelext is too fat\n",
284 __FUNCTION__);
285 return NULL;
286 }
287 goto again;
288 }
289 }
290
291 return extp;
292}
293
294/* This routine is made available to other mtd code via
295 * inter_module_register. It must only be accessed through
296 * inter_module_get which will bump the use count of this module. The
297 * addresses passed back in cfi are valid as long as the use count of
298 * this module is non-zero, i.e. between inter_module_get and
299 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
300 */
301struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
302{
303 struct cfi_private *cfi = map->fldrv_priv;
304 struct mtd_info *mtd;
305 int i;
306
307 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
308 if (!mtd) {
309 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
310 return NULL;
311 }
312 memset(mtd, 0, sizeof(*mtd));
313 mtd->priv = map;
314 mtd->type = MTD_NORFLASH;
315
316 /* Fill in the default mtd operations */
317 mtd->erase = cfi_intelext_erase_varsize;
318 mtd->read = cfi_intelext_read;
319 mtd->write = cfi_intelext_write_words;
320 mtd->sync = cfi_intelext_sync;
321 mtd->lock = cfi_intelext_lock;
322 mtd->unlock = cfi_intelext_unlock;
323 mtd->suspend = cfi_intelext_suspend;
324 mtd->resume = cfi_intelext_resume;
325 mtd->flags = MTD_CAP_NORFLASH;
326 mtd->name = map->name;
327
328 if (cfi->cfi_mode == CFI_MODE_CFI) {
329 /*
330 * It's a real CFI chip, not one for which the probe
331 * routine faked a CFI structure. So we read the feature
332 * table from it.
333 */
334 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
335 struct cfi_pri_intelext *extp;
336
337 extp = read_pri_intelext(map, adr);
338 if (!extp) {
339 kfree(mtd);
340 return NULL;
341 }
342
343 /* Install our own private info structure */
344 cfi->cmdset_priv = extp;
345
346 cfi_fixup(mtd, cfi_fixup_table);
347
348#ifdef DEBUG_CFI_FEATURES
349 /* Tell the user about it in lots of lovely detail */
350 cfi_tell_features(extp);
351#endif
352
353 if(extp->SuspendCmdSupport & 1) {
354 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
355 }
356 }
357 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
358 /* Apply jedec specific fixups */
359 cfi_fixup(mtd, jedec_fixup_table);
360 }
361 /* Apply generic fixups */
362 cfi_fixup(mtd, fixup_table);
363
364 for (i=0; i< cfi->numchips; i++) {
365 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
366 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
367 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
368 cfi->chips[i].ref_point_counter = 0;
369 }
370
371 map->fldrv = &cfi_intelext_chipdrv;
372
373 return cfi_intelext_setup(mtd);
374}
375
376static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
377{
378 struct map_info *map = mtd->priv;
379 struct cfi_private *cfi = map->fldrv_priv;
380 unsigned long offset = 0;
381 int i,j;
382 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
383
384 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
385
386 mtd->size = devsize * cfi->numchips;
387
388 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
389 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
390 * mtd->numeraseregions, GFP_KERNEL);
391 if (!mtd->eraseregions) {
392 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
393 goto setup_err;
394 }
395
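	/*
	 * Illustrative note (not in the original source): each 32-bit CFI
	 * EraseRegionInfo word encodes "number of blocks - 1" in its low 16
	 * bits and "block size / 256" in its high 16 bits.  A value of
	 * 0x0100003E therefore decodes below to ernum = 0x3E + 1 = 63 blocks
	 * of ersize = 0x10000 bytes (64 KiB), scaled by the interleave.
	 */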
396 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
397 unsigned long ernum, ersize;
398 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
399 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
400
401 if (mtd->erasesize < ersize) {
402 mtd->erasesize = ersize;
403 }
404 for (j=0; j<cfi->numchips; j++) {
405 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
406 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
407 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
408 }
409 offset += (ersize * ernum);
410 }
411
412 if (offset != devsize) {
413 /* Argh */
414 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
415 goto setup_err;
416 }
417
418 for (i=0; i<mtd->numeraseregions;i++){
419 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
420 i,mtd->eraseregions[i].offset,
421 mtd->eraseregions[i].erasesize,
422 mtd->eraseregions[i].numblocks);
423 }
424
425#if 0
426 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
427 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
428#endif
429
430 /* This function has the potential to distort the reality
431 a bit and therefore should be called last. */
432 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
433 goto setup_err;
434
435 __module_get(THIS_MODULE);
436 return mtd;
437
438 setup_err:
439 if(mtd) {
440 if(mtd->eraseregions)
441 kfree(mtd->eraseregions);
442 kfree(mtd);
443 }
444 kfree(cfi->cmdset_priv);
445 return NULL;
446}
447
448static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
449 struct cfi_private **pcfi)
450{
451 struct map_info *map = mtd->priv;
452 struct cfi_private *cfi = *pcfi;
453 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
454
455 /*
456	 * Probing of multi-partition flash chips.
457 *
458 * To support multiple partitions when available, we simply arrange
459 * for each of them to have their own flchip structure even if they
460 * are on the same physical chip. This means completely recreating
461	 * a new cfi_private structure right here which is a blatant code
462 * layering violation, but this is still the least intrusive
463 * arrangement at this point. This can be rearranged in the future
464 * if someone feels motivated enough. --nico
465 */
466 if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
467 && extp->FeatureSupport & (1 << 9)) {
468 struct cfi_private *newcfi;
469 struct flchip *chip;
470 struct flchip_shared *shared;
471 int offs, numregions, numparts, partshift, numvirtchips, i, j;
472
473 /* Protection Register info */
474 offs = (extp->NumProtectionFields - 1) * (4 + 6);
475
476 /* Burst Read info */
477 offs += 6;
478
479 /* Number of partition regions */
480 numregions = extp->extra[offs];
481 offs += 1;
482
483 /* Number of hardware partitions */
484 numparts = 0;
485 for (i = 0; i < numregions; i++) {
486 struct cfi_intelext_regioninfo *rinfo;
487 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
488 numparts += rinfo->NumIdentPartitions;
489 offs += sizeof(*rinfo)
490 + (rinfo->NumBlockTypes - 1) *
491 sizeof(struct cfi_intelext_blockinfo);
492 }
493
494 /*
495 * All functions below currently rely on all chips having
496 * the same geometry so we'll just assume that all hardware
497 * partitions are of the same size too.
498 */
499 partshift = cfi->chipshift - __ffs(numparts);
500
501 if ((1 << partshift) < mtd->erasesize) {
502 printk( KERN_ERR
503 "%s: bad number of hw partitions (%d)\n",
504 __FUNCTION__, numparts);
505 return -EINVAL;
506 }
507
508 numvirtchips = cfi->numchips * numparts;
509 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
510 if (!newcfi)
511 return -ENOMEM;
512 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
513 if (!shared) {
514 kfree(newcfi);
515 return -ENOMEM;
516 }
517 memcpy(newcfi, cfi, sizeof(struct cfi_private));
518 newcfi->numchips = numvirtchips;
519 newcfi->chipshift = partshift;
520
521 chip = &newcfi->chips[0];
522 for (i = 0; i < cfi->numchips; i++) {
523 shared[i].writing = shared[i].erasing = NULL;
524 spin_lock_init(&shared[i].lock);
525 for (j = 0; j < numparts; j++) {
526 *chip = cfi->chips[i];
527 chip->start += j << partshift;
528 chip->priv = &shared[i];
529 /* those should be reset too since
530 they create memory references. */
531 init_waitqueue_head(&chip->wq);
532 spin_lock_init(&chip->_spinlock);
533 chip->mutex = &chip->_spinlock;
534 chip++;
535 }
536 }
537
538 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
539 "--> %d partitions of %d KiB\n",
540 map->name, cfi->numchips, cfi->interleave,
541 newcfi->numchips, 1<<(newcfi->chipshift-10));
542
543 map->fldrv_priv = newcfi;
544 *pcfi = newcfi;
545 kfree(cfi);
546 }
547
548 return 0;
549}
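/*
 * Illustrative note (not in the original source, with made-up numbers): for a
 * hypothetical chip with cfi->chipshift == 24 (16 MiB) advertising 8 identical
 * hardware partitions, partshift = 24 - __ffs(8) = 21, so each virtual chip in
 * the new cfi_private covers 2 MiB; with two physical chips, numvirtchips =
 * 2 * 8 = 16 and the debug printk above reports partitions of
 * 1 << (21 - 10) = 2048 KiB.
 */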
550
551/*
552 * *********** CHIP ACCESS FUNCTIONS ***********
553 */
554
555static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
556{
557 DECLARE_WAITQUEUE(wait, current);
558 struct cfi_private *cfi = map->fldrv_priv;
559 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
560 unsigned long timeo;
561 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
562
563 resettime:
564 timeo = jiffies + HZ;
565 retry:
566 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
567 /*
568	 * OK. We have the possibility of contention on the write/erase
569 * operations which are global to the real chip and not per
570 * partition. So let's fight it over in the partition which
571 * currently has authority on the operation.
572 *
573 * The rules are as follows:
574 *
575 * - any write operation must own shared->writing.
576 *
577 * - any erase operation must own _both_ shared->writing and
578 * shared->erasing.
579 *
580	 * - contention arbitration is handled in the owner's context.
581 *
582 * The 'shared' struct can be read when its lock is taken.
583 * However any writes to it can only be made when the current
584 * owner's lock is also held.
585 */
586 struct flchip_shared *shared = chip->priv;
587 struct flchip *contender;
588 spin_lock(&shared->lock);
589 contender = shared->writing;
590 if (contender && contender != chip) {
591 /*
592 * The engine to perform desired operation on this
593 * partition is already in use by someone else.
594 * Let's fight over it in the context of the chip
595 * currently using it. If it is possible to suspend,
596 * that other partition will do just that, otherwise
597 * it'll happily send us to sleep. In any case, when
598 * get_chip returns success we're clear to go ahead.
599 */
600 int ret = spin_trylock(contender->mutex);
601 spin_unlock(&shared->lock);
602 if (!ret)
603 goto retry;
604 spin_unlock(chip->mutex);
605 ret = get_chip(map, contender, contender->start, mode);
606 spin_lock(chip->mutex);
607 if (ret) {
608 spin_unlock(contender->mutex);
609 return ret;
610 }
611 timeo = jiffies + HZ;
612 spin_lock(&shared->lock);
613 }
614
615 /* We now own it */
616 shared->writing = chip;
617 if (mode == FL_ERASING)
618 shared->erasing = chip;
619 if (contender && contender != chip)
620 spin_unlock(contender->mutex);
621 spin_unlock(&shared->lock);
622 }
623
624 switch (chip->state) {
625
626 case FL_STATUS:
627 for (;;) {
628 status = map_read(map, adr);
629 if (map_word_andequal(map, status, status_OK, status_OK))
630 break;
631
632 /* At this point we're fine with write operations
633 in other partitions as they don't conflict. */
634 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
635 break;
636
637 if (time_after(jiffies, timeo)) {
638 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
639 status.x[0]);
640 return -EIO;
641 }
642 spin_unlock(chip->mutex);
643 cfi_udelay(1);
644 spin_lock(chip->mutex);
645 /* Someone else might have been playing with it. */
646 goto retry;
647 }
648
649 case FL_READY:
650 case FL_CFI_QUERY:
651 case FL_JEDEC_QUERY:
652 return 0;
653
654 case FL_ERASING:
655 if (!cfip ||
656 !(cfip->FeatureSupport & 2) ||
657 !(mode == FL_READY || mode == FL_POINT ||
658 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
659 goto sleep;
660
661
662 /* Erase suspend */
663 map_write(map, CMD(0xB0), adr);
664
665 /* If the flash has finished erasing, then 'erase suspend'
666 * appears to make some (28F320) flash devices switch to
667 * 'read' mode. Make sure that we switch to 'read status'
668 * mode so we get the right data. --rmk
669 */
670 map_write(map, CMD(0x70), adr);
671 chip->oldstate = FL_ERASING;
672 chip->state = FL_ERASE_SUSPENDING;
673 chip->erase_suspended = 1;
674 for (;;) {
675 status = map_read(map, adr);
676 if (map_word_andequal(map, status, status_OK, status_OK))
677 break;
678
679 if (time_after(jiffies, timeo)) {
680 /* Urgh. Resume and pretend we weren't here. */
681 map_write(map, CMD(0xd0), adr);
682 /* Make sure we're in 'read status' mode if it had finished */
683 map_write(map, CMD(0x70), adr);
684 chip->state = FL_ERASING;
685 chip->oldstate = FL_READY;
686 printk(KERN_ERR "Chip not ready after erase "
687 "suspended: status = 0x%lx\n", status.x[0]);
688 return -EIO;
689 }
690
691 spin_unlock(chip->mutex);
692 cfi_udelay(1);
693 spin_lock(chip->mutex);
694 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
695 So we can just loop here. */
696 }
697 chip->state = FL_STATUS;
698 return 0;
699
700 case FL_XIP_WHILE_ERASING:
701 if (mode != FL_READY && mode != FL_POINT &&
702 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
703 goto sleep;
704 chip->oldstate = chip->state;
705 chip->state = FL_READY;
706 return 0;
707
708 case FL_POINT:
709 /* Only if there's no operation suspended... */
710 if (mode == FL_READY && chip->oldstate == FL_READY)
711 return 0;
712
713 default:
714 sleep:
715 set_current_state(TASK_UNINTERRUPTIBLE);
716 add_wait_queue(&chip->wq, &wait);
717 spin_unlock(chip->mutex);
718 schedule();
719 remove_wait_queue(&chip->wq, &wait);
720 spin_lock(chip->mutex);
721 goto resettime;
722 }
723}
724
725static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
726{
727 struct cfi_private *cfi = map->fldrv_priv;
728
729 if (chip->priv) {
730 struct flchip_shared *shared = chip->priv;
731 spin_lock(&shared->lock);
732 if (shared->writing == chip && chip->oldstate == FL_READY) {
733 /* We own the ability to write, but we're done */
734 shared->writing = shared->erasing;
735 if (shared->writing && shared->writing != chip) {
736 /* give back ownership to who we loaned it from */
737 struct flchip *loaner = shared->writing;
738 spin_lock(loaner->mutex);
739 spin_unlock(&shared->lock);
740 spin_unlock(chip->mutex);
741 put_chip(map, loaner, loaner->start);
742 spin_lock(chip->mutex);
743 spin_unlock(loaner->mutex);
744 wake_up(&chip->wq);
745 return;
746 }
747 shared->erasing = NULL;
748 shared->writing = NULL;
749 } else if (shared->erasing == chip && shared->writing != chip) {
750 /*
751 * We own the ability to erase without the ability
752 * to write, which means the erase was suspended
753 * and some other partition is currently writing.
754 * Don't let the switch below mess things up since
755 * we don't have ownership to resume anything.
756 */
757 spin_unlock(&shared->lock);
758 wake_up(&chip->wq);
759 return;
760 }
761 spin_unlock(&shared->lock);
762 }
763
764 switch(chip->oldstate) {
765 case FL_ERASING:
766 chip->state = chip->oldstate;
767 /* What if one interleaved chip has finished and the
768 other hasn't? The old code would leave the finished
769 one in READY mode. That's bad, and caused -EROFS
770 errors to be returned from do_erase_oneblock because
771 that's the only bit it checked for at the time.
772 As the state machine appears to explicitly allow
773 sending the 0x70 (Read Status) command to an erasing
774 chip and expecting it to be ignored, that's what we
775 do. */
776 map_write(map, CMD(0xd0), adr);
777 map_write(map, CMD(0x70), adr);
778 chip->oldstate = FL_READY;
779 chip->state = FL_ERASING;
780 break;
781
782 case FL_XIP_WHILE_ERASING:
783 chip->state = chip->oldstate;
784 chip->oldstate = FL_READY;
785 break;
786
787 case FL_READY:
788 case FL_STATUS:
789 case FL_JEDEC_QUERY:
790 /* We should really make set_vpp() count, rather than doing this */
791 DISABLE_VPP(map);
792 break;
793 default:
794 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
795 }
796 wake_up(&chip->wq);
797}
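/*
 * Illustrative walk-through (not in the original source): if a read request
 * arrives while an erase is in progress and the chip supports erase suspend,
 * get_chip() writes 0xB0 (suspend) then 0x70 (read status), polls for status
 * bit 0x80, and records oldstate = FL_ERASING with state = FL_STATUS.  The
 * matching put_chip() then writes 0xD0 (resume) followed by 0x70 and restores
 * state = FL_ERASING so the erase can complete.
 */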
798
799#ifdef CONFIG_MTD_XIP
800
801/*
802 * No interrupt whatsoever can be serviced while the flash isn't in array
803 * mode. This is ensured by the xip_disable() and xip_enable() functions
804 * enclosing any code path where the flash is known not to be in array mode.
805 * And within a XIP disabled code path, only functions marked with __xipram
806 * may be called and nothing else (it's a good thing to inspect generated
807 * assembly to make sure inline functions were actually inlined and that gcc
808 * didn't emit calls to its own support functions).  Configuring MTD CFI
809 * support for a single buswidth and a single interleave is also recommended.
810 * Note that not only are IRQs disabled, but the preemption count is also
811 * increased to prevent other locking primitives (namely spin_unlock) from
812 * decrementing the preempt count to zero and scheduling the CPU away while
813 * not in array mode.
814 */
815
816static void xip_disable(struct map_info *map, struct flchip *chip,
817 unsigned long adr)
818{
819 /* TODO: chips with no XIP use should ignore and return */
820 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
821 preempt_disable();
822 local_irq_disable();
823}
824
825static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
826 unsigned long adr)
827{
828 struct cfi_private *cfi = map->fldrv_priv;
829 if (chip->state != FL_POINT && chip->state != FL_READY) {
830 map_write(map, CMD(0xff), adr);
831 chip->state = FL_READY;
832 }
833 (void) map_read(map, adr);
834 asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
835 local_irq_enable();
836 preempt_enable();
837}
838
839/*
840 * When a delay is required for a flash operation to complete, the
841 * xip_udelay() function polls for both the given timeout and any pending
842 * (but still masked) hardware interrupts.  Whenever an interrupt is
843 * pending, the flash erase or write operation is suspended, array mode is
844 * restored and interrupts are unmasked.  Task scheduling might also happen
845 * at that point.  The CPU eventually returns from the interrupt or the call
846 * to schedule() and the suspended flash operation is resumed for the
847 * remainder of the delay period.
848 *
849 * Warning: this function _will_ fool interrupt latency tracing tools.
850 */
851
852static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
853 unsigned long adr, int usec)
854{
855 struct cfi_private *cfi = map->fldrv_priv;
856 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
857 map_word status, OK = CMD(0x80);
858 unsigned long suspended, start = xip_currtime();
859 flstate_t oldstate, newstate;
860
861 do {
862 cpu_relax();
863 if (xip_irqpending() && cfip &&
864 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
865 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
866 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
867 /*
868 * Let's suspend the erase or write operation when
869 * supported. Note that we currently don't try to
870 * suspend interleaved chips if there is already
871 * another operation suspended (imagine what happens
872 * when one chip was already done with the current
873 * operation while another chip suspended it, then
874 * we resume the whole thing at once). Yes, it
875 * can happen!
876 */
877 map_write(map, CMD(0xb0), adr);
878 map_write(map, CMD(0x70), adr);
879 usec -= xip_elapsed_since(start);
880 suspended = xip_currtime();
881 do {
882 if (xip_elapsed_since(suspended) > 100000) {
883 /*
884 * The chip doesn't want to suspend
885 * after waiting for 100 msecs.
886 * This is a critical error but there
887 * is not much we can do here.
888 */
889 return;
890 }
891 status = map_read(map, adr);
892 } while (!map_word_andequal(map, status, OK, OK));
893
894 /* Suspend succeeded */
895 oldstate = chip->state;
896 if (oldstate == FL_ERASING) {
897 if (!map_word_bitsset(map, status, CMD(0x40)))
898 break;
899 newstate = FL_XIP_WHILE_ERASING;
900 chip->erase_suspended = 1;
901 } else {
902 if (!map_word_bitsset(map, status, CMD(0x04)))
903 break;
904 newstate = FL_XIP_WHILE_WRITING;
905 chip->write_suspended = 1;
906 }
907 chip->state = newstate;
908 map_write(map, CMD(0xff), adr);
909 (void) map_read(map, adr);
910 asm volatile (".rep 8; nop; .endr");
911 local_irq_enable();
912 preempt_enable();
913 asm volatile (".rep 8; nop; .endr");
914 cond_resched();
915
916 /*
917 * We're back. However someone else might have
918 * decided to go write to the chip if we are in
919 * a suspended erase state. If so let's wait
920 * until it's done.
921 */
922 preempt_disable();
923 while (chip->state != newstate) {
924 DECLARE_WAITQUEUE(wait, current);
925 set_current_state(TASK_UNINTERRUPTIBLE);
926 add_wait_queue(&chip->wq, &wait);
927 preempt_enable();
928 schedule();
929 remove_wait_queue(&chip->wq, &wait);
930 preempt_disable();
931 }
932 /* Disallow XIP again */
933 local_irq_disable();
934
935 /* Resume the write or erase operation */
936 map_write(map, CMD(0xd0), adr);
937 map_write(map, CMD(0x70), adr);
938 chip->state = oldstate;
939 start = xip_currtime();
940 } else if (usec >= 1000000/HZ) {
941 /*
942 * Try to save on CPU power when waiting delay
943 * is at least a system timer tick period.
944 * No need to be extremely accurate here.
945 */
946 xip_cpu_idle();
947 }
948 status = map_read(map, adr);
949 } while (!map_word_andequal(map, status, OK, OK)
950 && xip_elapsed_since(start) < usec);
951}
952
953#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
954
955/*
956 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
957 * the flash is actively programming or erasing since we have to poll for
958 * the operation to complete anyway. We can't do that in a generic way with
959 * a XIP setup so do it before the actual flash operation in this case.
960 */
961#undef INVALIDATE_CACHED_RANGE
962#define INVALIDATE_CACHED_RANGE(x...)
963#define XIP_INVAL_CACHED_RANGE(map, from, size) \
964 do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
965
966/*
967 * Extra notes:
968 *
969 * Activating this XIP support changes the way the code works a bit.  For
970 * example, the code that suspends the current process on concurrent access
971 * is never executed, because xip_udelay() always returns with the chip in
972 * the same state it was entered with.  This is why we need not worry about
973 * add_wait_queue() or schedule() calls appearing within a couple of
974 * xip_disable()'d areas of code, such as in do_erase_oneblock.
975 * The queueing and scheduling always happen within xip_udelay().
976 *
977 * Similarly, get_chip() and put_chip() just happen to always be executed
978 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
979 * flash in array mode, so many of the cases therein never execute and
980 * XIP is not disturbed.
981 */
982
983#else
984
985#define xip_disable(map, chip, adr)
986#define xip_enable(map, chip, adr)
987
988#define UDELAY(map, chip, adr, usec) cfi_udelay(usec)
989
990#define XIP_INVAL_CACHED_RANGE(x...)
991
992#endif
993
994static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
995{
996 unsigned long cmd_addr;
997 struct cfi_private *cfi = map->fldrv_priv;
998 int ret = 0;
999
1000 adr += chip->start;
1001
1002 /* Ensure cmd read/writes are aligned. */
1003 cmd_addr = adr & ~(map_bankwidth(map)-1);
1004
1005 spin_lock(chip->mutex);
1006
1007 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1008
1009 if (!ret) {
1010 if (chip->state != FL_POINT && chip->state != FL_READY)
1011 map_write(map, CMD(0xff), cmd_addr);
1012
1013 chip->state = FL_POINT;
1014 chip->ref_point_counter++;
1015 }
1016 spin_unlock(chip->mutex);
1017
1018 return ret;
1019}
1020
1021static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1022{
1023 struct map_info *map = mtd->priv;
1024 struct cfi_private *cfi = map->fldrv_priv;
1025 unsigned long ofs;
1026 int chipnum;
1027 int ret = 0;
1028
1029 if (!map->virt || (from + len > mtd->size))
1030 return -EINVAL;
1031
1032 *mtdbuf = (void *)map->virt + from;
1033 *retlen = 0;
1034
1035 /* Now lock the chip(s) to POINT state */
1036
1037	/* ofs: offset within the first chip at which the first read should start */
1038 chipnum = (from >> cfi->chipshift);
1039 ofs = from - (chipnum << cfi->chipshift);
1040
1041 while (len) {
1042 unsigned long thislen;
1043
1044 if (chipnum >= cfi->numchips)
1045 break;
1046
1047 if ((len + ofs -1) >> cfi->chipshift)
1048 thislen = (1<<cfi->chipshift) - ofs;
1049 else
1050 thislen = len;
1051
1052 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1053 if (ret)
1054 break;
1055
1056 *retlen += thislen;
1057 len -= thislen;
1058
1059 ofs = 0;
1060 chipnum++;
1061 }
1062 return 0;
1063}
1064
1065static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1066{
1067 struct map_info *map = mtd->priv;
1068 struct cfi_private *cfi = map->fldrv_priv;
1069 unsigned long ofs;
1070 int chipnum;
1071
1072 /* Now unlock the chip(s) POINT state */
1073
1074	/* ofs: offset within the first chip at which the first read should start */
1075 chipnum = (from >> cfi->chipshift);
1076 ofs = from - (chipnum << cfi->chipshift);
1077
1078 while (len) {
1079 unsigned long thislen;
1080 struct flchip *chip;
1081
1082		if (chipnum >= cfi->numchips)
1083			break;
1084		chip = &cfi->chips[chipnum];
1085
1086 if ((len + ofs -1) >> cfi->chipshift)
1087 thislen = (1<<cfi->chipshift) - ofs;
1088 else
1089 thislen = len;
1090
1091 spin_lock(chip->mutex);
1092 if (chip->state == FL_POINT) {
1093 chip->ref_point_counter--;
1094 if(chip->ref_point_counter == 0)
1095 chip->state = FL_READY;
1096 } else
1097			printk(KERN_ERR "Warning: unpoint called on a non-pointed region\n"); /* Should this give an error? */
1098
1099 put_chip(map, chip, chip->start);
1100 spin_unlock(chip->mutex);
1101
1102 len -= thislen;
1103 ofs = 0;
1104 chipnum++;
1105 }
1106}
1107
1108static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1109{
1110 unsigned long cmd_addr;
1111 struct cfi_private *cfi = map->fldrv_priv;
1112 int ret;
1113
1114 adr += chip->start;
1115
1116 /* Ensure cmd read/writes are aligned. */
1117 cmd_addr = adr & ~(map_bankwidth(map)-1);
1118
1119 spin_lock(chip->mutex);
1120 ret = get_chip(map, chip, cmd_addr, FL_READY);
1121 if (ret) {
1122 spin_unlock(chip->mutex);
1123 return ret;
1124 }
1125
1126 if (chip->state != FL_POINT && chip->state != FL_READY) {
1127 map_write(map, CMD(0xff), cmd_addr);
1128
1129 chip->state = FL_READY;
1130 }
1131
1132 map_copy_from(map, buf, adr, len);
1133
1134 put_chip(map, chip, cmd_addr);
1135
1136 spin_unlock(chip->mutex);
1137 return 0;
1138}
1139
1140static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1141{
1142 struct map_info *map = mtd->priv;
1143 struct cfi_private *cfi = map->fldrv_priv;
1144 unsigned long ofs;
1145 int chipnum;
1146 int ret = 0;
1147
1148	/* ofs: offset within the first chip at which the first read should start */
1149 chipnum = (from >> cfi->chipshift);
1150 ofs = from - (chipnum << cfi->chipshift);
1151
1152 *retlen = 0;
1153
1154 while (len) {
1155 unsigned long thislen;
1156
1157 if (chipnum >= cfi->numchips)
1158 break;
1159
1160 if ((len + ofs -1) >> cfi->chipshift)
1161 thislen = (1<<cfi->chipshift) - ofs;
1162 else
1163 thislen = len;
1164
1165 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1166 if (ret)
1167 break;
1168
1169 *retlen += thislen;
1170 len -= thislen;
1171 buf += thislen;
1172
1173 ofs = 0;
1174 chipnum++;
1175 }
1176 return ret;
1177}
1178
1179#if 0
1180static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
1181 loff_t from, size_t len,
1182 size_t *retlen,
1183 u_char *buf,
1184 int base_offst, int reg_sz)
1185{
1186 struct map_info *map = mtd->priv;
1187 struct cfi_private *cfi = map->fldrv_priv;
1188 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1189 struct flchip *chip;
1190 int ofs_factor = cfi->interleave * cfi->device_type;
1191 int count = len;
1192 int chip_num, offst;
1193 int ret;
1194
1195 chip_num = ((unsigned int)from/reg_sz);
1196 offst = from - (reg_sz*chip_num)+base_offst;
1197
1198 while (count) {
1199 /* Calculate which chip & protection register offset we need */
1200
1201 if (chip_num >= cfi->numchips)
1202 goto out;
1203
1204 chip = &cfi->chips[chip_num];
1205
1206 spin_lock(chip->mutex);
1207 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1208 if (ret) {
1209 spin_unlock(chip->mutex);
1210 return (len-count)?:ret;
1211 }
1212
1213 xip_disable(map, chip, chip->start);
1214
1215 if (chip->state != FL_JEDEC_QUERY) {
1216 map_write(map, CMD(0x90), chip->start);
1217 chip->state = FL_JEDEC_QUERY;
1218 }
1219
1220 while (count && ((offst-base_offst) < reg_sz)) {
1221 *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
1222 buf++;
1223 offst++;
1224 count--;
1225 }
1226
1227 xip_enable(map, chip, chip->start);
1228 put_chip(map, chip, chip->start);
1229 spin_unlock(chip->mutex);
1230
1231 /* Move on to the next chip */
1232 chip_num++;
1233 offst = base_offst;
1234 }
1235
1236 out:
1237 return len-count;
1238}
1239
1240static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1241{
1242 struct map_info *map = mtd->priv;
1243 struct cfi_private *cfi = map->fldrv_priv;
1244 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
1245 int base_offst,reg_sz;
1246
1247 /* Check that we actually have some protection registers */
1248 if(!extp || !(extp->FeatureSupport&64)){
1249 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
1250 return 0;
1251 }
1252
1253 base_offst=(1<<extp->FactProtRegSize);
1254 reg_sz=(1<<extp->UserProtRegSize);
1255
1256 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
1257}
1258
1259static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1260{
1261 struct map_info *map = mtd->priv;
1262 struct cfi_private *cfi = map->fldrv_priv;
1263 struct cfi_pri_intelext *extp=cfi->cmdset_priv;
1264 int base_offst,reg_sz;
1265
1266 /* Check that we actually have some protection registers */
1267 if(!extp || !(extp->FeatureSupport&64)){
1268 printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
1269 return 0;
1270 }
1271
1272 base_offst=0;
1273 reg_sz=(1<<extp->FactProtRegSize);
1274
1275 return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
1276}
1277#endif
1278
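/*
 * Word programming on these Intel/Sharp command-set chips, as implemented
 * below: issue the Word Program command (0x40), write the datum to the
 * target address, then poll the status register until SR.7 (CMD(0x80))
 * reports that the write state machine is ready.  A set SR.1 bit
 * (CMD(0x02)) afterwards indicates the block was locked and the write was
 * rejected, which is why do_write_oneword() returns -EROFS in that case.
 */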
1279static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1280 unsigned long adr, map_word datum)
1281{
1282 struct cfi_private *cfi = map->fldrv_priv;
1283 map_word status, status_OK;
1284 unsigned long timeo;
1285 int z, ret=0;
1286
1287 adr += chip->start;
1288
1289 /* Let's determine this according to the interleave only once */
1290 status_OK = CMD(0x80);
1291
1292 spin_lock(chip->mutex);
1293 ret = get_chip(map, chip, adr, FL_WRITING);
1294 if (ret) {
1295 spin_unlock(chip->mutex);
1296 return ret;
1297 }
1298
1299 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1300 ENABLE_VPP(map);
1301 xip_disable(map, chip, adr);
1302 map_write(map, CMD(0x40), adr);
1303 map_write(map, datum, adr);
1304 chip->state = FL_WRITING;
1305
1306 spin_unlock(chip->mutex);
1307 INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
1308 UDELAY(map, chip, adr, chip->word_write_time);
1309 spin_lock(chip->mutex);
1310
1311 timeo = jiffies + (HZ/2);
1312 z = 0;
1313 for (;;) {
1314 if (chip->state != FL_WRITING) {
1315 /* Someone's suspended the write. Sleep */
1316 DECLARE_WAITQUEUE(wait, current);
1317
1318 set_current_state(TASK_UNINTERRUPTIBLE);
1319 add_wait_queue(&chip->wq, &wait);
1320 spin_unlock(chip->mutex);
1321 schedule();
1322 remove_wait_queue(&chip->wq, &wait);
1323 timeo = jiffies + (HZ / 2); /* FIXME */
1324 spin_lock(chip->mutex);
1325 continue;
1326 }
1327
1328 status = map_read(map, adr);
1329 if (map_word_andequal(map, status, status_OK, status_OK))
1330 break;
1331
1332 /* OK Still waiting */
1333 if (time_after(jiffies, timeo)) {
1334 chip->state = FL_STATUS;
1335 xip_enable(map, chip, adr);
1336 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
1337 ret = -EIO;
1338 goto out;
1339 }
1340
1341 /* Latency issues. Drop the lock, wait a while and retry */
1342 spin_unlock(chip->mutex);
1343 z++;
1344 UDELAY(map, chip, adr, 1);
1345 spin_lock(chip->mutex);
1346 }
1347 if (!z) {
1348 chip->word_write_time--;
1349 if (!chip->word_write_time)
1350 chip->word_write_time++;
1351 }
1352 if (z > 1)
1353 chip->word_write_time++;
1354
1355 /* Done and happy. */
1356 chip->state = FL_STATUS;
1357
1358 /* check for lock bit */
1359 if (map_word_bitsset(map, status, CMD(0x02))) {
1360 /* clear status */
1361 map_write(map, CMD(0x50), adr);
1362 /* put back into read status register mode */
1363 map_write(map, CMD(0x70), adr);
1364 ret = -EROFS;
1365 }
1366
1367 xip_enable(map, chip, adr);
1368 out: put_chip(map, chip, adr);
1369 spin_unlock(chip->mutex);
1370
1371 return ret;
1372}
1373
1374
1375static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1376{
1377 struct map_info *map = mtd->priv;
1378 struct cfi_private *cfi = map->fldrv_priv;
1379 int ret = 0;
1380 int chipnum;
1381 unsigned long ofs;
1382
1383 *retlen = 0;
1384 if (!len)
1385 return 0;
1386
1387 chipnum = to >> cfi->chipshift;
1388 ofs = to - (chipnum << cfi->chipshift);
1389
1390 /* If it's not bus-aligned, do the first byte write */
1391 if (ofs & (map_bankwidth(map)-1)) {
1392 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1393 int gap = ofs - bus_ofs;
1394 int n;
1395 map_word datum;
1396
1397 n = min_t(int, len, map_bankwidth(map)-gap);
1398 datum = map_word_ff(map);
1399 datum = map_word_load_partial(map, datum, buf, gap, n);
1400
1401 ret = do_write_oneword(map, &cfi->chips[chipnum],
1402 bus_ofs, datum);
1403 if (ret)
1404 return ret;
1405
1406 len -= n;
1407 ofs += n;
1408 buf += n;
1409 (*retlen) += n;
1410
1411 if (ofs >> cfi->chipshift) {
1412 chipnum ++;
1413 ofs = 0;
1414 if (chipnum == cfi->numchips)
1415 return 0;
1416 }
1417 }
1418
1419 while(len >= map_bankwidth(map)) {
1420 map_word datum = map_word_load(map, buf);
1421
1422 ret = do_write_oneword(map, &cfi->chips[chipnum],
1423 ofs, datum);
1424 if (ret)
1425 return ret;
1426
1427 ofs += map_bankwidth(map);
1428 buf += map_bankwidth(map);
1429 (*retlen) += map_bankwidth(map);
1430 len -= map_bankwidth(map);
1431
1432 if (ofs >> cfi->chipshift) {
1433 chipnum ++;
1434 ofs = 0;
1435 if (chipnum == cfi->numchips)
1436 return 0;
1437 }
1438 }
1439
1440 if (len & (map_bankwidth(map)-1)) {
1441 map_word datum;
1442
1443 datum = map_word_ff(map);
1444 datum = map_word_load_partial(map, datum, buf, 0, len);
1445
1446 ret = do_write_oneword(map, &cfi->chips[chipnum],
1447 ofs, datum);
1448 if (ret)
1449 return ret;
1450
1451 (*retlen) += len;
1452 }
1453
1454 return 0;
1455}
1456
1457
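/*
 * Buffered writes, as implemented below: request buffer availability with
 * the Write-to-Buffer command (0xe8) and poll SR.7, write the word count
 * minus one, stream out the data words, then confirm with 0xd0 and poll
 * SR.7 again until the buffer has been programmed into the array.  Error
 * handling mirrors do_write_oneword(): SR.1 set means a locked block.
 */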
1458static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1459 unsigned long adr, const u_char *buf, int len)
1460{
1461 struct cfi_private *cfi = map->fldrv_priv;
1462 map_word status, status_OK;
1463 unsigned long cmd_adr, timeo;
1464 int wbufsize, z, ret=0, bytes, words;
1465
1466 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1467 adr += chip->start;
1468 cmd_adr = adr & ~(wbufsize-1);
1469
1470 /* Let's determine this according to the interleave only once */
1471 status_OK = CMD(0x80);
1472
1473 spin_lock(chip->mutex);
1474 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1475 if (ret) {
1476 spin_unlock(chip->mutex);
1477 return ret;
1478 }
1479
1480 XIP_INVAL_CACHED_RANGE(map, adr, len);
1481 ENABLE_VPP(map);
1482 xip_disable(map, chip, cmd_adr);
1483
1484 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1485 [...], the device will not accept any more Write to Buffer commands".
1486 So we must check here and reset those bits if they're set. Otherwise
1487 we're just pissing in the wind */
1488 if (chip->state != FL_STATUS)
1489 map_write(map, CMD(0x70), cmd_adr);
1490 status = map_read(map, cmd_adr);
1491 if (map_word_bitsset(map, status, CMD(0x30))) {
1492 xip_enable(map, chip, cmd_adr);
1493 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1494 xip_disable(map, chip, cmd_adr);
1495 map_write(map, CMD(0x50), cmd_adr);
1496 map_write(map, CMD(0x70), cmd_adr);
1497 }
1498
1499 chip->state = FL_WRITING_TO_BUFFER;
1500
1501 z = 0;
1502 for (;;) {
1503 map_write(map, CMD(0xe8), cmd_adr);
1504
1505 status = map_read(map, cmd_adr);
1506 if (map_word_andequal(map, status, status_OK, status_OK))
1507 break;
1508
1509 spin_unlock(chip->mutex);
1510 UDELAY(map, chip, cmd_adr, 1);
1511 spin_lock(chip->mutex);
1512
1513 if (++z > 20) {
1514 /* Argh. Not ready for write to buffer */
1515 map_word Xstatus;
1516 map_write(map, CMD(0x70), cmd_adr);
1517 chip->state = FL_STATUS;
1518 Xstatus = map_read(map, cmd_adr);
1519 /* Odd. Clear status bits */
1520 map_write(map, CMD(0x50), cmd_adr);
1521 map_write(map, CMD(0x70), cmd_adr);
1522 xip_enable(map, chip, cmd_adr);
1523 printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1524 status.x[0], Xstatus.x[0]);
1525 ret = -EIO;
1526 goto out;
1527 }
1528 }
1529
1530 /* Write length of data to come */
1531 bytes = len & (map_bankwidth(map)-1);
1532 words = len / map_bankwidth(map);
1533 map_write(map, CMD(words - !bytes), cmd_adr );
1534
1535 /* Write data */
1536 z = 0;
1537 while(z < words * map_bankwidth(map)) {
1538 map_word datum = map_word_load(map, buf);
1539 map_write(map, datum, adr+z);
1540
1541 z += map_bankwidth(map);
1542 buf += map_bankwidth(map);
1543 }
1544
1545 if (bytes) {
1546 map_word datum;
1547
1548 datum = map_word_ff(map);
1549 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1550 map_write(map, datum, adr+z);
1551 }
1552
1553 /* GO GO GO */
1554 map_write(map, CMD(0xd0), cmd_adr);
1555 chip->state = FL_WRITING;
1556
1557 spin_unlock(chip->mutex);
1558 INVALIDATE_CACHED_RANGE(map, adr, len);
1559 UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
1560 spin_lock(chip->mutex);
1561
1562 timeo = jiffies + (HZ/2);
1563 z = 0;
1564 for (;;) {
1565 if (chip->state != FL_WRITING) {
1566 /* Someone's suspended the write. Sleep */
1567 DECLARE_WAITQUEUE(wait, current);
1568 set_current_state(TASK_UNINTERRUPTIBLE);
1569 add_wait_queue(&chip->wq, &wait);
1570 spin_unlock(chip->mutex);
1571 schedule();
1572 remove_wait_queue(&chip->wq, &wait);
1573 timeo = jiffies + (HZ / 2); /* FIXME */
1574 spin_lock(chip->mutex);
1575 continue;
1576 }
1577
1578 status = map_read(map, cmd_adr);
1579 if (map_word_andequal(map, status, status_OK, status_OK))
1580 break;
1581
1582 /* OK Still waiting */
1583 if (time_after(jiffies, timeo)) {
1584 chip->state = FL_STATUS;
1585 xip_enable(map, chip, cmd_adr);
1586 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1587 ret = -EIO;
1588 goto out;
1589 }
1590
1591 /* Latency issues. Drop the lock, wait a while and retry */
1592 spin_unlock(chip->mutex);
1593 UDELAY(map, chip, cmd_adr, 1);
1594 z++;
1595 spin_lock(chip->mutex);
1596 }
1597 if (!z) {
1598 chip->buffer_write_time--;
1599 if (!chip->buffer_write_time)
1600 chip->buffer_write_time++;
1601 }
1602 if (z > 1)
1603 chip->buffer_write_time++;
1604
1605 /* Done and happy. */
1606 chip->state = FL_STATUS;
1607
1608 /* check for lock bit */
1609 if (map_word_bitsset(map, status, CMD(0x02))) {
1610 /* clear status */
1611 map_write(map, CMD(0x50), cmd_adr);
1612 /* put back into read status register mode */
1613 map_write(map, CMD(0x70), adr);
1614 ret = -EROFS;
1615 }
1616
1617 xip_enable(map, chip, cmd_adr);
1618 out: put_chip(map, chip, cmd_adr);
1619 spin_unlock(chip->mutex);
1620 return ret;
1621}
1622
1623static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1624 size_t len, size_t *retlen, const u_char *buf)
1625{
1626 struct map_info *map = mtd->priv;
1627 struct cfi_private *cfi = map->fldrv_priv;
1628 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1629 int ret = 0;
1630 int chipnum;
1631 unsigned long ofs;
1632
1633 *retlen = 0;
1634 if (!len)
1635 return 0;
1636
1637 chipnum = to >> cfi->chipshift;
1638 ofs = to - (chipnum << cfi->chipshift);
1639
1640 /* If it's not bus-aligned, do the first word write */
1641 if (ofs & (map_bankwidth(map)-1)) {
1642 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1643 if (local_len > len)
1644 local_len = len;
1645 ret = cfi_intelext_write_words(mtd, to, local_len,
1646 retlen, buf);
1647 if (ret)
1648 return ret;
1649 ofs += local_len;
1650 buf += local_len;
1651 len -= local_len;
1652
1653 if (ofs >> cfi->chipshift) {
1654 chipnum ++;
1655 ofs = 0;
1656 if (chipnum == cfi->numchips)
1657 return 0;
1658 }
1659 }
1660
1661 while(len) {
1662 /* We must not cross write block boundaries */
1663 int size = wbufsize - (ofs & (wbufsize-1));
1664
1665 if (size > len)
1666 size = len;
1667 ret = do_write_buffer(map, &cfi->chips[chipnum],
1668 ofs, buf, size);
1669 if (ret)
1670 return ret;
1671
1672 ofs += size;
1673 buf += size;
1674 (*retlen) += size;
1675 len -= size;
1676
1677 if (ofs >> cfi->chipshift) {
1678 chipnum ++;
1679 ofs = 0;
1680 if (chipnum == cfi->numchips)
1681 return 0;
1682 }
1683 }
1684 return 0;
1685}
1686
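/*
 * Block erase, as implemented below: clear the status register (0x50),
 * issue Block Erase (0x20) followed by Confirm (0xd0), then poll SR.7.
 * On completion, the error bits selected by CMD(0x3a) are interpreted as:
 * SR.4 and SR.5 both set => improper command sequence, SR.1 => block
 * locked (-EROFS), SR.3 => Vpp low, SR.5 alone => erase failure, which is
 * retried a few times before giving up with -EIO.
 */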
1687static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1688 unsigned long adr, int len, void *thunk)
1689{
1690 struct cfi_private *cfi = map->fldrv_priv;
1691 map_word status, status_OK;
1692 unsigned long timeo;
1693 int retries = 3;
1694 DECLARE_WAITQUEUE(wait, current);
1695 int ret = 0;
1696
1697 adr += chip->start;
1698
1699 /* Let's determine this according to the interleave only once */
1700 status_OK = CMD(0x80);
1701
1702 retry:
1703 spin_lock(chip->mutex);
1704 ret = get_chip(map, chip, adr, FL_ERASING);
1705 if (ret) {
1706 spin_unlock(chip->mutex);
1707 return ret;
1708 }
1709
1710 XIP_INVAL_CACHED_RANGE(map, adr, len);
1711 ENABLE_VPP(map);
1712 xip_disable(map, chip, adr);
1713
1714 /* Clear the status register first */
1715 map_write(map, CMD(0x50), adr);
1716
1717 /* Now erase */
1718 map_write(map, CMD(0x20), adr);
1719 map_write(map, CMD(0xD0), adr);
1720 chip->state = FL_ERASING;
1721 chip->erase_suspended = 0;
1722
1723 spin_unlock(chip->mutex);
1724 INVALIDATE_CACHED_RANGE(map, adr, len);
1725 UDELAY(map, chip, adr, chip->erase_time*1000/2);
1726 spin_lock(chip->mutex);
1727
1728 /* FIXME. Use a timer to check this, and return immediately. */
1729 /* Once the state machine's known to be working I'll do that */
1730
1731 timeo = jiffies + (HZ*20);
1732 for (;;) {
1733 if (chip->state != FL_ERASING) {
1734 /* Someone's suspended the erase. Sleep */
1735 set_current_state(TASK_UNINTERRUPTIBLE);
1736 add_wait_queue(&chip->wq, &wait);
1737 spin_unlock(chip->mutex);
1738 schedule();
1739 remove_wait_queue(&chip->wq, &wait);
1740 spin_lock(chip->mutex);
1741 continue;
1742 }
1743 if (chip->erase_suspended) {
1744 /* This erase was suspended and resumed.
1745 Adjust the timeout */
1746 timeo = jiffies + (HZ*20); /* FIXME */
1747 chip->erase_suspended = 0;
1748 }
1749
1750 status = map_read(map, adr);
1751 if (map_word_andequal(map, status, status_OK, status_OK))
1752 break;
1753
1754 /* OK Still waiting */
1755 if (time_after(jiffies, timeo)) {
1756 map_word Xstatus;
1757 map_write(map, CMD(0x70), adr);
1758 chip->state = FL_STATUS;
1759 Xstatus = map_read(map, adr);
1760 /* Clear status bits */
1761 map_write(map, CMD(0x50), adr);
1762 map_write(map, CMD(0x70), adr);
1763 xip_enable(map, chip, adr);
1764 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1765 adr, status.x[0], Xstatus.x[0]);
1766 ret = -EIO;
1767 goto out;
1768 }
1769
1770 /* Latency issues. Drop the lock, wait a while and retry */
1771 spin_unlock(chip->mutex);
1772 UDELAY(map, chip, adr, 1000000/HZ);
1773 spin_lock(chip->mutex);
1774 }
1775
1776 /* We've broken this before. It doesn't hurt to be safe */
1777 map_write(map, CMD(0x70), adr);
1778 chip->state = FL_STATUS;
1779 status = map_read(map, adr);
1780
1781 /* check for lock bit */
1782 if (map_word_bitsset(map, status, CMD(0x3a))) {
1783 unsigned char chipstatus;
1784
1785 /* Reset the error bits */
1786 map_write(map, CMD(0x50), adr);
1787 map_write(map, CMD(0x70), adr);
1788 xip_enable(map, chip, adr);
1789
1790 chipstatus = status.x[0];
1791 if (!map_word_equal(map, status, CMD(chipstatus))) {
1792 int i, w;
1793 for (w=0; w<map_words(map); w++) {
1794 for (i = 0; i<cfi_interleave(cfi); i++) {
1795					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i); /* merge each interleaved chip's status byte */
1796 }
1797 }
1798 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1799 status.x[0], chipstatus);
1800 }
1801
1802 if ((chipstatus & 0x30) == 0x30) {
1803 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1804 ret = -EIO;
1805 } else if (chipstatus & 0x02) {
1806 /* Protection bit set */
1807 ret = -EROFS;
1808 } else if (chipstatus & 0x8) {
1809 /* Voltage */
1810 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1811 ret = -EIO;
1812 } else if (chipstatus & 0x20) {
1813 if (retries--) {
1814 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1815 timeo = jiffies + HZ;
1816 put_chip(map, chip, adr);
1817 spin_unlock(chip->mutex);
1818 goto retry;
1819 }
1820 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1821 ret = -EIO;
1822 }
1823 } else {
1824 xip_enable(map, chip, adr);
1825 ret = 0;
1826 }
1827
1828 out: put_chip(map, chip, adr);
1829 spin_unlock(chip->mutex);
1830 return ret;
1831}
1832
1833int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1834{
1835 unsigned long ofs, len;
1836 int ret;
1837
1838 ofs = instr->addr;
1839 len = instr->len;
1840
1841 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1842 if (ret)
1843 return ret;
1844
1845 instr->state = MTD_ERASE_DONE;
1846 mtd_erase_callback(instr);
1847
1848 return 0;
1849}
1850
1851static void cfi_intelext_sync (struct mtd_info *mtd)
1852{
1853 struct map_info *map = mtd->priv;
1854 struct cfi_private *cfi = map->fldrv_priv;
1855 int i;
1856 struct flchip *chip;
1857 int ret = 0;
1858
1859 for (i=0; !ret && i<cfi->numchips; i++) {
1860 chip = &cfi->chips[i];
1861
1862 spin_lock(chip->mutex);
1863 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1864
1865 if (!ret) {
1866 chip->oldstate = chip->state;
1867 chip->state = FL_SYNCING;
1868 /* No need to wake_up() on this state change -
1869 * as the whole point is that nobody can do anything
1870 * with the chip now anyway.
1871 */
1872 }
1873 spin_unlock(chip->mutex);
1874 }
1875
1876 /* Unlock the chips again */
1877
1878 for (i--; i >=0; i--) {
1879 chip = &cfi->chips[i];
1880
1881 spin_lock(chip->mutex);
1882
1883 if (chip->state == FL_SYNCING) {
1884 chip->state = chip->oldstate;
1885 wake_up(&chip->wq);
1886 }
1887 spin_unlock(chip->mutex);
1888 }
1889}
1890
1891#ifdef DEBUG_LOCK_BITS
1892static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1893 struct flchip *chip,
1894 unsigned long adr,
1895 int len, void *thunk)
1896{
1897 struct cfi_private *cfi = map->fldrv_priv;
1898 int status, ofs_factor = cfi->interleave * cfi->device_type;
1899
1900 xip_disable(map, chip, adr+(2*ofs_factor));
1901 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1902 chip->state = FL_JEDEC_QUERY;
1903 status = cfi_read_query(map, adr+(2*ofs_factor));
1904 xip_enable(map, chip, 0);
1905 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1906 adr, status);
1907 return 0;
1908}
1909#endif
1910
1911#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1912#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1913
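/*
 * Block lock/unlock, as implemented below: both operations start with the
 * Set/Clear Block Lock-Bit setup command (0x60), followed by 0x01 to lock
 * the block or 0xd0 to unlock it, after which SR.7 is polled for
 * completion just as in the write and erase paths.
 */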
1914static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1915 unsigned long adr, int len, void *thunk)
1916{
1917 struct cfi_private *cfi = map->fldrv_priv;
1918 map_word status, status_OK;
1919 unsigned long timeo = jiffies + HZ;
1920 int ret;
1921
1922 adr += chip->start;
1923
1924 /* Let's determine this according to the interleave only once */
1925 status_OK = CMD(0x80);
1926
1927 spin_lock(chip->mutex);
1928 ret = get_chip(map, chip, adr, FL_LOCKING);
1929 if (ret) {
1930 spin_unlock(chip->mutex);
1931 return ret;
1932 }
1933
1934 ENABLE_VPP(map);
1935 xip_disable(map, chip, adr);
1936
1937 map_write(map, CMD(0x60), adr);
1938 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1939 map_write(map, CMD(0x01), adr);
1940 chip->state = FL_LOCKING;
1941 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1942 map_write(map, CMD(0xD0), adr);
1943 chip->state = FL_UNLOCKING;
1944 } else
1945 BUG();
1946
1947 spin_unlock(chip->mutex);
1948 UDELAY(map, chip, adr, 1000000/HZ);
1949 spin_lock(chip->mutex);
1950
1951 /* FIXME. Use a timer to check this, and return immediately. */
1952 /* Once the state machine's known to be working I'll do that */
1953
1954 timeo = jiffies + (HZ*20);
1955 for (;;) {
1956
1957 status = map_read(map, adr);
1958 if (map_word_andequal(map, status, status_OK, status_OK))
1959 break;
1960
1961 /* OK Still waiting */
1962 if (time_after(jiffies, timeo)) {
1963 map_word Xstatus;
1964 map_write(map, CMD(0x70), adr);
1965 chip->state = FL_STATUS;
1966 Xstatus = map_read(map, adr);
1967 xip_enable(map, chip, adr);
1968 printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1969 status.x[0], Xstatus.x[0]);
1970 put_chip(map, chip, adr);
1971 spin_unlock(chip->mutex);
1972 return -EIO;
1973 }
1974
1975 /* Latency issues. Drop the lock, wait a while and retry */
1976 spin_unlock(chip->mutex);
1977 UDELAY(map, chip, adr, 1);
1978 spin_lock(chip->mutex);
1979 }
1980
1981 /* Done and happy. */
1982 chip->state = FL_STATUS;
1983 xip_enable(map, chip, adr);
1984 put_chip(map, chip, adr);
1985 spin_unlock(chip->mutex);
1986 return 0;
1987}
1988
1989static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1990{
1991 int ret;
1992
1993#ifdef DEBUG_LOCK_BITS
1994 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1995 __FUNCTION__, ofs, len);
1996 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1997 ofs, len, 0);
1998#endif
1999
2000 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2001 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2002
2003#ifdef DEBUG_LOCK_BITS
2004 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2005 __FUNCTION__, ret);
2006 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2007 ofs, len, 0);
2008#endif
2009
2010 return ret;
2011}
2012
2013static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2014{
2015 int ret;
2016
2017#ifdef DEBUG_LOCK_BITS
2018 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2019 __FUNCTION__, ofs, len);
2020 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2021 ofs, len, 0);
2022#endif
2023
2024 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2025 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2026
2027#ifdef DEBUG_LOCK_BITS
2028 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2029 __FUNCTION__, ret);
2030 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2031 ofs, len, 0);
2032#endif
2033
2034 return ret;
2035}
2036
2037static int cfi_intelext_suspend(struct mtd_info *mtd)
2038{
2039 struct map_info *map = mtd->priv;
2040 struct cfi_private *cfi = map->fldrv_priv;
2041 int i;
2042 struct flchip *chip;
2043 int ret = 0;
2044
2045 for (i=0; !ret && i<cfi->numchips; i++) {
2046 chip = &cfi->chips[i];
2047
2048 spin_lock(chip->mutex);
2049
2050 switch (chip->state) {
2051 case FL_READY:
2052 case FL_STATUS:
2053 case FL_CFI_QUERY:
2054 case FL_JEDEC_QUERY:
2055 if (chip->oldstate == FL_READY) {
2056 chip->oldstate = chip->state;
2057 chip->state = FL_PM_SUSPENDED;
2058 /* No need to wake_up() on this state change -
2059 * as the whole point is that nobody can do anything
2060 * with the chip now anyway.
2061 */
2062 } else {
2063 /* There seems to be an operation pending. We must wait for it. */
2064 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2065 ret = -EAGAIN;
2066 }
2067 break;
2068 default:
2069 /* Should we actually wait? Once upon a time these routines weren't
2070 allowed to. Or should we return -EAGAIN, because the upper layers
2071 ought to have already shut down anything which was using the device
2072 anyway? The latter for now. */
2073			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2074			ret = -EAGAIN;	/* fall through to FL_PM_SUSPENDED */
2075 case FL_PM_SUSPENDED:
2076 break;
2077 }
2078 spin_unlock(chip->mutex);
2079 }
2080
2081 /* Unlock the chips again */
2082
2083 if (ret) {
2084 for (i--; i >=0; i--) {
2085 chip = &cfi->chips[i];
2086
2087 spin_lock(chip->mutex);
2088
2089 if (chip->state == FL_PM_SUSPENDED) {
2090 /* No need to force it into a known state here,
2091 because we're returning failure, and it didn't
2092 get power cycled */
2093 chip->state = chip->oldstate;
2094 chip->oldstate = FL_READY;
2095 wake_up(&chip->wq);
2096 }
2097 spin_unlock(chip->mutex);
2098 }
2099 }
2100
2101 return ret;
2102}
2103
2104static void cfi_intelext_resume(struct mtd_info *mtd)
2105{
2106 struct map_info *map = mtd->priv;
2107 struct cfi_private *cfi = map->fldrv_priv;
2108 int i;
2109 struct flchip *chip;
2110
2111 for (i=0; i<cfi->numchips; i++) {
2112
2113 chip = &cfi->chips[i];
2114
2115 spin_lock(chip->mutex);
2116
2117 /* Go to known state. Chip may have been power cycled */
2118 if (chip->state == FL_PM_SUSPENDED) {
2119 map_write(map, CMD(0xFF), cfi->chips[i].start);
2120 chip->oldstate = chip->state = FL_READY;
2121 wake_up(&chip->wq);
2122 }
2123
2124 spin_unlock(chip->mutex);
2125 }
2126}
2127
2128static void cfi_intelext_destroy(struct mtd_info *mtd)
2129{
2130 struct map_info *map = mtd->priv;
2131 struct cfi_private *cfi = map->fldrv_priv;
2132 kfree(cfi->cmdset_priv);
2133 kfree(cfi->cfiq);
2134 kfree(cfi->chips[0].priv);
2135 kfree(cfi);
2136 kfree(mtd->eraseregions);
2137}
2138
2139static char im_name_1[]="cfi_cmdset_0001";
2140static char im_name_3[]="cfi_cmdset_0003";
2141
2142static int __init cfi_intelext_init(void)
2143{
2144 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
2145 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
2146 return 0;
2147}
2148
2149static void __exit cfi_intelext_exit(void)
2150{
2151 inter_module_unregister(im_name_1);
2152 inter_module_unregister(im_name_3);
2153}
2154
2155module_init(cfi_intelext_init);
2156module_exit(cfi_intelext_exit);
2157
2158MODULE_LICENSE("GPL");
2159MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2160MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
new file mode 100644
index 000000000000..fca8ff6f7e14
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -0,0 +1,1515 @@
1/*
2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 *
8 * 2_by_8 routines added by Simon Munton
9 *
10 * 4_by_16 work by Carolyn J. Smith
11 *
12 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
13 *
14 * This code is GPL
15 *
16 * $Id: cfi_cmdset_0002.c,v 1.114 2004/12/11 15:43:53 dedekind Exp $
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/compatmac.h>
34#include <linux/mtd/map.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/cfi.h>
37
38#define AMD_BOOTLOC_BUG
39#define FORCE_WORD_WRITE 0
40
41#define MAX_WORD_RETRIES 3
42
43#define MANUFACTURER_AMD 0x0001
44#define MANUFACTURER_SST 0x00BF
45#define SST49LF004B 0x0060
46
47static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
48static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
49static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
50static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
51static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
52static void cfi_amdstd_sync (struct mtd_info *);
53static int cfi_amdstd_suspend (struct mtd_info *);
54static void cfi_amdstd_resume (struct mtd_info *);
55static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56
57static void cfi_amdstd_destroy(struct mtd_info *);
58
59struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
60static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
61
62static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
63static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
64#include "fwh_lock.h"
65
66static struct mtd_chip_driver cfi_amdstd_chipdrv = {
67 .probe = NULL, /* Not usable directly */
68 .destroy = cfi_amdstd_destroy,
69 .name = "cfi_cmdset_0002",
70 .module = THIS_MODULE
71};
72
73
74/* #define DEBUG_CFI_FEATURES */
75
76
77#ifdef DEBUG_CFI_FEATURES
78static void cfi_tell_features(struct cfi_pri_amdstd *extp)
79{
80 const char* erase_suspend[3] = {
81 "Not supported", "Read only", "Read/write"
82 };
83 const char* top_bottom[6] = {
84 "No WP", "8x8KiB sectors at top & bottom, no WP",
85 "Bottom boot", "Top boot",
86 "Uniform, Bottom WP", "Uniform, Top WP"
87 };
88
89 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
90 printk(" Address sensitive unlock: %s\n",
91 (extp->SiliconRevision & 1) ? "Not required" : "Required");
92
93 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
94 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
95 else
96 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
97
98 if (extp->BlkProt == 0)
99 printk(" Block protection: Not supported\n");
100 else
101 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
102
103
104 printk(" Temporary block unprotect: %s\n",
105 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
106 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
107 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
108 printk(" Burst mode: %s\n",
109 extp->BurstMode ? "Supported" : "Not supported");
110 if (extp->PageMode == 0)
111 printk(" Page mode: Not supported\n");
112 else
113 printk(" Page mode: %d word page\n", extp->PageMode << 2);
114
115 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
116 extp->VppMin >> 4, extp->VppMin & 0xf);
117 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
118 extp->VppMax >> 4, extp->VppMax & 0xf);
119
120 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
121 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
122 else
123 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
124}
125#endif
126
127#ifdef AMD_BOOTLOC_BUG
128/* Wheee. Bring me the head of someone at AMD. */
129static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
130{
131 struct map_info *map = mtd->priv;
132 struct cfi_private *cfi = map->fldrv_priv;
133 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
134 __u8 major = extp->MajorVersion;
135 __u8 minor = extp->MinorVersion;
136
137 if (((major << 8) | minor) < 0x3131) {
138 /* CFI version 1.0 => don't trust bootloc */
139 if (cfi->id & 0x80) {
140 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
141 extp->TopBottom = 3; /* top boot */
142 } else {
143 extp->TopBottom = 2; /* bottom boot */
144 }
145 }
146}
147#endif
148
149static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
150{
151 struct map_info *map = mtd->priv;
152 struct cfi_private *cfi = map->fldrv_priv;
153 if (cfi->cfiq->BufWriteTimeoutTyp) {
154 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
155 mtd->write = cfi_amdstd_write_buffers;
156 }
157}
158
159static void fixup_use_secsi(struct mtd_info *mtd, void *param)
160{
161 /* Setup for chips with a secsi area */
162 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
163 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
164}
165
166static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
167{
168 struct map_info *map = mtd->priv;
169 struct cfi_private *cfi = map->fldrv_priv;
170 if ((cfi->cfiq->NumEraseRegions == 1) &&
171 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
172 mtd->erase = cfi_amdstd_erase_chip;
173 }
174
175}
176
177static struct cfi_fixup cfi_fixup_table[] = {
178#ifdef AMD_BOOTLOC_BUG
179 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
180#endif
181 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
182 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
183 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
184 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
185 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
186 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
187#if !FORCE_WORD_WRITE
188 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
189#endif
190 { 0, 0, NULL, NULL }
191};
192static struct cfi_fixup jedec_fixup_table[] = {
193 { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
194 { 0, 0, NULL, NULL }
195};
196
197static struct cfi_fixup fixup_table[] = {
198	/* The CFI vendor IDs and the JEDEC vendor IDs appear
199	 * to be common, and it looks as though the device IDs
200	 * are as well.  This table picks up all the cases where
201	 * we know that to be so.
202	 */
203 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
204 { 0, 0, NULL, NULL }
205};
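/*
 * cfi_fixup() walks these tables and applies every entry whose
 * manufacturer and device IDs match the probed chip; CFI_MFR_ANY and
 * CFI_ID_ANY act as wildcards, and the all-zero entries above terminate
 * each table.
 */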
206
207
208struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
209{
210 struct cfi_private *cfi = map->fldrv_priv;
211 struct mtd_info *mtd;
212 int i;
213
214 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
215 if (!mtd) {
216 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
217 return NULL;
218 }
219 memset(mtd, 0, sizeof(*mtd));
220 mtd->priv = map;
221 mtd->type = MTD_NORFLASH;
222
223 /* Fill in the default mtd operations */
224 mtd->erase = cfi_amdstd_erase_varsize;
225 mtd->write = cfi_amdstd_write_words;
226 mtd->read = cfi_amdstd_read;
227 mtd->sync = cfi_amdstd_sync;
228 mtd->suspend = cfi_amdstd_suspend;
229 mtd->resume = cfi_amdstd_resume;
230 mtd->flags = MTD_CAP_NORFLASH;
231 mtd->name = map->name;
232
233 if (cfi->cfi_mode==CFI_MODE_CFI){
234 unsigned char bootloc;
235 /*
236 * It's a real CFI chip, not one for which the probe
237 * routine faked a CFI structure. So we read the feature
238 * table from it.
239 */
240 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
241 struct cfi_pri_amdstd *extp;
242
243 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
244 if (!extp) {
245 kfree(mtd);
246 return NULL;
247 }
248
249 /* Install our own private info structure */
250 cfi->cmdset_priv = extp;
251
252 /* Apply cfi device specific fixups */
253 cfi_fixup(mtd, cfi_fixup_table);
254
255#ifdef DEBUG_CFI_FEATURES
256 /* Tell the user about it in lots of lovely detail */
257 cfi_tell_features(extp);
258#endif
259
260 bootloc = extp->TopBottom;
261 if ((bootloc != 2) && (bootloc != 3)) {
262 printk(KERN_WARNING "%s: CFI does not contain boot "
263 "bank location. Assuming top.\n", map->name);
264 bootloc = 2;
265 }
266
267 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
268 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
269
270 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
271 int j = (cfi->cfiq->NumEraseRegions-1)-i;
272 __u32 swap;
273
274 swap = cfi->cfiq->EraseRegionInfo[i];
275 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
276 cfi->cfiq->EraseRegionInfo[j] = swap;
277 }
278 }
279 /* Set the default CFI lock/unlock addresses */
280 cfi->addr_unlock1 = 0x555;
281 cfi->addr_unlock2 = 0x2aa;
282 /* Modify the unlock address if we are in compatibility mode */
283 if ( /* x16 in x8 mode */
284 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
285 (cfi->cfiq->InterfaceDesc == 2)) ||
286 /* x32 in x16 mode */
287 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
288 (cfi->cfiq->InterfaceDesc == 4)))
289 {
290 cfi->addr_unlock1 = 0xaaa;
291 cfi->addr_unlock2 = 0x555;
292 }
293
294 } /* CFI mode */
295 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
296 /* Apply jedec specific fixups */
297 cfi_fixup(mtd, jedec_fixup_table);
298 }
299 /* Apply generic fixups */
300 cfi_fixup(mtd, fixup_table);
301
302 for (i=0; i< cfi->numchips; i++) {
303 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
304 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
305 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
306 }
307
308 map->fldrv = &cfi_amdstd_chipdrv;
309
310 return cfi_amdstd_setup(mtd);
311}
312
313
314static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
315{
316 struct map_info *map = mtd->priv;
317 struct cfi_private *cfi = map->fldrv_priv;
318 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
319 unsigned long offset = 0;
320 int i,j;
321
322 printk(KERN_NOTICE "number of %s chips: %d\n",
323 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
324 /* Select the correct geometry setup */
325 mtd->size = devsize * cfi->numchips;
326
327 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
328 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
329 * mtd->numeraseregions, GFP_KERNEL);
330 if (!mtd->eraseregions) {
331 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
332 goto setup_err;
333 }
334
335 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
336 unsigned long ernum, ersize;
337 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
338 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
339
340 if (mtd->erasesize < ersize) {
341 mtd->erasesize = ersize;
342 }
343 for (j=0; j<cfi->numchips; j++) {
344 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
345 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
346 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
347 }
348 offset += (ersize * ernum);
349 }
350 if (offset != devsize) {
351 /* Argh */
352 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
353 goto setup_err;
354 }
355#if 0
356 // debug
357 for (i=0; i<mtd->numeraseregions;i++){
358 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
359 i,mtd->eraseregions[i].offset,
360 mtd->eraseregions[i].erasesize,
361 mtd->eraseregions[i].numblocks);
362 }
363#endif
364
365 /* FIXME: erase-suspend-program is broken. See
366 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
367 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
368
369 __module_get(THIS_MODULE);
370 return mtd;
371
372 setup_err:
373 if(mtd) {
374 if(mtd->eraseregions)
375 kfree(mtd->eraseregions);
376 kfree(mtd);
377 }
378 kfree(cfi->cmdset_priv);
379 kfree(cfi->cfiq);
380 return NULL;
381}
382
383/*
384 * Return true if the chip is ready.
385 *
386 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
387 * non-suspended sector) and is indicated by no toggle bits toggling.
388 *
389 * Note that anything more complicated than checking if no bits are toggling
390 * (including checking DQ5 for an error status) is tricky to get working
391 * correctly and is therefore not done (particularly with interleaved chips
392 * as each chip must be checked independently of the others).
393 */
394static int chip_ready(struct map_info *map, unsigned long addr)
395{
396 map_word d, t;
397
398 d = map_read(map, addr);
399 t = map_read(map, addr);
400
401 return map_word_equal(map, d, t);
402}
403
404static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
405{
406 DECLARE_WAITQUEUE(wait, current);
407 struct cfi_private *cfi = map->fldrv_priv;
408 unsigned long timeo;
409 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
410
411 resettime:
412 timeo = jiffies + HZ;
413 retry:
414 switch (chip->state) {
415
416 case FL_STATUS:
417 for (;;) {
418 if (chip_ready(map, adr))
419 break;
420
421 if (time_after(jiffies, timeo)) {
422 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
423 cfi_spin_unlock(chip->mutex);
424 return -EIO;
425 }
426 cfi_spin_unlock(chip->mutex);
427 cfi_udelay(1);
428 cfi_spin_lock(chip->mutex);
429 /* Someone else might have been playing with it. */
430 goto retry;
431 }
432
433 case FL_READY:
434 case FL_CFI_QUERY:
435 case FL_JEDEC_QUERY:
436 return 0;
437
438 case FL_ERASING:
439 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
440 goto sleep;
441
442 if (!(mode == FL_READY || mode == FL_POINT
443 || !cfip
444 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
445 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
446 goto sleep;
447
448 /* We could check to see if we're trying to access the sector
449 * that is currently being erased. However, no user will try
450 * anything like that so we just wait for the timeout. */
451
452 /* Erase suspend */
453 /* It's harmless to issue the Erase-Suspend and Erase-Resume
454 * commands when the erase algorithm isn't in progress. */
455 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
456 chip->oldstate = FL_ERASING;
457 chip->state = FL_ERASE_SUSPENDING;
458 chip->erase_suspended = 1;
459 for (;;) {
460 if (chip_ready(map, adr))
461 break;
462
463 if (time_after(jiffies, timeo)) {
464 /* Should have suspended the erase by now.
465 * Send an Erase-Resume command as either
466 * there was an error (so leave the erase
467				 * routine to recover from it) or we are trying to
468 * use the erase-in-progress sector. */
469 map_write(map, CMD(0x30), chip->in_progress_block_addr);
470 chip->state = FL_ERASING;
471 chip->oldstate = FL_READY;
472 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
473 return -EIO;
474 }
475
476 cfi_spin_unlock(chip->mutex);
477 cfi_udelay(1);
478 cfi_spin_lock(chip->mutex);
479 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
480 So we can just loop here. */
481 }
482 chip->state = FL_READY;
483 return 0;
484
485 case FL_POINT:
486 /* Only if there's no operation suspended... */
487 if (mode == FL_READY && chip->oldstate == FL_READY)
488 return 0;
489
490 default:
491 sleep:
492 set_current_state(TASK_UNINTERRUPTIBLE);
493 add_wait_queue(&chip->wq, &wait);
494 cfi_spin_unlock(chip->mutex);
495 schedule();
496 remove_wait_queue(&chip->wq, &wait);
497 cfi_spin_lock(chip->mutex);
498 goto resettime;
499 }
500}
501
502
503static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
504{
505 struct cfi_private *cfi = map->fldrv_priv;
506
507 switch(chip->oldstate) {
508 case FL_ERASING:
509 chip->state = chip->oldstate;
510 map_write(map, CMD(0x30), chip->in_progress_block_addr);
511 chip->oldstate = FL_READY;
512 chip->state = FL_ERASING;
513 break;
514
515 case FL_READY:
516 case FL_STATUS:
517 /* We should really make set_vpp() count, rather than doing this */
518 DISABLE_VPP(map);
519 break;
520 default:
521 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
522 }
523 wake_up(&chip->wq);
524}
525
526
527static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
528{
529 unsigned long cmd_addr;
530 struct cfi_private *cfi = map->fldrv_priv;
531 int ret;
532
533 adr += chip->start;
534
535 /* Ensure cmd read/writes are aligned. */
536 cmd_addr = adr & ~(map_bankwidth(map)-1);
537
538 cfi_spin_lock(chip->mutex);
539 ret = get_chip(map, chip, cmd_addr, FL_READY);
540 if (ret) {
541 cfi_spin_unlock(chip->mutex);
542 return ret;
543 }
544
545 if (chip->state != FL_POINT && chip->state != FL_READY) {
546 map_write(map, CMD(0xf0), cmd_addr);
547 chip->state = FL_READY;
548 }
549
550 map_copy_from(map, buf, adr, len);
551
552 put_chip(map, chip, cmd_addr);
553
554 cfi_spin_unlock(chip->mutex);
555 return 0;
556}
557
558
559static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
560{
561 struct map_info *map = mtd->priv;
562 struct cfi_private *cfi = map->fldrv_priv;
563 unsigned long ofs;
564 int chipnum;
565 int ret = 0;
566
567 /* ofs: offset within the first chip that the first read should start */
568
569 chipnum = (from >> cfi->chipshift);
570 ofs = from - (chipnum << cfi->chipshift);
571
572
573 *retlen = 0;
574
575 while (len) {
576 unsigned long thislen;
577
578 if (chipnum >= cfi->numchips)
579 break;
580
581 if ((len + ofs -1) >> cfi->chipshift)
582 thislen = (1<<cfi->chipshift) - ofs;
583 else
584 thislen = len;
585
586 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
587 if (ret)
588 break;
589
590 *retlen += thislen;
591 len -= thislen;
592 buf += thislen;
593
594 ofs = 0;
595 chipnum++;
596 }
597 return ret;
598}
599
600
601static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
602{
603 DECLARE_WAITQUEUE(wait, current);
604 unsigned long timeo = jiffies + HZ;
605 struct cfi_private *cfi = map->fldrv_priv;
606
607 retry:
608 cfi_spin_lock(chip->mutex);
609
610 if (chip->state != FL_READY){
611#if 0
612 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
613#endif
614 set_current_state(TASK_UNINTERRUPTIBLE);
615 add_wait_queue(&chip->wq, &wait);
616
617 cfi_spin_unlock(chip->mutex);
618
619 schedule();
620 remove_wait_queue(&chip->wq, &wait);
621#if 0
622 if(signal_pending(current))
623 return -EINTR;
624#endif
625 timeo = jiffies + HZ;
626
627 goto retry;
628 }
629
630 adr += chip->start;
631
632 chip->state = FL_READY;
633
634 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
635 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
636 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
637
638 map_copy_from(map, buf, adr, len);
639
640 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
641 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
642 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
643 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
644
645 wake_up(&chip->wq);
646 cfi_spin_unlock(chip->mutex);
647
648 return 0;
649}
650
651static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
652{
653 struct map_info *map = mtd->priv;
654 struct cfi_private *cfi = map->fldrv_priv;
655 unsigned long ofs;
656 int chipnum;
657 int ret = 0;
658
659
660 /* ofs: offset within the first chip that the first read should start */
661
662 /* 8 secsi bytes per chip */
663 chipnum=from>>3;
664 ofs=from & 7;
665
666
667 *retlen = 0;
668
669 while (len) {
670 unsigned long thislen;
671
672 if (chipnum >= cfi->numchips)
673 break;
674
675 if ((len + ofs -1) >> 3)
676 thislen = (1<<3) - ofs;
677 else
678 thislen = len;
679
680 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
681 if (ret)
682 break;
683
684 *retlen += thislen;
685 len -= thislen;
686 buf += thislen;
687
688 ofs = 0;
689 chipnum++;
690 }
691 return ret;
692}
693
694
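/*
 * Single-word programming on AMD/Fujitsu command-set chips, as implemented
 * below: send the 0xAA/0x55 unlock cycles at addr_unlock1/addr_unlock2,
 * then the Program command (0xA0), then the datum itself.  Completion is
 * detected with chip_ready() (two identical reads, i.e. no toggling bits);
 * on timeout the chip is reset with 0xF0 and the word is retried up to
 * MAX_WORD_RETRIES times.
 */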
695static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
696{
697 struct cfi_private *cfi = map->fldrv_priv;
698 unsigned long timeo = jiffies + HZ;
699 /*
700	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
701	 * have a max write time of a few hundred usec). However, we should
702	 * use the maximum timeout value given by the chip at probe time
703	 * instead.  Unfortunately, struct flchip does not have a field for the
704	 * maximum timeout, only for the typical one, which can be far too short
705	 * depending on the conditions.  The ' + 1' is to avoid having a
706 * timeout of 0 jiffies if HZ is smaller than 1000.
707 */
708 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
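	/*
	 * Worked example: with HZ=100, HZ/1000 truncates to 0, so the '+ 1'
	 * leaves a 1-jiffy (10 ms) timeout; with HZ=1000 it becomes 2 jiffies
	 * (2 ms).  Either way it comfortably covers the few hundred usec a
	 * typical word write needs.
	 */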
709 int ret = 0;
710 map_word oldd;
711 int retry_cnt = 0;
712
713 adr += chip->start;
714
715 cfi_spin_lock(chip->mutex);
716 ret = get_chip(map, chip, adr, FL_WRITING);
717 if (ret) {
718 cfi_spin_unlock(chip->mutex);
719 return ret;
720 }
721
722 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
723 __func__, adr, datum.x[0] );
724
725 /*
726 * Check for a NOP for the case when the datum to write is already
727 * present - it saves time and works around buggy chips that corrupt
728 * data at other locations when 0xff is written to a location that
729 * already contains 0xff.
730 */
731 oldd = map_read(map, adr);
732 if (map_word_equal(map, oldd, datum)) {
733 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
734 __func__);
735 goto op_done;
736 }
737
738 ENABLE_VPP(map);
739 retry:
740 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
741 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
742 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
743 map_write(map, datum, adr);
744 chip->state = FL_WRITING;
745
746 cfi_spin_unlock(chip->mutex);
747 cfi_udelay(chip->word_write_time);
748 cfi_spin_lock(chip->mutex);
749
750 /* See comment above for timeout value. */
751 timeo = jiffies + uWriteTimeout;
752 for (;;) {
753 if (chip->state != FL_WRITING) {
754 /* Someone's suspended the write. Sleep */
755 DECLARE_WAITQUEUE(wait, current);
756
757 set_current_state(TASK_UNINTERRUPTIBLE);
758 add_wait_queue(&chip->wq, &wait);
759 cfi_spin_unlock(chip->mutex);
760 schedule();
761 remove_wait_queue(&chip->wq, &wait);
762 timeo = jiffies + (HZ / 2); /* FIXME */
763 cfi_spin_lock(chip->mutex);
764 continue;
765 }
766
767 if (chip_ready(map, adr))
768 goto op_done;
769
770 if (time_after(jiffies, timeo))
771 break;
772
773 /* Latency issues. Drop the lock, wait a while and retry */
774 cfi_spin_unlock(chip->mutex);
775 cfi_udelay(1);
776 cfi_spin_lock(chip->mutex);
777 }
778
779 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
780
781 /* reset on all failures. */
782 map_write( map, CMD(0xF0), chip->start );
783 /* FIXME - should have reset delay before continuing */
784 if (++retry_cnt <= MAX_WORD_RETRIES)
785 goto retry;
786
787 ret = -EIO;
788 op_done:
789 chip->state = FL_READY;
790 put_chip(map, chip, adr);
791 cfi_spin_unlock(chip->mutex);
792
793 return ret;
794}
795
796
797static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
798 size_t *retlen, const u_char *buf)
799{
800 struct map_info *map = mtd->priv;
801 struct cfi_private *cfi = map->fldrv_priv;
802 int ret = 0;
803 int chipnum;
804 unsigned long ofs, chipstart;
805 DECLARE_WAITQUEUE(wait, current);
806
807 *retlen = 0;
808 if (!len)
809 return 0;
810
811 chipnum = to >> cfi->chipshift;
812 ofs = to - (chipnum << cfi->chipshift);
813 chipstart = cfi->chips[chipnum].start;
814
815 /* If it's not bus-aligned, do the first byte write */
816 if (ofs & (map_bankwidth(map)-1)) {
817 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
818 int i = ofs - bus_ofs;
819 int n = 0;
820 map_word tmp_buf;
821
822 retry:
823 cfi_spin_lock(cfi->chips[chipnum].mutex);
824
825 if (cfi->chips[chipnum].state != FL_READY) {
826#if 0
827 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
828#endif
829 set_current_state(TASK_UNINTERRUPTIBLE);
830 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
831
832 cfi_spin_unlock(cfi->chips[chipnum].mutex);
833
834 schedule();
835 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
836#if 0
837 if(signal_pending(current))
838 return -EINTR;
839#endif
840 goto retry;
841 }
842
843 /* Load 'tmp_buf' with old contents of flash */
844 tmp_buf = map_read(map, bus_ofs+chipstart);
845
846 cfi_spin_unlock(cfi->chips[chipnum].mutex);
847
848 /* Number of bytes to copy from buffer */
849 n = min_t(int, len, map_bankwidth(map)-i);
850
851 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
852
853 ret = do_write_oneword(map, &cfi->chips[chipnum],
854 bus_ofs, tmp_buf);
855 if (ret)
856 return ret;
857
858 ofs += n;
859 buf += n;
860 (*retlen) += n;
861 len -= n;
862
863 if (ofs >> cfi->chipshift) {
864 chipnum ++;
865 ofs = 0;
866 if (chipnum == cfi->numchips)
867 return 0;
868 }
869 }
870
871 /* We are now aligned, write as much as possible */
872 while(len >= map_bankwidth(map)) {
873 map_word datum;
874
875 datum = map_word_load(map, buf);
876
877 ret = do_write_oneword(map, &cfi->chips[chipnum],
878 ofs, datum);
879 if (ret)
880 return ret;
881
882 ofs += map_bankwidth(map);
883 buf += map_bankwidth(map);
884 (*retlen) += map_bankwidth(map);
885 len -= map_bankwidth(map);
886
887 if (ofs >> cfi->chipshift) {
888 chipnum ++;
889 ofs = 0;
890 if (chipnum == cfi->numchips)
891 return 0;
892 chipstart = cfi->chips[chipnum].start;
893 }
894 }
895
896 /* Write the trailing bytes if any */
897 if (len & (map_bankwidth(map)-1)) {
898 map_word tmp_buf;
899
900 retry1:
901 cfi_spin_lock(cfi->chips[chipnum].mutex);
902
903 if (cfi->chips[chipnum].state != FL_READY) {
904#if 0
905 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
906#endif
907 set_current_state(TASK_UNINTERRUPTIBLE);
908 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
909
910 cfi_spin_unlock(cfi->chips[chipnum].mutex);
911
912 schedule();
913 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
914#if 0
915 if(signal_pending(current))
916 return -EINTR;
917#endif
918 goto retry1;
919 }
920
921 tmp_buf = map_read(map, ofs + chipstart);
922
923 cfi_spin_unlock(cfi->chips[chipnum].mutex);
924
925 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
926
927 ret = do_write_oneword(map, &cfi->chips[chipnum],
928 ofs, tmp_buf);
929 if (ret)
930 return ret;
931
932 (*retlen) += len;
933 }
934
935 return 0;
936}
937
938
939/*
940 * FIXME: interleaved mode not tested, and probably not supported!
941 */
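/*
 * Buffered writes on the AMD command set, as implemented below: after the
 * 0xAA/0x55 unlock cycles, the Write Buffer Load command (0x25) is issued
 * at the sector address, followed by the word count minus one, the data
 * words, and the Write Buffer Program Confirm command (0x29).  Completion
 * is again detected with chip_ready(); a timeout resets the chip with 0xF0
 * and returns -EIO.
 */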
942static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
943 unsigned long adr, const u_char *buf, int len)
944{
945 struct cfi_private *cfi = map->fldrv_priv;
946 unsigned long timeo = jiffies + HZ;
947	/* see comments in do_write_oneword() regarding uWriteTimeout. */
948 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
949 int ret = -EIO;
950 unsigned long cmd_adr;
951 int z, words;
952 map_word datum;
953
954 adr += chip->start;
955 cmd_adr = adr;
956
957 cfi_spin_lock(chip->mutex);
958 ret = get_chip(map, chip, adr, FL_WRITING);
959 if (ret) {
960 cfi_spin_unlock(chip->mutex);
961 return ret;
962 }
963
964 datum = map_word_load(map, buf);
965
966 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
967 __func__, adr, datum.x[0] );
968
969 ENABLE_VPP(map);
970 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
971 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
972 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
973
974 /* Write Buffer Load */
975 map_write(map, CMD(0x25), cmd_adr);
976
977 chip->state = FL_WRITING_TO_BUFFER;
978
979 /* Write length of data to come */
980 words = len / map_bankwidth(map);
981 map_write(map, CMD(words - 1), cmd_adr);
982 /* Write data */
983 z = 0;
984 while(z < words * map_bankwidth(map)) {
985 datum = map_word_load(map, buf);
986 map_write(map, datum, adr + z);
987
988 z += map_bankwidth(map);
989 buf += map_bankwidth(map);
990 }
991 z -= map_bankwidth(map);
992
993 adr += z;
994
995 /* Write Buffer Program Confirm: GO GO GO */
996 map_write(map, CMD(0x29), cmd_adr);
997 chip->state = FL_WRITING;
998
999 cfi_spin_unlock(chip->mutex);
1000 cfi_udelay(chip->buffer_write_time);
1001 cfi_spin_lock(chip->mutex);
1002
1003 timeo = jiffies + uWriteTimeout;
1004
1005 for (;;) {
1006 if (chip->state != FL_WRITING) {
1007 /* Someone's suspended the write. Sleep */
1008 DECLARE_WAITQUEUE(wait, current);
1009
1010 set_current_state(TASK_UNINTERRUPTIBLE);
1011 add_wait_queue(&chip->wq, &wait);
1012 cfi_spin_unlock(chip->mutex);
1013 schedule();
1014 remove_wait_queue(&chip->wq, &wait);
1015 timeo = jiffies + (HZ / 2); /* FIXME */
1016 cfi_spin_lock(chip->mutex);
1017 continue;
1018 }
1019
1020 if (chip_ready(map, adr))
1021 goto op_done;
1022
1023 if( time_after(jiffies, timeo))
1024 break;
1025
1026 /* Latency issues. Drop the lock, wait a while and retry */
1027 cfi_spin_unlock(chip->mutex);
1028 cfi_udelay(1);
1029 cfi_spin_lock(chip->mutex);
1030 }
1031
1032 printk(KERN_WARNING "MTD %s(): software timeout\n",
1033 __func__ );
1034
1035 /* reset on all failures. */
1036 map_write( map, CMD(0xF0), chip->start );
1037 /* FIXME - should have reset delay before continuing */
1038
1039 ret = -EIO;
1040 op_done:
1041 chip->state = FL_READY;
1042 put_chip(map, chip, adr);
1043 cfi_spin_unlock(chip->mutex);
1044
1045 return ret;
1046}
1047
1048
1049static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1050 size_t *retlen, const u_char *buf)
1051{
1052 struct map_info *map = mtd->priv;
1053 struct cfi_private *cfi = map->fldrv_priv;
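	/* MaxBufWriteSize is the log2 of the per-chip write-buffer size;
	   e.g. a 32-byte buffer (MaxBufWriteSize == 5) on a two-way
	   interleaved map gives wbufsize == 64. */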
1054 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1055 int ret = 0;
1056 int chipnum;
1057 unsigned long ofs;
1058
1059 *retlen = 0;
1060 if (!len)
1061 return 0;
1062
1063 chipnum = to >> cfi->chipshift;
1064 ofs = to - (chipnum << cfi->chipshift);
1065
1066 /* If it's not bus-aligned, do the first word write */
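	/* For example, with a 4-byte bankwidth and ofs == 0x1003,
	   (-ofs) & 3 == 1, so a single byte is written first to bring the
	   transfer up to the aligned boundary at 0x1004. */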
1067 if (ofs & (map_bankwidth(map)-1)) {
1068 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1069 if (local_len > len)
1070 local_len = len;
1071 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1072 local_len, retlen, buf);
1073 if (ret)
1074 return ret;
1075 ofs += local_len;
1076 buf += local_len;
1077 len -= local_len;
1078
1079 if (ofs >> cfi->chipshift) {
1080 chipnum ++;
1081 ofs = 0;
1082 if (chipnum == cfi->numchips)
1083 return 0;
1084 }
1085 }
1086
1087 /* Write buffer is worth it only if more than one word to write... */
1088 while (len >= map_bankwidth(map) * 2) {
1089 /* We must not cross write block boundaries */
1090 int size = wbufsize - (ofs & (wbufsize-1));
1091
1092 if (size > len)
1093 size = len;
1094 if (size % map_bankwidth(map))
1095 size -= size % map_bankwidth(map);
1096
1097 ret = do_write_buffer(map, &cfi->chips[chipnum],
1098 ofs, buf, size);
1099 if (ret)
1100 return ret;
1101
1102 ofs += size;
1103 buf += size;
1104 (*retlen) += size;
1105 len -= size;
1106
1107 if (ofs >> cfi->chipshift) {
1108 chipnum ++;
1109 ofs = 0;
1110 if (chipnum == cfi->numchips)
1111 return 0;
1112 }
1113 }
1114
1115 if (len) {
1116 size_t retlen_dregs = 0;
1117
1118 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1119 len, &retlen_dregs, buf);
1120
1121 *retlen += retlen_dregs;
1122 return ret;
1123 }
1124
1125 return 0;
1126}
1127
1128
1129/*
1130 * Handle devices with one erase region, that only implement
1131 * the chip erase command.
1132 */
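/*
 * The sequence sent below is the usual AMD six-cycle chip erase:
 * 0xAA, 0x55, 0x80, 0xAA, 0x55 and finally 0x10 at the unlock addresses.
 * Sector erase (see do_erase_oneblock() below) shares the first five
 * cycles but ends with 0x30 written to the sector address instead.
 */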
1133static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
1134{
1135 struct cfi_private *cfi = map->fldrv_priv;
1136 unsigned long timeo = jiffies + HZ;
1137 unsigned long int adr;
1138 DECLARE_WAITQUEUE(wait, current);
1139 int ret = 0;
1140
1141 adr = cfi->addr_unlock1;
1142
1143 cfi_spin_lock(chip->mutex);
1144 ret = get_chip(map, chip, adr, FL_WRITING);
1145 if (ret) {
1146 cfi_spin_unlock(chip->mutex);
1147 return ret;
1148 }
1149
1150 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1151 __func__, chip->start );
1152
1153 ENABLE_VPP(map);
1154 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1155 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1156 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1157 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1158 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1159 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1160
1161 chip->state = FL_ERASING;
1162 chip->erase_suspended = 0;
1163 chip->in_progress_block_addr = adr;
1164
1165 cfi_spin_unlock(chip->mutex);
1166 msleep(chip->erase_time/2);
1167 cfi_spin_lock(chip->mutex);
1168
1169 timeo = jiffies + (HZ*20);
1170
1171 for (;;) {
1172 if (chip->state != FL_ERASING) {
1173 /* Someone's suspended the erase. Sleep */
1174 set_current_state(TASK_UNINTERRUPTIBLE);
1175 add_wait_queue(&chip->wq, &wait);
1176 cfi_spin_unlock(chip->mutex);
1177 schedule();
1178 remove_wait_queue(&chip->wq, &wait);
1179 cfi_spin_lock(chip->mutex);
1180 continue;
1181 }
1182 if (chip->erase_suspended) {
1183 /* This erase was suspended and resumed.
1184 Adjust the timeout */
1185 timeo = jiffies + (HZ*20); /* FIXME */
1186 chip->erase_suspended = 0;
1187 }
1188
1189 if (chip_ready(map, adr))
1190 goto op_done;
1191
1192 if (time_after(jiffies, timeo))
1193 break;
1194
1195 /* Latency issues. Drop the lock, wait a while and retry */
1196 cfi_spin_unlock(chip->mutex);
1197 set_current_state(TASK_UNINTERRUPTIBLE);
1198 schedule_timeout(1);
1199 cfi_spin_lock(chip->mutex);
1200 }
1201
1202 printk(KERN_WARNING "MTD %s(): software timeout\n",
1203 __func__ );
1204
1205 /* reset on all failures. */
1206 map_write( map, CMD(0xF0), chip->start );
1207 /* FIXME - should have reset delay before continuing */
1208
1209 ret = -EIO;
1210 op_done:
1211 chip->state = FL_READY;
1212 put_chip(map, chip, adr);
1213 cfi_spin_unlock(chip->mutex);
1214
1215 return ret;
1216}
1217
1218
1219static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1220{
1221 struct cfi_private *cfi = map->fldrv_priv;
1222 unsigned long timeo = jiffies + HZ;
1223 DECLARE_WAITQUEUE(wait, current);
1224 int ret = 0;
1225
1226 adr += chip->start;
1227
1228 cfi_spin_lock(chip->mutex);
1229 ret = get_chip(map, chip, adr, FL_ERASING);
1230 if (ret) {
1231 cfi_spin_unlock(chip->mutex);
1232 return ret;
1233 }
1234
1235 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1236 __func__, adr );
1237
1238 ENABLE_VPP(map);
1239 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1240 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1241 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1242 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1243 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1244 map_write(map, CMD(0x30), adr);
1245
1246 chip->state = FL_ERASING;
1247 chip->erase_suspended = 0;
1248 chip->in_progress_block_addr = adr;
1249
1250 cfi_spin_unlock(chip->mutex);
1251 msleep(chip->erase_time/2);
1252 cfi_spin_lock(chip->mutex);
1253
1254 timeo = jiffies + (HZ*20);
1255
1256 for (;;) {
1257 if (chip->state != FL_ERASING) {
1258 /* Someone's suspended the erase. Sleep */
1259 set_current_state(TASK_UNINTERRUPTIBLE);
1260 add_wait_queue(&chip->wq, &wait);
1261 cfi_spin_unlock(chip->mutex);
1262 schedule();
1263 remove_wait_queue(&chip->wq, &wait);
1264 cfi_spin_lock(chip->mutex);
1265 continue;
1266 }
1267 if (chip->erase_suspended) {
1268 /* This erase was suspended and resumed.
1269 Adjust the timeout */
1270 timeo = jiffies + (HZ*20); /* FIXME */
1271 chip->erase_suspended = 0;
1272 }
1273
1274 if (chip_ready(map, adr))
1275 goto op_done;
1276
1277 if (time_after(jiffies, timeo))
1278 break;
1279
1280 /* Latency issues. Drop the lock, wait a while and retry */
1281 cfi_spin_unlock(chip->mutex);
1282 set_current_state(TASK_UNINTERRUPTIBLE);
1283 schedule_timeout(1);
1284 cfi_spin_lock(chip->mutex);
1285 }
1286
1287 printk(KERN_WARNING "MTD %s(): software timeout\n",
1288 __func__ );
1289
1290 /* reset on all failures. */
1291 map_write( map, CMD(0xF0), chip->start );
1292 /* FIXME - should have reset delay before continuing */
1293
1294 ret = -EIO;
1295 op_done:
1296 chip->state = FL_READY;
1297 put_chip(map, chip, adr);
1298 cfi_spin_unlock(chip->mutex);
1299 return ret;
1300}
1301
1302
1303int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1304{
1305 unsigned long ofs, len;
1306 int ret;
1307
1308 ofs = instr->addr;
1309 len = instr->len;
1310
1311 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1312 if (ret)
1313 return ret;
1314
1315 instr->state = MTD_ERASE_DONE;
1316 mtd_erase_callback(instr);
1317
1318 return 0;
1319}
1320
1321
1322static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1323{
1324 struct map_info *map = mtd->priv;
1325 struct cfi_private *cfi = map->fldrv_priv;
1326 int ret = 0;
1327
1328 if (instr->addr != 0)
1329 return -EINVAL;
1330
1331 if (instr->len != mtd->size)
1332 return -EINVAL;
1333
1334 ret = do_erase_chip(map, &cfi->chips[0]);
1335 if (ret)
1336 return ret;
1337
1338 instr->state = MTD_ERASE_DONE;
1339 mtd_erase_callback(instr);
1340
1341 return 0;
1342}
1343
1344
1345static void cfi_amdstd_sync (struct mtd_info *mtd)
1346{
1347 struct map_info *map = mtd->priv;
1348 struct cfi_private *cfi = map->fldrv_priv;
1349 int i;
1350 struct flchip *chip;
1351 int ret = 0;
1352 DECLARE_WAITQUEUE(wait, current);
1353
1354 for (i=0; !ret && i<cfi->numchips; i++) {
1355 chip = &cfi->chips[i];
1356
1357 retry:
1358 cfi_spin_lock(chip->mutex);
1359
1360 switch(chip->state) {
1361 case FL_READY:
1362 case FL_STATUS:
1363 case FL_CFI_QUERY:
1364 case FL_JEDEC_QUERY:
1365 chip->oldstate = chip->state;
1366 chip->state = FL_SYNCING;
1367 /* No need to wake_up() on this state change -
1368 * as the whole point is that nobody can do anything
1369 * with the chip now anyway.
1370 */
1371 case FL_SYNCING:
1372 cfi_spin_unlock(chip->mutex);
1373 break;
1374
1375 default:
1376 /* Not an idle state */
1377 add_wait_queue(&chip->wq, &wait);
1378
1379 cfi_spin_unlock(chip->mutex);
1380
1381 schedule();
1382
1383 remove_wait_queue(&chip->wq, &wait);
1384
1385 goto retry;
1386 }
1387 }
1388
1389 /* Unlock the chips again */
1390
1391 for (i--; i >=0; i--) {
1392 chip = &cfi->chips[i];
1393
1394 cfi_spin_lock(chip->mutex);
1395
1396 if (chip->state == FL_SYNCING) {
1397 chip->state = chip->oldstate;
1398 wake_up(&chip->wq);
1399 }
1400 cfi_spin_unlock(chip->mutex);
1401 }
1402}
1403
1404
1405static int cfi_amdstd_suspend(struct mtd_info *mtd)
1406{
1407 struct map_info *map = mtd->priv;
1408 struct cfi_private *cfi = map->fldrv_priv;
1409 int i;
1410 struct flchip *chip;
1411 int ret = 0;
1412
1413 for (i=0; !ret && i<cfi->numchips; i++) {
1414 chip = &cfi->chips[i];
1415
1416 cfi_spin_lock(chip->mutex);
1417
1418 switch(chip->state) {
1419 case FL_READY:
1420 case FL_STATUS:
1421 case FL_CFI_QUERY:
1422 case FL_JEDEC_QUERY:
1423 chip->oldstate = chip->state;
1424 chip->state = FL_PM_SUSPENDED;
1425 /* No need to wake_up() on this state change -
1426 * as the whole point is that nobody can do anything
1427 * with the chip now anyway.
1428 */
1429 case FL_PM_SUSPENDED:
1430 break;
1431
1432 default:
1433 ret = -EAGAIN;
1434 break;
1435 }
1436 cfi_spin_unlock(chip->mutex);
1437 }
1438
1439 /* Unlock the chips again */
1440
1441 if (ret) {
1442 for (i--; i >=0; i--) {
1443 chip = &cfi->chips[i];
1444
1445 cfi_spin_lock(chip->mutex);
1446
1447 if (chip->state == FL_PM_SUSPENDED) {
1448 chip->state = chip->oldstate;
1449 wake_up(&chip->wq);
1450 }
1451 cfi_spin_unlock(chip->mutex);
1452 }
1453 }
1454
1455 return ret;
1456}
1457
1458
1459static void cfi_amdstd_resume(struct mtd_info *mtd)
1460{
1461 struct map_info *map = mtd->priv;
1462 struct cfi_private *cfi = map->fldrv_priv;
1463 int i;
1464 struct flchip *chip;
1465
1466 for (i=0; i<cfi->numchips; i++) {
1467
1468 chip = &cfi->chips[i];
1469
1470 cfi_spin_lock(chip->mutex);
1471
1472 if (chip->state == FL_PM_SUSPENDED) {
1473 chip->state = FL_READY;
1474 map_write(map, CMD(0xF0), chip->start);
1475 wake_up(&chip->wq);
1476 }
1477 else
1478 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1479
1480 cfi_spin_unlock(chip->mutex);
1481 }
1482}
1483
1484static void cfi_amdstd_destroy(struct mtd_info *mtd)
1485{
1486 struct map_info *map = mtd->priv;
1487 struct cfi_private *cfi = map->fldrv_priv;
1488 kfree(cfi->cmdset_priv);
1489 kfree(cfi->cfiq);
1490 kfree(cfi);
1491 kfree(mtd->eraseregions);
1492}
1493
1494static char im_name[]="cfi_cmdset_0002";
1495
1496
1497static int __init cfi_amdstd_init(void)
1498{
1499 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
1500 return 0;
1501}
1502
1503
1504static void __exit cfi_amdstd_exit(void)
1505{
1506 inter_module_unregister(im_name);
1507}
1508
1509
1510module_init(cfi_amdstd_init);
1511module_exit(cfi_amdstd_exit);
1512
1513MODULE_LICENSE("GPL");
1514MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1515MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
new file mode 100644
index 000000000000..8c24e18db3b4
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -0,0 +1,1418 @@
1/*
2 * Common Flash Interface support:
3 * ST Advanced Architecture Command Set (ID 0x0020)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
7 * $Id: cfi_cmdset_0020.c,v 1.17 2004/11/20 12:49:04 dwmw2 Exp $
8 *
9 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.)
12 * - scalability vs code size is completely set at compile-time
13 * (see include/linux/mtd/cfi.h for selection)
14 * - optimized write buffer method
15 * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
16 * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
17 * (command set 0x0020)
18 * - added a writev function
19 */
20
21#include <linux/version.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/init.h>
27#include <asm/io.h>
28#include <asm/byteorder.h>
29
30#include <linux/errno.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/mtd/map.h>
35#include <linux/mtd/cfi.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/compatmac.h>
38
39
40static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
42static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
43 unsigned long count, loff_t to, size_t *retlen);
44static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
45static void cfi_staa_sync (struct mtd_info *);
46static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
47static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
48static int cfi_staa_suspend (struct mtd_info *);
49static void cfi_staa_resume (struct mtd_info *);
50
51static void cfi_staa_destroy(struct mtd_info *);
52
53struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
54
55static struct mtd_info *cfi_staa_setup (struct map_info *);
56
57static struct mtd_chip_driver cfi_staa_chipdrv = {
58 .probe = NULL, /* Not usable directly */
59 .destroy = cfi_staa_destroy,
60 .name = "cfi_cmdset_0020",
61 .module = THIS_MODULE
62};
63
64/* #define DEBUG_LOCK_BITS */
65//#define DEBUG_CFI_FEATURES
66
67#ifdef DEBUG_CFI_FEATURES
68static void cfi_tell_features(struct cfi_pri_intelext *extp)
69{
70 int i;
71 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
72 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
73 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
74 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
75 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
76 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
77 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
78 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
79 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
80 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
81 for (i=9; i<32; i++) {
82 if (extp->FeatureSupport & (1<<i))
83 printk(" - Unknown Bit %X: supported\n", i);
84 }
85
86 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
87 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
88 for (i=1; i<8; i++) {
89 if (extp->SuspendCmdSupport & (1<<i))
90 printk(" - Unknown Bit %X: supported\n", i);
91 }
92
93 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
94 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
95 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
96 for (i=2; i<16; i++) {
97 if (extp->BlkStatusRegMask & (1<<i))
98 printk(" - Unknown Bit %X Active: yes\n",i);
99 }
100
101 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
102	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
103 if (extp->VppOptimal)
104 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
105		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
106}
107#endif
108
109/* This routine is made available to other mtd code via
110 * inter_module_register. It must only be accessed through
111 * inter_module_get which will bump the use count of this module. The
112 * addresses passed back in cfi are valid as long as the use count of
113 * this module is non-zero, i.e. between inter_module_get and
114 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
115 */
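/* Illustrative only -- a caller would typically look the entry point up
 * by name and invoke it, roughly:
 *
 *	struct mtd_info *(*cmdset)(struct map_info *, int);
 *	cmdset = (void *)inter_module_get("cfi_cmdset_0020");
 *	if (cmdset)
 *		mtd = cmdset(map, primary);
 *
 * The real lookup lives in the generic probe code; the snippet is just a
 * sketch of the contract described above.
 */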
116struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
117{
118 struct cfi_private *cfi = map->fldrv_priv;
119 int i;
120
121 if (cfi->cfi_mode) {
122 /*
123 * It's a real CFI chip, not one for which the probe
124 * routine faked a CFI structure. So we read the feature
125 * table from it.
126 */
127 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
128 struct cfi_pri_intelext *extp;
129
130 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
131 if (!extp)
132 return NULL;
133
134 /* Do some byteswapping if necessary */
135 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
136 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
137
138#ifdef DEBUG_CFI_FEATURES
139 /* Tell the user about it in lots of lovely detail */
140 cfi_tell_features(extp);
141#endif
142
143 /* Install our own private info structure */
144 cfi->cmdset_priv = extp;
145 }
146
147 for (i=0; i< cfi->numchips; i++) {
148 cfi->chips[i].word_write_time = 128;
149 cfi->chips[i].buffer_write_time = 128;
150 cfi->chips[i].erase_time = 1024;
151 }
152
153 return cfi_staa_setup(map);
154}
155
156static struct mtd_info *cfi_staa_setup(struct map_info *map)
157{
158 struct cfi_private *cfi = map->fldrv_priv;
159 struct mtd_info *mtd;
160 unsigned long offset = 0;
161 int i,j;
162 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
163
164 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
165 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
166
167 if (!mtd) {
168 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
169 kfree(cfi->cmdset_priv);
170 return NULL;
171 }
172
173 memset(mtd, 0, sizeof(*mtd));
174 mtd->priv = map;
175 mtd->type = MTD_NORFLASH;
176 mtd->size = devsize * cfi->numchips;
177
178 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
179 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
180 * mtd->numeraseregions, GFP_KERNEL);
181 if (!mtd->eraseregions) {
182 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
183 kfree(cfi->cmdset_priv);
184 kfree(mtd);
185 return NULL;
186 }
187
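	/* Each EraseRegionInfo word packs a region's geometry: the low 16
	   bits hold (number of blocks - 1) and the upper bits the block
	   size in 256-byte units, which is what the shift/mask below
	   unpacks.  E.g. 0x0100003f describes 64 blocks of 64KiB. */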
188 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
189 unsigned long ernum, ersize;
190 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
191 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
192
193 if (mtd->erasesize < ersize) {
194 mtd->erasesize = ersize;
195 }
196 for (j=0; j<cfi->numchips; j++) {
197 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
198 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
199 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
200 }
201 offset += (ersize * ernum);
202 }
203
204 if (offset != devsize) {
205 /* Argh */
206 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
207 kfree(mtd->eraseregions);
208 kfree(cfi->cmdset_priv);
209 kfree(mtd);
210 return NULL;
211 }
212
213 for (i=0; i<mtd->numeraseregions;i++){
214 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
215 i,mtd->eraseregions[i].offset,
216 mtd->eraseregions[i].erasesize,
217 mtd->eraseregions[i].numblocks);
218 }
219
220 /* Also select the correct geometry setup too */
221 mtd->erase = cfi_staa_erase_varsize;
222 mtd->read = cfi_staa_read;
223 mtd->write = cfi_staa_write_buffers;
224 mtd->writev = cfi_staa_writev;
225 mtd->sync = cfi_staa_sync;
226 mtd->lock = cfi_staa_lock;
227 mtd->unlock = cfi_staa_unlock;
228 mtd->suspend = cfi_staa_suspend;
229 mtd->resume = cfi_staa_resume;
230 mtd->flags = MTD_CAP_NORFLASH;
231 mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
232 mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
233 map->fldrv = &cfi_staa_chipdrv;
234 __module_get(THIS_MODULE);
235 mtd->name = map->name;
236 return mtd;
237}
238
239
240static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
241{
242 map_word status, status_OK;
243 unsigned long timeo;
244 DECLARE_WAITQUEUE(wait, current);
245 int suspended = 0;
246 unsigned long cmd_addr;
247 struct cfi_private *cfi = map->fldrv_priv;
248
249 adr += chip->start;
250
251 /* Ensure cmd read/writes are aligned. */
252 cmd_addr = adr & ~(map_bankwidth(map)-1);
253
254 /* Let's determine this according to the interleave only once */
255 status_OK = CMD(0x80);
256
257 timeo = jiffies + HZ;
258 retry:
259 spin_lock_bh(chip->mutex);
260
261 /* Check that the chip's ready to talk to us.
262 * If it's in FL_ERASING state, suspend it and make it talk now.
263 */
264 switch (chip->state) {
265 case FL_ERASING:
266 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
267 goto sleep; /* We don't support erase suspend */
268
269 map_write (map, CMD(0xb0), cmd_addr);
270 /* If the flash has finished erasing, then 'erase suspend'
271 * appears to make some (28F320) flash devices switch to
272 * 'read' mode. Make sure that we switch to 'read status'
273 * mode so we get the right data. --rmk
274 */
275 map_write(map, CMD(0x70), cmd_addr);
276 chip->oldstate = FL_ERASING;
277 chip->state = FL_ERASE_SUSPENDING;
278 // printk("Erase suspending at 0x%lx\n", cmd_addr);
279 for (;;) {
280 status = map_read(map, cmd_addr);
281 if (map_word_andequal(map, status, status_OK, status_OK))
282 break;
283
284 if (time_after(jiffies, timeo)) {
285 /* Urgh */
286 map_write(map, CMD(0xd0), cmd_addr);
287 /* make sure we're in 'read status' mode */
288 map_write(map, CMD(0x70), cmd_addr);
289 chip->state = FL_ERASING;
290 spin_unlock_bh(chip->mutex);
291 printk(KERN_ERR "Chip not ready after erase "
292 "suspended: status = 0x%lx\n", status.x[0]);
293 return -EIO;
294 }
295
296 spin_unlock_bh(chip->mutex);
297 cfi_udelay(1);
298 spin_lock_bh(chip->mutex);
299 }
300
301 suspended = 1;
302 map_write(map, CMD(0xff), cmd_addr);
303 chip->state = FL_READY;
304 break;
305
306#if 0
307 case FL_WRITING:
308 /* Not quite yet */
309#endif
310
311 case FL_READY:
312 break;
313
314 case FL_CFI_QUERY:
315 case FL_JEDEC_QUERY:
316 map_write(map, CMD(0x70), cmd_addr);
317 chip->state = FL_STATUS;
318
319 case FL_STATUS:
320 status = map_read(map, cmd_addr);
321 if (map_word_andequal(map, status, status_OK, status_OK)) {
322 map_write(map, CMD(0xff), cmd_addr);
323 chip->state = FL_READY;
324 break;
325 }
326
327 /* Urgh. Chip not yet ready to talk to us. */
328 if (time_after(jiffies, timeo)) {
329 spin_unlock_bh(chip->mutex);
330 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
331 return -EIO;
332 }
333
334 /* Latency issues. Drop the lock, wait a while and retry */
335 spin_unlock_bh(chip->mutex);
336 cfi_udelay(1);
337 goto retry;
338
339 default:
340 sleep:
341 /* Stick ourselves on a wait queue to be woken when
342 someone changes the status */
343 set_current_state(TASK_UNINTERRUPTIBLE);
344 add_wait_queue(&chip->wq, &wait);
345 spin_unlock_bh(chip->mutex);
346 schedule();
347 remove_wait_queue(&chip->wq, &wait);
348 timeo = jiffies + HZ;
349 goto retry;
350 }
351
352 map_copy_from(map, buf, adr, len);
353
354 if (suspended) {
355 chip->state = chip->oldstate;
356 /* What if one interleaved chip has finished and the
357 other hasn't? The old code would leave the finished
358 one in READY mode. That's bad, and caused -EROFS
359 errors to be returned from do_erase_oneblock because
360 that's the only bit it checked for at the time.
361 As the state machine appears to explicitly allow
362 sending the 0x70 (Read Status) command to an erasing
363 chip and expecting it to be ignored, that's what we
364 do. */
365 map_write(map, CMD(0xd0), cmd_addr);
366 map_write(map, CMD(0x70), cmd_addr);
367 }
368
369 wake_up(&chip->wq);
370 spin_unlock_bh(chip->mutex);
371 return 0;
372}
373
374static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
375{
376 struct map_info *map = mtd->priv;
377 struct cfi_private *cfi = map->fldrv_priv;
378 unsigned long ofs;
379 int chipnum;
380 int ret = 0;
381
382	/* ofs: offset within the first chip at which the read should start */
383 chipnum = (from >> cfi->chipshift);
384 ofs = from - (chipnum << cfi->chipshift);
385
386 *retlen = 0;
387
388 while (len) {
389 unsigned long thislen;
390
391 if (chipnum >= cfi->numchips)
392 break;
393
394 if ((len + ofs -1) >> cfi->chipshift)
395 thislen = (1<<cfi->chipshift) - ofs;
396 else
397 thislen = len;
398
399 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
400 if (ret)
401 break;
402
403 *retlen += thislen;
404 len -= thislen;
405 buf += thislen;
406
407 ofs = 0;
408 chipnum++;
409 }
410 return ret;
411}
412
413static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
414 unsigned long adr, const u_char *buf, int len)
415{
416 struct cfi_private *cfi = map->fldrv_priv;
417 map_word status, status_OK;
418 unsigned long cmd_adr, timeo;
419 DECLARE_WAITQUEUE(wait, current);
420 int wbufsize, z;
421
422	/* M58LW064A requires bus alignment for buffer writes -- saw */
423 if (adr & (map_bankwidth(map)-1))
424 return -EINVAL;
425
426 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
427 adr += chip->start;
428 cmd_adr = adr & ~(wbufsize-1);
429
430 /* Let's determine this according to the interleave only once */
431 status_OK = CMD(0x80);
432
433 timeo = jiffies + HZ;
434 retry:
435
436#ifdef DEBUG_CFI_FEATURES
437 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
438#endif
439 spin_lock_bh(chip->mutex);
440
441 /* Check that the chip's ready to talk to us.
442 * Later, we can actually think about interrupting it
443 * if it's in FL_ERASING state.
444 * Not just yet, though.
445 */
446 switch (chip->state) {
447 case FL_READY:
448 break;
449
450 case FL_CFI_QUERY:
451 case FL_JEDEC_QUERY:
452 map_write(map, CMD(0x70), cmd_adr);
453 chip->state = FL_STATUS;
454#ifdef DEBUG_CFI_FEATURES
455 printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
456#endif
457
458 case FL_STATUS:
459 status = map_read(map, cmd_adr);
460 if (map_word_andequal(map, status, status_OK, status_OK))
461 break;
462 /* Urgh. Chip not yet ready to talk to us. */
463 if (time_after(jiffies, timeo)) {
464 spin_unlock_bh(chip->mutex);
465 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
466 status.x[0], map_read(map, cmd_adr).x[0]);
467 return -EIO;
468 }
469
470 /* Latency issues. Drop the lock, wait a while and retry */
471 spin_unlock_bh(chip->mutex);
472 cfi_udelay(1);
473 goto retry;
474
475 default:
476 /* Stick ourselves on a wait queue to be woken when
477 someone changes the status */
478 set_current_state(TASK_UNINTERRUPTIBLE);
479 add_wait_queue(&chip->wq, &wait);
480 spin_unlock_bh(chip->mutex);
481 schedule();
482 remove_wait_queue(&chip->wq, &wait);
483 timeo = jiffies + HZ;
484 goto retry;
485 }
486
487 ENABLE_VPP(map);
488 map_write(map, CMD(0xe8), cmd_adr);
489 chip->state = FL_WRITING_TO_BUFFER;
490
491 z = 0;
492 for (;;) {
493 status = map_read(map, cmd_adr);
494 if (map_word_andequal(map, status, status_OK, status_OK))
495 break;
496
497 spin_unlock_bh(chip->mutex);
498 cfi_udelay(1);
499 spin_lock_bh(chip->mutex);
500
501 if (++z > 100) {
502 /* Argh. Not ready for write to buffer */
503 DISABLE_VPP(map);
504 map_write(map, CMD(0x70), cmd_adr);
505 chip->state = FL_STATUS;
506 spin_unlock_bh(chip->mutex);
507 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
508 return -EIO;
509 }
510 }
511
512 /* Write length of data to come */
513 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
514
515 /* Write data */
516 for (z = 0; z < len;
517 z += map_bankwidth(map), buf += map_bankwidth(map)) {
518 map_word d;
519 d = map_word_load(map, buf);
520 map_write(map, d, adr+z);
521 }
522 /* GO GO GO */
523 map_write(map, CMD(0xd0), cmd_adr);
524 chip->state = FL_WRITING;
525
526 spin_unlock_bh(chip->mutex);
527 cfi_udelay(chip->buffer_write_time);
528 spin_lock_bh(chip->mutex);
529
530 timeo = jiffies + (HZ/2);
531 z = 0;
532 for (;;) {
533 if (chip->state != FL_WRITING) {
534 /* Someone's suspended the write. Sleep */
535 set_current_state(TASK_UNINTERRUPTIBLE);
536 add_wait_queue(&chip->wq, &wait);
537 spin_unlock_bh(chip->mutex);
538 schedule();
539 remove_wait_queue(&chip->wq, &wait);
540 timeo = jiffies + (HZ / 2); /* FIXME */
541 spin_lock_bh(chip->mutex);
542 continue;
543 }
544
545 status = map_read(map, cmd_adr);
546 if (map_word_andequal(map, status, status_OK, status_OK))
547 break;
548
549 /* OK Still waiting */
550 if (time_after(jiffies, timeo)) {
551 /* clear status */
552 map_write(map, CMD(0x50), cmd_adr);
553 /* put back into read status register mode */
554 map_write(map, CMD(0x70), adr);
555 chip->state = FL_STATUS;
556 DISABLE_VPP(map);
557 spin_unlock_bh(chip->mutex);
558 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
559 return -EIO;
560 }
561
562 /* Latency issues. Drop the lock, wait a while and retry */
563 spin_unlock_bh(chip->mutex);
564 cfi_udelay(1);
565 z++;
566 spin_lock_bh(chip->mutex);
567 }
568 if (!z) {
569 chip->buffer_write_time--;
570 if (!chip->buffer_write_time)
571 chip->buffer_write_time++;
572 }
573 if (z > 1)
574 chip->buffer_write_time++;
575
576 /* Done and happy. */
577 DISABLE_VPP(map);
578 chip->state = FL_STATUS;
579
580 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
581 if (map_word_bitsset(map, status, CMD(0x3a))) {
582#ifdef DEBUG_CFI_FEATURES
583 printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
584#endif
585 /* clear status */
586 map_write(map, CMD(0x50), cmd_adr);
587 /* put back into read status register mode */
588 map_write(map, CMD(0x70), adr);
589 wake_up(&chip->wq);
590 spin_unlock_bh(chip->mutex);
591 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
592 }
593 wake_up(&chip->wq);
594 spin_unlock_bh(chip->mutex);
595
596 return 0;
597}
598
599static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
600 size_t len, size_t *retlen, const u_char *buf)
601{
602 struct map_info *map = mtd->priv;
603 struct cfi_private *cfi = map->fldrv_priv;
604 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
605 int ret = 0;
606 int chipnum;
607 unsigned long ofs;
608
609 *retlen = 0;
610 if (!len)
611 return 0;
612
613 chipnum = to >> cfi->chipshift;
614 ofs = to - (chipnum << cfi->chipshift);
615
616#ifdef DEBUG_CFI_FEATURES
617 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
618 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
619 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
620#endif
621
622	/* Write the data out one write-buffer block at a time */
623 while (len > 0) {
624 /* We must not cross write block boundaries */
625 int size = wbufsize - (ofs & (wbufsize-1));
626
627 if (size > len)
628 size = len;
629
630 ret = do_write_buffer(map, &cfi->chips[chipnum],
631 ofs, buf, size);
632 if (ret)
633 return ret;
634
635 ofs += size;
636 buf += size;
637 (*retlen) += size;
638 len -= size;
639
640 if (ofs >> cfi->chipshift) {
641 chipnum ++;
642 ofs = 0;
643 if (chipnum == cfi->numchips)
644 return 0;
645 }
646 }
647
648 return 0;
649}
650
651/*
652 * Writev for ECC-Flashes is a little more complicated. We need to maintain
653 * a small buffer for this.
654 * XXX: If the buffer size is not a power of 2, this will break
655 */
656#define ECCBUF_SIZE (mtd->eccsize)
657#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
658#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
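/*
 * Illustrative numbers: with mtd->eccsize == 8 (a power of two, which the
 * mask arithmetic above relies on), ECCBUF_DIV(13) == 8 and
 * ECCBUF_MOD(13) == 5: eight bytes go out aligned and five are carried
 * over in the bounce buffer for the next iovec.
 */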
659static int
660cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
661 unsigned long count, loff_t to, size_t *retlen)
662{
663 unsigned long i;
664 size_t totlen = 0, thislen;
665 int ret = 0;
666 size_t buflen = 0;
667	char *buffer;
668
669 if (!ECCBUF_SIZE) {
670 /* We should fall back to a general writev implementation.
671 * Until that is written, just break.
672 */
673 return -EIO;
674 }
675 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
676 if (!buffer)
677 return -ENOMEM;
678
679 for (i=0; i<count; i++) {
680 size_t elem_len = vecs[i].iov_len;
681 void *elem_base = vecs[i].iov_base;
682 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
683 continue;
684 if (buflen) { /* cut off head */
685 if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
686 memcpy(buffer+buflen, elem_base, elem_len);
687 buflen += elem_len;
688 continue;
689 }
690 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
691 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
692 totlen += thislen;
693 if (ret || thislen != ECCBUF_SIZE)
694 goto write_error;
695 elem_len -= thislen-buflen;
696 elem_base += thislen-buflen;
697 to += ECCBUF_SIZE;
698 }
699 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
700 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
701 totlen += thislen;
702 if (ret || thislen != ECCBUF_DIV(elem_len))
703 goto write_error;
704 to += thislen;
705 }
706 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
707 if (buflen) {
708 memset(buffer, 0xff, ECCBUF_SIZE);
709 memcpy(buffer, elem_base + thislen, buflen);
710 }
711 }
712 if (buflen) { /* flush last page, even if not full */
713 /* This is sometimes intended behaviour, really */
714 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
715 totlen += thislen;
716		if (ret || thislen != buflen)
717 goto write_error;
718 }
719write_error:
720 if (retlen)
721 *retlen = totlen;
	kfree(buffer);
722	return ret;
723}
724
725
726static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
727{
728 struct cfi_private *cfi = map->fldrv_priv;
729 map_word status, status_OK;
730 unsigned long timeo;
731 int retries = 3;
732 DECLARE_WAITQUEUE(wait, current);
733 int ret = 0;
734
735 adr += chip->start;
736
737 /* Let's determine this according to the interleave only once */
738 status_OK = CMD(0x80);
739
740 timeo = jiffies + HZ;
741retry:
742 spin_lock_bh(chip->mutex);
743
744 /* Check that the chip's ready to talk to us. */
745 switch (chip->state) {
746 case FL_CFI_QUERY:
747 case FL_JEDEC_QUERY:
748 case FL_READY:
749 map_write(map, CMD(0x70), adr);
750 chip->state = FL_STATUS;
751
752 case FL_STATUS:
753 status = map_read(map, adr);
754 if (map_word_andequal(map, status, status_OK, status_OK))
755 break;
756
757 /* Urgh. Chip not yet ready to talk to us. */
758 if (time_after(jiffies, timeo)) {
759 spin_unlock_bh(chip->mutex);
760 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
761 return -EIO;
762 }
763
764 /* Latency issues. Drop the lock, wait a while and retry */
765 spin_unlock_bh(chip->mutex);
766 cfi_udelay(1);
767 goto retry;
768
769 default:
770 /* Stick ourselves on a wait queue to be woken when
771 someone changes the status */
772 set_current_state(TASK_UNINTERRUPTIBLE);
773 add_wait_queue(&chip->wq, &wait);
774 spin_unlock_bh(chip->mutex);
775 schedule();
776 remove_wait_queue(&chip->wq, &wait);
777 timeo = jiffies + HZ;
778 goto retry;
779 }
780
781 ENABLE_VPP(map);
782 /* Clear the status register first */
783 map_write(map, CMD(0x50), adr);
784
785 /* Now erase */
786 map_write(map, CMD(0x20), adr);
787 map_write(map, CMD(0xD0), adr);
788 chip->state = FL_ERASING;
789
790 spin_unlock_bh(chip->mutex);
791 msleep(1000);
792 spin_lock_bh(chip->mutex);
793
794 /* FIXME. Use a timer to check this, and return immediately. */
795 /* Once the state machine's known to be working I'll do that */
796
797 timeo = jiffies + (HZ*20);
798 for (;;) {
799 if (chip->state != FL_ERASING) {
800 /* Someone's suspended the erase. Sleep */
801 set_current_state(TASK_UNINTERRUPTIBLE);
802 add_wait_queue(&chip->wq, &wait);
803 spin_unlock_bh(chip->mutex);
804 schedule();
805 remove_wait_queue(&chip->wq, &wait);
806 timeo = jiffies + (HZ*20); /* FIXME */
807 spin_lock_bh(chip->mutex);
808 continue;
809 }
810
811 status = map_read(map, adr);
812 if (map_word_andequal(map, status, status_OK, status_OK))
813 break;
814
815 /* OK Still waiting */
816 if (time_after(jiffies, timeo)) {
817 map_write(map, CMD(0x70), adr);
818 chip->state = FL_STATUS;
819 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
820 DISABLE_VPP(map);
821 spin_unlock_bh(chip->mutex);
822 return -EIO;
823 }
824
825 /* Latency issues. Drop the lock, wait a while and retry */
826 spin_unlock_bh(chip->mutex);
827 cfi_udelay(1);
828 spin_lock_bh(chip->mutex);
829 }
830
831 DISABLE_VPP(map);
832 ret = 0;
833
834 /* We've broken this before. It doesn't hurt to be safe */
835 map_write(map, CMD(0x70), adr);
836 chip->state = FL_STATUS;
837 status = map_read(map, adr);
838
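	/* The 0x3a mask below selects the status register error bits:
	   bit 1 (block locked), bit 3 (Vpp low), bit 4 (program failure)
	   and bit 5 (erase failure).  Bit 7, tested via status_OK, is the
	   "write state machine ready" flag. */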
839 /* check for lock bit */
840 if (map_word_bitsset(map, status, CMD(0x3a))) {
841 unsigned char chipstatus = status.x[0];
842 if (!map_word_equal(map, status, CMD(chipstatus))) {
843 int i, w;
844 for (w=0; w<map_words(map); w++) {
845 for (i = 0; i<cfi_interleave(cfi); i++) {
846 chipstatus |= status.x[w] >> (cfi->device_type * 8);
847 }
848 }
849 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
850 status.x[0], chipstatus);
851 }
852 /* Reset the error bits */
853 map_write(map, CMD(0x50), adr);
854 map_write(map, CMD(0x70), adr);
855
856 if ((chipstatus & 0x30) == 0x30) {
857 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
858 ret = -EIO;
859 } else if (chipstatus & 0x02) {
860 /* Protection bit set */
861 ret = -EROFS;
862 } else if (chipstatus & 0x8) {
863 /* Voltage */
864 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
865 ret = -EIO;
866 } else if (chipstatus & 0x20) {
867 if (retries--) {
868 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
869 timeo = jiffies + HZ;
870 chip->state = FL_STATUS;
871 spin_unlock_bh(chip->mutex);
872 goto retry;
873 }
874 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
875 ret = -EIO;
876 }
877 }
878
879 wake_up(&chip->wq);
880 spin_unlock_bh(chip->mutex);
881 return ret;
882}
883
884int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
885{
	struct map_info *map = mtd->priv;
886 struct cfi_private *cfi = map->fldrv_priv;
887 unsigned long adr, len;
888 int chipnum, ret = 0;
889 int i, first;
890 struct mtd_erase_region_info *regions = mtd->eraseregions;
891
892 if (instr->addr > mtd->size)
893 return -EINVAL;
894
895 if ((instr->len + instr->addr) > mtd->size)
896 return -EINVAL;
897
898 /* Check that both start and end of the requested erase are
899 * aligned with the erasesize at the appropriate addresses.
900 */
901
902 i = 0;
903
904 /* Skip all erase regions which are ended before the start of
905 the requested erase. Actually, to save on the calculations,
906 we skip to the first erase region which starts after the
907 start of the requested erase, and then go back one.
908 */
909
910 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
911 i++;
912 i--;
913
914 /* OK, now i is pointing at the erase region in which this
915 erase request starts. Check the start of the requested
916 erase range is aligned with the erase size which is in
917 effect here.
918 */
919
920 if (instr->addr & (regions[i].erasesize-1))
921 return -EINVAL;
922
923 /* Remember the erase region we start on */
924 first = i;
925
926 /* Next, check that the end of the requested erase is aligned
927 * with the erase region at that address.
928 */
929
930 while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
931 i++;
932
933 /* As before, drop back one to point at the region in which
934 the address actually falls
935 */
936 i--;
937
938 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
939 return -EINVAL;
940
941 chipnum = instr->addr >> cfi->chipshift;
942 adr = instr->addr - (chipnum << cfi->chipshift);
943 len = instr->len;
944
945 i=first;
946
947 while(len) {
948 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
949
950 if (ret)
951 return ret;
952
953 adr += regions[i].erasesize;
954 len -= regions[i].erasesize;
955
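		/* Once adr (modulo the chip size) reaches the end of the
		   current erase region, step on to the next region so the
		   following iterations use its erasesize. */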
956 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
957 i++;
958
959 if (adr >> cfi->chipshift) {
960 adr = 0;
961 chipnum++;
962
963 if (chipnum >= cfi->numchips)
964 break;
965 }
966 }
967
968 instr->state = MTD_ERASE_DONE;
969 mtd_erase_callback(instr);
970
971 return 0;
972}
973
974static void cfi_staa_sync (struct mtd_info *mtd)
975{
976 struct map_info *map = mtd->priv;
977 struct cfi_private *cfi = map->fldrv_priv;
978 int i;
979 struct flchip *chip;
980 int ret = 0;
981 DECLARE_WAITQUEUE(wait, current);
982
983 for (i=0; !ret && i<cfi->numchips; i++) {
984 chip = &cfi->chips[i];
985
986 retry:
987 spin_lock_bh(chip->mutex);
988
989 switch(chip->state) {
990 case FL_READY:
991 case FL_STATUS:
992 case FL_CFI_QUERY:
993 case FL_JEDEC_QUERY:
994 chip->oldstate = chip->state;
995 chip->state = FL_SYNCING;
996 /* No need to wake_up() on this state change -
997 * as the whole point is that nobody can do anything
998 * with the chip now anyway.
999 */
1000 case FL_SYNCING:
1001 spin_unlock_bh(chip->mutex);
1002 break;
1003
1004 default:
1005 /* Not an idle state */
1006 add_wait_queue(&chip->wq, &wait);
1007
1008 spin_unlock_bh(chip->mutex);
1009 schedule();
1010 remove_wait_queue(&chip->wq, &wait);
1011
1012 goto retry;
1013 }
1014 }
1015
1016 /* Unlock the chips again */
1017
1018 for (i--; i >=0; i--) {
1019 chip = &cfi->chips[i];
1020
1021 spin_lock_bh(chip->mutex);
1022
1023 if (chip->state == FL_SYNCING) {
1024 chip->state = chip->oldstate;
1025 wake_up(&chip->wq);
1026 }
1027 spin_unlock_bh(chip->mutex);
1028 }
1029}
1030
1031static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1032{
1033 struct cfi_private *cfi = map->fldrv_priv;
1034 map_word status, status_OK;
1035 unsigned long timeo = jiffies + HZ;
1036 DECLARE_WAITQUEUE(wait, current);
1037
1038 adr += chip->start;
1039
1040 /* Let's determine this according to the interleave only once */
1041 status_OK = CMD(0x80);
1042
1043 timeo = jiffies + HZ;
1044retry:
1045 spin_lock_bh(chip->mutex);
1046
1047 /* Check that the chip's ready to talk to us. */
1048 switch (chip->state) {
1049 case FL_CFI_QUERY:
1050 case FL_JEDEC_QUERY:
1051 case FL_READY:
1052 map_write(map, CMD(0x70), adr);
1053 chip->state = FL_STATUS;
1054
1055 case FL_STATUS:
1056 status = map_read(map, adr);
1057 if (map_word_andequal(map, status, status_OK, status_OK))
1058 break;
1059
1060 /* Urgh. Chip not yet ready to talk to us. */
1061 if (time_after(jiffies, timeo)) {
1062 spin_unlock_bh(chip->mutex);
1063 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1064 return -EIO;
1065 }
1066
1067 /* Latency issues. Drop the lock, wait a while and retry */
1068 spin_unlock_bh(chip->mutex);
1069 cfi_udelay(1);
1070 goto retry;
1071
1072 default:
1073 /* Stick ourselves on a wait queue to be woken when
1074 someone changes the status */
1075 set_current_state(TASK_UNINTERRUPTIBLE);
1076 add_wait_queue(&chip->wq, &wait);
1077 spin_unlock_bh(chip->mutex);
1078 schedule();
1079 remove_wait_queue(&chip->wq, &wait);
1080 timeo = jiffies + HZ;
1081 goto retry;
1082 }
1083
1084 ENABLE_VPP(map);
1085 map_write(map, CMD(0x60), adr);
1086 map_write(map, CMD(0x01), adr);
1087 chip->state = FL_LOCKING;
1088
1089 spin_unlock_bh(chip->mutex);
1090 msleep(1000);
1091 spin_lock_bh(chip->mutex);
1092
1093 /* FIXME. Use a timer to check this, and return immediately. */
1094 /* Once the state machine's known to be working I'll do that */
1095
1096 timeo = jiffies + (HZ*2);
1097 for (;;) {
1098
1099 status = map_read(map, adr);
1100 if (map_word_andequal(map, status, status_OK, status_OK))
1101 break;
1102
1103 /* OK Still waiting */
1104 if (time_after(jiffies, timeo)) {
1105 map_write(map, CMD(0x70), adr);
1106 chip->state = FL_STATUS;
1107 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1108 DISABLE_VPP(map);
1109 spin_unlock_bh(chip->mutex);
1110 return -EIO;
1111 }
1112
1113 /* Latency issues. Drop the lock, wait a while and retry */
1114 spin_unlock_bh(chip->mutex);
1115 cfi_udelay(1);
1116 spin_lock_bh(chip->mutex);
1117 }
1118
1119 /* Done and happy. */
1120 chip->state = FL_STATUS;
1121 DISABLE_VPP(map);
1122 wake_up(&chip->wq);
1123 spin_unlock_bh(chip->mutex);
1124 return 0;
1125}
1126static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1127{
1128 struct map_info *map = mtd->priv;
1129 struct cfi_private *cfi = map->fldrv_priv;
1130 unsigned long adr;
1131 int chipnum, ret = 0;
1132#ifdef DEBUG_LOCK_BITS
1133 int ofs_factor = cfi->interleave * cfi->device_type;
1134#endif
1135
1136 if (ofs & (mtd->erasesize - 1))
1137 return -EINVAL;
1138
1139 if (len & (mtd->erasesize -1))
1140 return -EINVAL;
1141
1142 if ((len + ofs) > mtd->size)
1143 return -EINVAL;
1144
1145 chipnum = ofs >> cfi->chipshift;
1146 adr = ofs - (chipnum << cfi->chipshift);
1147
1148 while(len) {
1149
1150#ifdef DEBUG_LOCK_BITS
1151 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1152 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1153 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1154#endif
1155
1156 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1157
1158#ifdef DEBUG_LOCK_BITS
1159 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1160 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1161 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1162#endif
1163
1164 if (ret)
1165 return ret;
1166
1167 adr += mtd->erasesize;
1168 len -= mtd->erasesize;
1169
1170 if (adr >> cfi->chipshift) {
1171 adr = 0;
1172 chipnum++;
1173
1174 if (chipnum >= cfi->numchips)
1175 break;
1176 }
1177 }
1178 return 0;
1179}
1180static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1181{
1182 struct cfi_private *cfi = map->fldrv_priv;
1183 map_word status, status_OK;
1184 unsigned long timeo = jiffies + HZ;
1185 DECLARE_WAITQUEUE(wait, current);
1186
1187 adr += chip->start;
1188
1189 /* Let's determine this according to the interleave only once */
1190 status_OK = CMD(0x80);
1191
1192 timeo = jiffies + HZ;
1193retry:
1194 spin_lock_bh(chip->mutex);
1195
1196 /* Check that the chip's ready to talk to us. */
1197 switch (chip->state) {
1198 case FL_CFI_QUERY:
1199 case FL_JEDEC_QUERY:
1200 case FL_READY:
1201 map_write(map, CMD(0x70), adr);
1202 chip->state = FL_STATUS;
1203
1204 case FL_STATUS:
1205 status = map_read(map, adr);
1206 if (map_word_andequal(map, status, status_OK, status_OK))
1207 break;
1208
1209 /* Urgh. Chip not yet ready to talk to us. */
1210 if (time_after(jiffies, timeo)) {
1211 spin_unlock_bh(chip->mutex);
1212 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1213 return -EIO;
1214 }
1215
1216 /* Latency issues. Drop the lock, wait a while and retry */
1217 spin_unlock_bh(chip->mutex);
1218 cfi_udelay(1);
1219 goto retry;
1220
1221 default:
1222 /* Stick ourselves on a wait queue to be woken when
1223 someone changes the status */
1224 set_current_state(TASK_UNINTERRUPTIBLE);
1225 add_wait_queue(&chip->wq, &wait);
1226 spin_unlock_bh(chip->mutex);
1227 schedule();
1228 remove_wait_queue(&chip->wq, &wait);
1229 timeo = jiffies + HZ;
1230 goto retry;
1231 }
1232
1233 ENABLE_VPP(map);
1234 map_write(map, CMD(0x60), adr);
1235 map_write(map, CMD(0xD0), adr);
1236 chip->state = FL_UNLOCKING;
1237
1238 spin_unlock_bh(chip->mutex);
1239 msleep(1000);
1240 spin_lock_bh(chip->mutex);
1241
1242 /* FIXME. Use a timer to check this, and return immediately. */
1243 /* Once the state machine's known to be working I'll do that */
1244
1245 timeo = jiffies + (HZ*2);
1246 for (;;) {
1247
1248 status = map_read(map, adr);
1249 if (map_word_andequal(map, status, status_OK, status_OK))
1250 break;
1251
1252 /* OK Still waiting */
1253 if (time_after(jiffies, timeo)) {
1254 map_write(map, CMD(0x70), adr);
1255 chip->state = FL_STATUS;
1256 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1257 DISABLE_VPP(map);
1258 spin_unlock_bh(chip->mutex);
1259 return -EIO;
1260 }
1261
1262		/* Latency issues. Drop the lock, wait a while and retry */
1263 spin_unlock_bh(chip->mutex);
1264 cfi_udelay(1);
1265 spin_lock_bh(chip->mutex);
1266 }
1267
1268 /* Done and happy. */
1269 chip->state = FL_STATUS;
1270 DISABLE_VPP(map);
1271 wake_up(&chip->wq);
1272 spin_unlock_bh(chip->mutex);
1273 return 0;
1274}
1275static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1276{
1277 struct map_info *map = mtd->priv;
1278 struct cfi_private *cfi = map->fldrv_priv;
1279 unsigned long adr;
1280 int chipnum, ret = 0;
1281#ifdef DEBUG_LOCK_BITS
1282 int ofs_factor = cfi->interleave * cfi->device_type;
1283#endif
1284
1285 chipnum = ofs >> cfi->chipshift;
1286 adr = ofs - (chipnum << cfi->chipshift);
1287
1288#ifdef DEBUG_LOCK_BITS
1289 {
1290 unsigned long temp_adr = adr;
1291 unsigned long temp_len = len;
1292
1293 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1294 while (temp_len) {
1295 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1296 temp_adr += mtd->erasesize;
1297 temp_len -= mtd->erasesize;
1298 }
1299 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1300 }
1301#endif
1302
1303 ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1304
1305#ifdef DEBUG_LOCK_BITS
1306 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1307 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1308 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1309#endif
1310
1311 return ret;
1312}
1313
1314static int cfi_staa_suspend(struct mtd_info *mtd)
1315{
1316 struct map_info *map = mtd->priv;
1317 struct cfi_private *cfi = map->fldrv_priv;
1318 int i;
1319 struct flchip *chip;
1320 int ret = 0;
1321
1322 for (i=0; !ret && i<cfi->numchips; i++) {
1323 chip = &cfi->chips[i];
1324
1325 spin_lock_bh(chip->mutex);
1326
1327 switch(chip->state) {
1328 case FL_READY:
1329 case FL_STATUS:
1330 case FL_CFI_QUERY:
1331 case FL_JEDEC_QUERY:
1332 chip->oldstate = chip->state;
1333 chip->state = FL_PM_SUSPENDED;
1334 /* No need to wake_up() on this state change -
1335 * as the whole point is that nobody can do anything
1336 * with the chip now anyway.
1337 */
1338 case FL_PM_SUSPENDED:
1339 break;
1340
1341 default:
1342 ret = -EAGAIN;
1343 break;
1344 }
1345 spin_unlock_bh(chip->mutex);
1346 }
1347
1348 /* Unlock the chips again */
1349
1350 if (ret) {
1351 for (i--; i >=0; i--) {
1352 chip = &cfi->chips[i];
1353
1354 spin_lock_bh(chip->mutex);
1355
1356 if (chip->state == FL_PM_SUSPENDED) {
1357 /* No need to force it into a known state here,
1358 because we're returning failure, and it didn't
1359 get power cycled */
1360 chip->state = chip->oldstate;
1361 wake_up(&chip->wq);
1362 }
1363 spin_unlock_bh(chip->mutex);
1364 }
1365 }
1366
1367 return ret;
1368}
1369
1370static void cfi_staa_resume(struct mtd_info *mtd)
1371{
1372 struct map_info *map = mtd->priv;
1373 struct cfi_private *cfi = map->fldrv_priv;
1374 int i;
1375 struct flchip *chip;
1376
1377 for (i=0; i<cfi->numchips; i++) {
1378
1379 chip = &cfi->chips[i];
1380
1381 spin_lock_bh(chip->mutex);
1382
1383 /* Go to known state. Chip may have been power cycled */
1384 if (chip->state == FL_PM_SUSPENDED) {
1385 map_write(map, CMD(0xFF), 0);
1386 chip->state = FL_READY;
1387 wake_up(&chip->wq);
1388 }
1389
1390 spin_unlock_bh(chip->mutex);
1391 }
1392}
1393
1394static void cfi_staa_destroy(struct mtd_info *mtd)
1395{
1396 struct map_info *map = mtd->priv;
1397 struct cfi_private *cfi = map->fldrv_priv;
1398 kfree(cfi->cmdset_priv);
1399 kfree(cfi);
1400}
1401
1402static char im_name[]="cfi_cmdset_0020";
1403
1404static int __init cfi_staa_init(void)
1405{
1406 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
1407 return 0;
1408}
1409
1410static void __exit cfi_staa_exit(void)
1411{
1412 inter_module_unregister(im_name);
1413}
1414
1415module_init(cfi_staa_init);
1416module_exit(cfi_staa_exit);
1417
1418MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
new file mode 100644
index 000000000000..cf750038ce6a
--- /dev/null
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -0,0 +1,445 @@
1/*
2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd.
4 $Id: cfi_probe.c,v 1.83 2004/11/16 18:19:02 nico Exp $
5*/
6
7#include <linux/config.h>
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <asm/io.h>
13#include <asm/byteorder.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/interrupt.h>
17
18#include <linux/mtd/xip.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/cfi.h>
21#include <linux/mtd/gen_probe.h>
22
23//#define DEBUG_CFI
24
25#ifdef DEBUG_CFI
26static void print_cfi_ident(struct cfi_ident *);
27#endif
28
29static int cfi_probe_chip(struct map_info *map, __u32 base,
30 unsigned long *chip_map, struct cfi_private *cfi);
31static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
32
33struct mtd_info *cfi_probe(struct map_info *map);
34
35#ifdef CONFIG_MTD_XIP
36
37/* only needed for short periods, so this is rather simple */
38#define xip_disable() local_irq_disable()
39
40#define xip_allowed(base, map) \
41do { \
42 (void) map_read(map, base); \
43 asm volatile (".rep 8; nop; .endr"); \
44 local_irq_enable(); \
45} while (0)
46
47#define xip_enable(base, map, cfi) \
48do { \
49 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
50 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
51 xip_allowed(base, map); \
52} while (0)
53
54#define xip_disable_qry(base, map, cfi) \
55do { \
56 xip_disable(); \
57 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
58 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
59 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \
60} while (0)
61
62#else
63
64#define xip_disable() do { } while (0)
65#define xip_allowed(base, map) do { } while (0)
66#define xip_enable(base, map, cfi) do { } while (0)
67#define xip_disable_qry(base, map, cfi) do { } while (0)
68
69#endif
70
 71/* Check for the "QRY" marker.
 72   in: map, base, cfi (interleave and device type)
 73   ret: 1 if "QRY" was found at base, 0 if not
 74 */
75static int __xipram qry_present(struct map_info *map, __u32 base,
76 struct cfi_private *cfi)
77{
78 int osf = cfi->interleave * cfi->device_type; // scale factor
79 map_word val[3];
80 map_word qry[3];
81
82 qry[0] = cfi_build_cmd('Q', map, cfi);
83 qry[1] = cfi_build_cmd('R', map, cfi);
84 qry[2] = cfi_build_cmd('Y', map, cfi);
85
86 val[0] = map_read(map, base + osf*0x10);
87 val[1] = map_read(map, base + osf*0x11);
88 val[2] = map_read(map, base + osf*0x12);
89
90 if (!map_word_equal(map, qry[0], val[0]))
91 return 0;
92
93 if (!map_word_equal(map, qry[1], val[1]))
94 return 0;
95
96 if (!map_word_equal(map, qry[2], val[2]))
97 return 0;
98
99 return 1; // "QRY" found
100}
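/*
 * Illustrative standalone sketch (not from the original source): shows how
 * qry_present() above scales the CFI query offsets 0x10..0x12 by
 * osf = interleave * device_type, turning device-word offsets into byte
 * offsets on the mapped bus.  The interleave/device_type pairs below are
 * made-up examples, not values read from hardware; compile separately
 * from the driver.
 */
#include <stdio.h>

int main(void)
{
        struct { int interleave, device_type; } ex[] = {
                { 1, 1 },       /* one x8 chip on an 8-bit bus   */
                { 1, 2 },       /* one x16 chip on a 16-bit bus  */
                { 2, 2 },       /* two x16 chips on a 32-bit bus */
        };
        unsigned int i;

        for (i = 0; i < sizeof(ex)/sizeof(ex[0]); i++) {
                int osf = ex[i].interleave * ex[i].device_type;

                printf("interleave=%d x%d: 'Q' at 0x%02x, 'R' at 0x%02x, 'Y' at 0x%02x\n",
                       ex[i].interleave, ex[i].device_type * 8,
                       osf * 0x10, osf * 0x11, osf * 0x12);
        }
        return 0;
}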
101
102static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
103 unsigned long *chip_map, struct cfi_private *cfi)
104{
105 int i;
106
107 if ((base + 0) >= map->size) {
108 printk(KERN_NOTICE
109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
110 (unsigned long)base, map->size -1);
111 return 0;
112 }
113 if ((base + 0xff) >= map->size) {
114 printk(KERN_NOTICE
115 "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
116 (unsigned long)base + 0x55, map->size -1);
117 return 0;
118 }
119
120 xip_disable();
121 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
122 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
123 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
124
125 if (!qry_present(map,base,cfi)) {
126 xip_enable(base, map, cfi);
127 return 0;
128 }
129
130 if (!cfi->numchips) {
131 /* This is the first time we're called. Set up the CFI
132 stuff accordingly and return */
133 return cfi_chip_setup(map, cfi);
134 }
135
136 /* Check each previous chip to see if it's an alias */
137 for (i=0; i < (base >> cfi->chipshift); i++) {
138 unsigned long start;
139 if(!test_bit(i, chip_map)) {
140 /* Skip location; no valid chip at this address */
141 continue;
142 }
143 start = i << cfi->chipshift;
144 /* This chip should be in read mode if it's one
145 we've already touched. */
146 if (qry_present(map, start, cfi)) {
147 /* Eep. This chip also had the QRY marker.
148 * Is it an alias for the new one? */
149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
151
152 /* If the QRY marker goes away, it's an alias */
153 if (!qry_present(map, start, cfi)) {
154 xip_allowed(base, map);
155 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
156 map->name, base, start);
157 return 0;
158 }
159 /* Yes, it's actually got QRY for data. Most
160 * unfortunate. Stick the new chip in read mode
161 * too and if it's the same, assume it's an alias. */
162 /* FIXME: Use other modes to do a proper check */
163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
165
166 if (qry_present(map, base, cfi)) {
167 xip_allowed(base, map);
168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
169 map->name, base, start);
170 return 0;
171 }
172 }
173 }
174
175 /* OK, if we got to here, then none of the previous chips appear to
176 be aliases for the current one. */
177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
178 cfi->numchips++;
179
180 /* Put it back into Read Mode */
181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
183 xip_allowed(base, map);
184
185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
186 map->name, cfi->interleave, cfi->device_type*8, base,
187 map->bankwidth*8);
188
189 return 1;
190}
191
192static int __xipram cfi_chip_setup(struct map_info *map,
193 struct cfi_private *cfi)
194{
195 int ofs_factor = cfi->interleave*cfi->device_type;
196 __u32 base = 0;
197 int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
198 int i;
199
200 xip_enable(base, map, cfi);
201#ifdef DEBUG_CFI
202 printk("Number of erase regions: %d\n", num_erase_regions);
203#endif
204 if (!num_erase_regions)
205 return 0;
206
207 cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
208 if (!cfi->cfiq) {
209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
210 return 0;
211 }
212
213 memset(cfi->cfiq,0,sizeof(struct cfi_ident));
214
215 cfi->cfi_mode = CFI_MODE_CFI;
216
217 /* Read the CFI info structure */
218 xip_disable_qry(base, map, cfi);
219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
220 ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
221
222 /* Note we put the device back into Read Mode BEFORE going into Auto
223 * Select Mode, as some devices support nesting of modes, others
224 * don't. This way should always work.
225 * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
226 * so should be treated as nops or illegal (and so put the device
227 * back into Read Mode, which is a nop in this case).
228 */
229 cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
230 cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
233 cfi->mfr = cfi_read_query(map, base);
234 cfi->id = cfi_read_query(map, base + ofs_factor);
235
236 /* Put it back into Read Mode */
237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
238 /* ... even if it's an Intel chip */
239 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
240 xip_allowed(base, map);
241
242 /* Do any necessary byteswapping */
243 cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
244
245 cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
246 cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
247 cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
248 cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
249 cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
250
251#ifdef DEBUG_CFI
252 /* Dump the information therein */
253 print_cfi_ident(cfi->cfiq);
254#endif
255
256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
258
259#ifdef DEBUG_CFI
260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
263#endif
264 }
265
266 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
267 map->name, cfi->interleave, cfi->device_type*8, base,
268 map->bankwidth*8);
269
270 return 1;
271}
272
273#ifdef DEBUG_CFI
274static char *vendorname(__u16 vendor)
275{
276 switch (vendor) {
277 case P_ID_NONE:
278 return "None";
279
280 case P_ID_INTEL_EXT:
281 return "Intel/Sharp Extended";
282
283 case P_ID_AMD_STD:
284 return "AMD/Fujitsu Standard";
285
286 case P_ID_INTEL_STD:
287 return "Intel/Sharp Standard";
288
289 case P_ID_AMD_EXT:
290 return "AMD/Fujitsu Extended";
291
292 case P_ID_WINBOND:
293 return "Winbond Standard";
294
295 case P_ID_ST_ADV:
296 return "ST Advanced";
297
298 case P_ID_MITSUBISHI_STD:
299 return "Mitsubishi Standard";
300
301 case P_ID_MITSUBISHI_EXT:
302 return "Mitsubishi Extended";
303
304 case P_ID_SST_PAGE:
305 return "SST Page Write";
306
307 case P_ID_INTEL_PERFORMANCE:
308 return "Intel Performance Code";
309
310 case P_ID_INTEL_DATA:
311 return "Intel Data";
312
313 case P_ID_RESERVED:
314 return "Not Allowed / Reserved for Future Use";
315
316 default:
317 return "Unknown";
318 }
319}
320
321
322static void print_cfi_ident(struct cfi_ident *cfip)
323{
324#if 0
325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
326 printk("Invalid CFI ident structure.\n");
327 return;
328 }
329#endif
330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
331 if (cfip->P_ADR)
332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
333 else
334 printk("No Primary Algorithm Table\n");
335
336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
337 if (cfip->A_ADR)
338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
339 else
340 printk("No Alternate Algorithm Table\n");
341
342
343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
345 if (cfip->VppMin) {
346 printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
347 printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
348 }
349 else
350 printk("No Vpp line\n");
351
352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
354
355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
358 }
359 else
360 printk("Full buffer write not supported\n");
361
362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
367 }
368 else
369 printk("Chip erase not supported\n");
370
371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
373 switch(cfip->InterfaceDesc) {
374 case 0:
375 printk(" - x8-only asynchronous interface\n");
376 break;
377
378 case 1:
379 printk(" - x16-only asynchronous interface\n");
380 break;
381
382 case 2:
383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
384 break;
385
386 case 3:
387 printk(" - x32-only asynchronous interface\n");
388 break;
389
390 case 4:
391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
392 break;
393
394 case 65535:
395 printk(" - Not Allowed / Reserved\n");
396 break;
397
398 default:
399 printk(" - Unknown\n");
400 break;
401 }
402
403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
405
406}
407#endif /* DEBUG_CFI */
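/*
 * Illustrative standalone sketch (not from the original source): decodes a
 * few CFI ident fields the same way print_cfi_ident() above does - volts
 * and tenths in the high/low nibbles, timeouts and device size as powers
 * of two.  The raw values below are made-up examples of those encodings,
 * not data read from a real chip; compile separately from the driver.
 */
#include <stdio.h>

int main(void)
{
        unsigned char VccMin = 0x27;            /* 2.7 V               */
        unsigned char WordWriteTimeoutTyp = 4;  /* 2^4 us typical      */
        unsigned char WordWriteTimeoutMax = 3;  /* 2^3 times typical   */
        unsigned char DevSize = 21;             /* 2^21 bytes = 2 MiB  */

        printf("Vcc Minimum: %d.%d V\n", VccMin >> 4, VccMin & 0xf);
        printf("Typical word write timeout: %d us\n", 1 << WordWriteTimeoutTyp);
        printf("Maximum word write timeout: %d us\n",
               (1 << WordWriteTimeoutMax) * (1 << WordWriteTimeoutTyp));
        printf("Device size: 0x%X bytes (%d MiB)\n",
               1 << DevSize, 1 << (DevSize - 20));
        return 0;
}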
408
409static struct chip_probe cfi_chip_probe = {
410 .name = "CFI",
411 .probe_chip = cfi_probe_chip
412};
413
414struct mtd_info *cfi_probe(struct map_info *map)
415{
416 /*
417 * Just use the generic probe stuff to call our CFI-specific
418 * chip_probe routine in all the possible permutations, etc.
419 */
420 return mtd_do_chip_probe(map, &cfi_chip_probe);
421}
422
423static struct mtd_chip_driver cfi_chipdrv = {
424 .probe = cfi_probe,
425 .name = "cfi_probe",
426 .module = THIS_MODULE
427};
428
429int __init cfi_probe_init(void)
430{
431 register_mtd_chip_driver(&cfi_chipdrv);
432 return 0;
433}
434
435static void __exit cfi_probe_exit(void)
436{
437 unregister_mtd_chip_driver(&cfi_chipdrv);
438}
439
440module_init(cfi_probe_init);
441module_exit(cfi_probe_exit);
442
443MODULE_LICENSE("GPL");
444MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
445MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
new file mode 100644
index 000000000000..2b2ede2bfcca
--- /dev/null
+++ b/drivers/mtd/chips/cfi_util.c
@@ -0,0 +1,196 @@
1/*
2 * Common Flash Interface support:
 3 * Generic utility functions not dependent on command set
4 *
5 * Copyright (C) 2002 Red Hat
6 * Copyright (C) 2003 STMicroelectronics Limited
7 *
8 * This code is covered by the GPL.
9 *
10 * $Id: cfi_util.c,v 1.8 2004/12/14 19:55:56 nico Exp $
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <asm/io.h>
19#include <asm/byteorder.h>
20
21#include <linux/errno.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/mtd/xip.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/map.h>
28#include <linux/mtd/cfi.h>
29#include <linux/mtd/compatmac.h>
30
31struct cfi_extquery *
32__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
33{
34 struct cfi_private *cfi = map->fldrv_priv;
35 __u32 base = 0; // cfi->chips[0].start;
36 int ofs_factor = cfi->interleave * cfi->device_type;
37 int i;
38 struct cfi_extquery *extp = NULL;
39
40 printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
41 if (!adr)
42 goto out;
43
44 extp = kmalloc(size, GFP_KERNEL);
45 if (!extp) {
46 printk(KERN_ERR "Failed to allocate memory\n");
47 goto out;
48 }
49
50#ifdef CONFIG_MTD_XIP
51 local_irq_disable();
52#endif
53
54 /* Switch it into Query Mode */
55 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
56
57 /* Read in the Extended Query Table */
58 for (i=0; i<size; i++) {
59 ((unsigned char *)extp)[i] =
60 cfi_read_query(map, base+((adr+i)*ofs_factor));
61 }
62
63 /* Make sure it returns to read mode */
64 cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
65 cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
66
67#ifdef CONFIG_MTD_XIP
68 (void) map_read(map, base);
69 asm volatile (".rep 8; nop; .endr");
70 local_irq_enable();
71#endif
72
73 if (extp->MajorVersion != '1' ||
74 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
75 printk(KERN_WARNING " Unknown %s Extended Query "
76 "version %c.%c.\n", name, extp->MajorVersion,
77 extp->MinorVersion);
78 kfree(extp);
79 extp = NULL;
80 }
81
82 out: return extp;
83}
84
85EXPORT_SYMBOL(cfi_read_pri);
86
87void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
88{
89 struct map_info *map = mtd->priv;
90 struct cfi_private *cfi = map->fldrv_priv;
91 struct cfi_fixup *f;
92
93 for (f=fixups; f->fixup; f++) {
94 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
95 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
96 f->fixup(mtd, f->param);
97 }
98 }
99}
100
101EXPORT_SYMBOL(cfi_fixup);
102
103int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
104 loff_t ofs, size_t len, void *thunk)
105{
106 struct map_info *map = mtd->priv;
107 struct cfi_private *cfi = map->fldrv_priv;
108 unsigned long adr;
109 int chipnum, ret = 0;
110 int i, first;
111 struct mtd_erase_region_info *regions = mtd->eraseregions;
112
113 if (ofs > mtd->size)
114 return -EINVAL;
115
116 if ((len + ofs) > mtd->size)
117 return -EINVAL;
118
119 /* Check that both start and end of the requested erase are
120 * aligned with the erasesize at the appropriate addresses.
121 */
122
123 i = 0;
124
125	/* Skip all erase regions which end before the start of
126 the requested erase. Actually, to save on the calculations,
127 we skip to the first erase region which starts after the
128 start of the requested erase, and then go back one.
129 */
130
131 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
132 i++;
133 i--;
134
135 /* OK, now i is pointing at the erase region in which this
136 erase request starts. Check the start of the requested
137 erase range is aligned with the erase size which is in
138 effect here.
139 */
140
141 if (ofs & (regions[i].erasesize-1))
142 return -EINVAL;
143
144 /* Remember the erase region we start on */
145 first = i;
146
147 /* Next, check that the end of the requested erase is aligned
148 * with the erase region at that address.
149 */
150
151 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
152 i++;
153
154 /* As before, drop back one to point at the region in which
155 the address actually falls
156 */
157 i--;
158
159 if ((ofs + len) & (regions[i].erasesize-1))
160 return -EINVAL;
161
162 chipnum = ofs >> cfi->chipshift;
163 adr = ofs - (chipnum << cfi->chipshift);
164
165 i=first;
166
167 while(len) {
168 int size = regions[i].erasesize;
169
170 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
171
172 if (ret)
173 return ret;
174
175 adr += size;
176 ofs += size;
177 len -= size;
178
179 if (ofs == regions[i].offset + size * regions[i].numblocks)
180 i++;
181
182 if (adr >> cfi->chipshift) {
183 adr = 0;
184 chipnum++;
185
186 if (chipnum >= cfi->numchips)
187 break;
188 }
189 }
190
191 return 0;
192}
193
194EXPORT_SYMBOL(cfi_varsize_frob);
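/*
 * Illustrative standalone sketch (not from the original source): walks the
 * same "skip ahead, then step back one" erase-region search that
 * cfi_varsize_frob() above uses to find the region an offset falls in and
 * to check its alignment.  The two-region layout and the offset are a
 * made-up example geometry; compile separately from the driver.
 */
#include <stdio.h>

struct region { unsigned long offset, erasesize, numblocks; };

int main(void)
{
        /* 8 x 8KiB parameter blocks followed by 15 x 64KiB main blocks */
        struct region regions[] = {
                { 0x00000, 0x02000, 8 },
                { 0x10000, 0x10000, 15 },
        };
        int numregions = 2;
        unsigned long ofs = 0x30000;    /* start of the requested erase */
        int i = 0;

        /* skip to the first region starting after ofs, then go back one */
        while (i < numregions && ofs >= regions[i].offset)
                i++;
        i--;

        printf("offset 0x%lx falls in region %d (erasesize 0x%lx), %s\n",
               ofs, i, regions[i].erasesize,
               (ofs & (regions[i].erasesize - 1)) ? "misaligned" : "aligned");
        return 0;
}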
195
196MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
new file mode 100644
index 000000000000..d7d739a108ae
--- /dev/null
+++ b/drivers/mtd/chips/chipreg.c
@@ -0,0 +1,111 @@
1/*
2 * $Id: chipreg.c,v 1.17 2004/11/16 18:29:00 dwmw2 Exp $
3 *
4 * Registration for chip drivers
5 *
6 */
7
8#include <linux/kernel.h>
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/kmod.h>
12#include <linux/spinlock.h>
13#include <linux/slab.h>
14#include <linux/mtd/map.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/compatmac.h>
17
18static DEFINE_SPINLOCK(chip_drvs_lock);
19static LIST_HEAD(chip_drvs_list);
20
21void register_mtd_chip_driver(struct mtd_chip_driver *drv)
22{
23 spin_lock(&chip_drvs_lock);
24 list_add(&drv->list, &chip_drvs_list);
25 spin_unlock(&chip_drvs_lock);
26}
27
28void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
29{
30 spin_lock(&chip_drvs_lock);
31 list_del(&drv->list);
32 spin_unlock(&chip_drvs_lock);
33}
34
35static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
36{
37 struct list_head *pos;
38 struct mtd_chip_driver *ret = NULL, *this;
39
40 spin_lock(&chip_drvs_lock);
41
42 list_for_each(pos, &chip_drvs_list) {
43 this = list_entry(pos, typeof(*this), list);
44
45 if (!strcmp(this->name, name)) {
46 ret = this;
47 break;
48 }
49 }
50 if (ret && !try_module_get(ret->module))
51 ret = NULL;
52
53 spin_unlock(&chip_drvs_lock);
54
55 return ret;
56}
57
58 /* Hide all the horrid details, like some silly person taking
59 get_module_symbol() away from us, from the caller. */
60
61struct mtd_info *do_map_probe(const char *name, struct map_info *map)
62{
63 struct mtd_chip_driver *drv;
64 struct mtd_info *ret;
65
66 drv = get_mtd_chip_driver(name);
67
68 if (!drv && !request_module("%s", name))
69 drv = get_mtd_chip_driver(name);
70
71 if (!drv)
72 return NULL;
73
74 ret = drv->probe(map);
75
76 /* We decrease the use count here. It may have been a
77 probe-only module, which is no longer required from this
78 point, having given us a handle on (and increased the use
79 count of) the actual driver code.
80 */
81 module_put(drv->module);
82
83 if (ret)
84 return ret;
85
86 return NULL;
87}
88/*
89 * Destroy an MTD device which was created for a map device.
90 * Make sure the MTD device is already unregistered before calling this
91 */
92void map_destroy(struct mtd_info *mtd)
93{
94 struct map_info *map = mtd->priv;
95
96 if (map->fldrv->destroy)
97 map->fldrv->destroy(mtd);
98
99 module_put(map->fldrv->module);
100
101 kfree(mtd);
102}
103
104EXPORT_SYMBOL(register_mtd_chip_driver);
105EXPORT_SYMBOL(unregister_mtd_chip_driver);
106EXPORT_SYMBOL(do_map_probe);
107EXPORT_SYMBOL(map_destroy);
108
109MODULE_LICENSE("GPL");
110MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
111MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
new file mode 100644
index 000000000000..fbf44708a861
--- /dev/null
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -0,0 +1,107 @@
1#ifndef FWH_LOCK_H
2#define FWH_LOCK_H
3
4
5enum fwh_lock_state {
6 FWH_UNLOCKED = 0,
7 FWH_DENY_WRITE = 1,
8 FWH_IMMUTABLE = 2,
9 FWH_DENY_READ = 4,
10};
11
12struct fwh_xxlock_thunk {
13 enum fwh_lock_state val;
14 flstate_t state;
15};
16
17
18#define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
19#define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})
20
21/*
 22 * This locking/unlocking is specific to firmware hub parts. Only one
23 * is known that supports the Intel command set. Firmware
24 * hub parts cannot be interleaved as they are on the LPC bus
25 * so this code has not been tested with interleaved chips,
26 * and will likely fail in that context.
27 */
28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
29 unsigned long adr, int len, void *thunk)
30{
31 struct cfi_private *cfi = map->fldrv_priv;
32 struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
33 int ret;
34
 35	/* Refuse the operation if we cannot look behind the chip */
36 if (chip->start < 0x400000) {
37 DEBUG( MTD_DEBUG_LEVEL3,
38 "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
39 __func__, chip->start );
40 return -EIO;
41 }
42 /*
43 * lock block registers:
 44	 * - on 64k boundaries, and
45 * - bit 1 set high
46 * - block lock registers are 4MiB lower - overflow subtract (danger)
47 *
48 * The address manipulation is first done on the logical address
49 * which is 0 at the start of the chip, and then the offset of
 50	 * the individual chip is added to it. In any other order, a weird
51 * map offset could cause problems.
52 */
53 adr = (adr & ~0xffffUL) | 0x2;
54 adr += chip->start - 0x400000;
55
56 /*
57 * This is easy because these are writes to registers and not writes
58 * to flash memory - that means that we don't have to check status
59 * and timeout.
60 */
61 cfi_spin_lock(chip->mutex);
62 ret = get_chip(map, chip, adr, FL_LOCKING);
63 if (ret) {
64 cfi_spin_unlock(chip->mutex);
65 return ret;
66 }
67
68 chip->state = xxlt->state;
69 map_write(map, CMD(xxlt->val), adr);
70
71 /* Done and happy. */
72 chip->state = FL_READY;
73 put_chip(map, chip, adr);
74 cfi_spin_unlock(chip->mutex);
75 return 0;
76}
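/*
 * Illustrative standalone sketch (not from the original source): reproduces
 * the address arithmetic used by fwh_xxlock_oneblock() above - the block
 * lock register sits on the 64KiB block boundary, at byte offset 2, in a
 * register window 4MiB below the chip itself.  chip_start and the logical
 * address are made-up example values; compile separately from the driver.
 */
#include <stdio.h>

int main(void)
{
        unsigned long chip_start = 0xffc00000UL; /* where the chip sits in the map */
        unsigned long adr = 0x25000;             /* logical address, 0 = chip start */

        adr = (adr & ~0xffffUL) | 0x2;           /* 64KiB block base, register at +2 */
        adr += chip_start - 0x400000;            /* register window is 4MiB lower    */

        printf("lock register for that block is at map address 0x%lx\n", adr);
        return 0;
}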
77
78
79static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
80{
81 int ret;
82
83 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
84 (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);
85
86 return ret;
87}
88
89
90static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
91{
92 int ret;
93
94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
96
97 return ret;
98}
99
100static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
101{
102 printk(KERN_NOTICE "using fwh lock/unlock method\n");
103 /* Setup for the chips with the fwh lock method */
104 mtd->lock = fwh_lock_varsize;
105 mtd->unlock = fwh_unlock_varsize;
106}
107#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
new file mode 100644
index 000000000000..fc982c4671f0
--- /dev/null
+++ b/drivers/mtd/chips/gen_probe.c
@@ -0,0 +1,255 @@
1/*
2 * Routines common to all CFI-type probes.
3 * (C) 2001-2003 Red Hat, Inc.
4 * GPL'd
5 * $Id: gen_probe.c,v 1.21 2004/08/14 15:14:05 dwmw2 Exp $
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/module.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/cfi.h>
14#include <linux/mtd/gen_probe.h>
15
16static struct mtd_info *check_cmd_set(struct map_info *, int);
17static struct cfi_private *genprobe_ident_chips(struct map_info *map,
18 struct chip_probe *cp);
19static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
20 struct cfi_private *cfi);
21
22struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
23{
24 struct mtd_info *mtd = NULL;
25 struct cfi_private *cfi;
26
27 /* First probe the map to see if we have CFI stuff there. */
28 cfi = genprobe_ident_chips(map, cp);
29
30 if (!cfi)
31 return NULL;
32
33 map->fldrv_priv = cfi;
34 /* OK we liked it. Now find a driver for the command set it talks */
35
36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */
37 if (!mtd)
38 mtd = check_cmd_set(map, 0); /* Then the secondary */
39
40 if (mtd)
41 return mtd;
42
43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
44
45 kfree(cfi->cfiq);
46 kfree(cfi);
47 map->fldrv_priv = NULL;
48 return NULL;
49}
50EXPORT_SYMBOL(mtd_do_chip_probe);
51
52
53static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
54{
55 struct cfi_private cfi;
56 struct cfi_private *retcfi;
57 unsigned long *chip_map;
58 int i, j, mapsize;
59 int max_chips;
60
61 memset(&cfi, 0, sizeof(cfi));
62
63 /* Call the probetype-specific code with all permutations of
64 interleave and device type, etc. */
65 if (!genprobe_new_chip(map, cp, &cfi)) {
66 /* The probe didn't like it */
67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
68 cp->name, map->name);
69 return NULL;
70 }
71
72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
73 probe routines won't ever return a broken CFI structure anyway,
74 because they make them up themselves.
75 */
76 if (cfi.cfiq->NumEraseRegions == 0) {
77 printk(KERN_WARNING "Number of erase regions is zero\n");
78 kfree(cfi.cfiq);
79 return NULL;
80 }
81#endif
82 cfi.chipshift = cfi.cfiq->DevSize;
83
84 if (cfi_interleave_is_1(&cfi)) {
85 ;
86 } else if (cfi_interleave_is_2(&cfi)) {
87 cfi.chipshift++;
88 } else if (cfi_interleave_is_4((&cfi))) {
89 cfi.chipshift += 2;
90 } else if (cfi_interleave_is_8(&cfi)) {
91 cfi.chipshift += 3;
92 } else {
93 BUG();
94 }
95
96 cfi.numchips = 1;
97
98 /*
99 * Allocate memory for bitmap of valid chips.
100 * Align bitmap storage size to full byte.
101 */
102 max_chips = map->size >> cfi.chipshift;
103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
104 chip_map = kmalloc(mapsize, GFP_KERNEL);
105 if (!chip_map) {
106 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
107 kfree(cfi.cfiq);
108 return NULL;
109 }
110 memset (chip_map, 0, mapsize);
111
112 set_bit(0, chip_map); /* Mark first chip valid */
113
114 /*
115 * Now probe for other chips, checking sensibly for aliases while
116 * we're at it. The new_chip probe above should have let the first
117 * chip in read mode.
118 */
119
120 for (i = 1; i < max_chips; i++) {
121 cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
122 }
123
124 /*
125 * Now allocate the space for the structures we need to return to
126 * our caller, and copy the appropriate data into them.
127 */
128
129 retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
130
131 if (!retcfi) {
132 printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
133 kfree(cfi.cfiq);
134 kfree(chip_map);
135 return NULL;
136 }
137
138 memcpy(retcfi, &cfi, sizeof(cfi));
139 memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
140
141 for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
142 if(test_bit(i, chip_map)) {
143 struct flchip *pchip = &retcfi->chips[j++];
144
145 pchip->start = (i << cfi.chipshift);
146 pchip->state = FL_READY;
147 init_waitqueue_head(&pchip->wq);
148 spin_lock_init(&pchip->_spinlock);
149 pchip->mutex = &pchip->_spinlock;
150 }
151 }
152
153 kfree(chip_map);
154 return retcfi;
155}
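/*
 * Illustrative standalone sketch (not from the original source): shows how
 * genprobe_ident_chips() above sizes the chip_map bitmap.  chipshift is the
 * CFI DevSize (log2 of one device) plus log2 of the interleave, and the
 * bitmap is rounded up to whole bytes.  The geometry below is a made-up
 * example; compile separately from the driver.
 */
#include <stdio.h>

int main(void)
{
        int DevSize = 21;                       /* each device is 2^21 = 2 MiB */
        int interleave = 2;                     /* two devices side by side    */
        unsigned long map_size = 36UL << 20;    /* 36 MiB mapping              */
        int chipshift = DevSize + 1;            /* +1 because interleave is 2  */
        int max_chips = map_size >> chipshift;  /* 36 MiB / 4 MiB = 9          */
        int mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);

        printf("max_chips=%d -> chip_map needs %d byte(s)\n", max_chips, mapsize);
        return 0;
}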
156
157
158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
159 struct cfi_private *cfi)
160{
161	int min_chips = (map_bankwidth(map)/4?:1); /* Devices are at most 4 bytes wide... */
162	int max_chips = map_bankwidth(map); /* ... and at least 1 byte wide. */
163 int nr_chips, type;
164
165 for (nr_chips = min_chips; nr_chips <= max_chips; nr_chips <<= 1) {
166
167 if (!cfi_interleave_supported(nr_chips))
168 continue;
169
170 cfi->interleave = nr_chips;
171
172 /* Minimum device size. Don't look for one 8-bit device
173 in a 16-bit bus, etc. */
174 type = map_bankwidth(map) / nr_chips;
175
176 for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
177 cfi->device_type = type;
178
179 if (cp->probe_chip(map, 0, NULL, cfi))
180 return 1;
181 }
182 }
183 return 0;
184}
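/*
 * Illustrative standalone sketch (not from the original source): enumerates
 * the (interleave, device width) permutations genprobe_new_chip() above
 * would try for a given bank width, ignoring the per-config
 * cfi_interleave_supported() filter.  bankwidth=4 (a 32-bit bus) is a
 * made-up example, and CFI_DEVICETYPE_X32 is defined locally here to
 * mirror the kernel value; compile separately from the driver.
 */
#include <stdio.h>

#define CFI_DEVICETYPE_X32 4    /* device_type counts bytes per device word */

int main(void)
{
        int bankwidth = 4;
        int min_chips = (bankwidth / 4) ? (bankwidth / 4) : 1;
        int max_chips = bankwidth;
        int nr_chips, type;

        for (nr_chips = min_chips; nr_chips <= max_chips; nr_chips <<= 1)
                for (type = bankwidth / nr_chips; type <= CFI_DEVICETYPE_X32; type <<= 1)
                        printf("try %d chip(s) of x%d\n", nr_chips, type * 8);
        return 0;
}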
185
186typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
187
188extern cfi_cmdset_fn_t cfi_cmdset_0001;
189extern cfi_cmdset_fn_t cfi_cmdset_0002;
190extern cfi_cmdset_fn_t cfi_cmdset_0020;
191
192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
193 int primary)
194{
195 struct cfi_private *cfi = map->fldrv_priv;
196 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
197#if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE)
198 char probename[32];
199 cfi_cmdset_fn_t *probe_function;
200
201 sprintf(probename, "cfi_cmdset_%4.4X", type);
202
203 probe_function = inter_module_get_request(probename, probename);
204
205 if (probe_function) {
206 struct mtd_info *mtd;
207
208 mtd = (*probe_function)(map, primary);
209 /* If it was happy, it'll have increased its own use count */
210 inter_module_put(probename);
211 return mtd;
212 }
213#endif
214 printk(KERN_NOTICE "Support for command set %04X not present\n",
215 type);
216
217 return NULL;
218}
219
220static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
221{
222 struct cfi_private *cfi = map->fldrv_priv;
223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
224
225 if (type == P_ID_NONE || type == P_ID_RESERVED)
226 return NULL;
227
228 switch(type){
229 /* Urgh. Ifdefs. The version with weak symbols was
230 * _much_ nicer. Shame it didn't seem to work on
231 * anything but x86, really.
232	 * But we can't rely on inter_module_get() because
233 * that'd mean we depend on link order.
234 */
235#ifdef CONFIG_MTD_CFI_INTELEXT
236 case 0x0001:
237 case 0x0003:
238 return cfi_cmdset_0001(map, primary);
239#endif
240#ifdef CONFIG_MTD_CFI_AMDSTD
241 case 0x0002:
242 return cfi_cmdset_0002(map, primary);
243#endif
244#ifdef CONFIG_MTD_CFI_STAA
245 case 0x0020:
246 return cfi_cmdset_0020(map, primary);
247#endif
248 }
249
250 return cfi_cmdset_unknown(map, primary);
251}
252
253MODULE_LICENSE("GPL");
254MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
255MODULE_DESCRIPTION("Helper routines for flash chip probe code");
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
new file mode 100644
index 000000000000..62d235a9a4e2
--- /dev/null
+++ b/drivers/mtd/chips/jedec.c
@@ -0,0 +1,934 @@
1
2/* JEDEC Flash Interface.
3 * This is an older type of interface for self programming flash. It is
 4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips.
7 *
8 * See the AMD flash databook for information on how to operate the interface.
9 *
 10 * This code does not support anything wider than 8-bit flash chips; I am
11 * not going to guess how to send commands to them, plus I expect they will
12 * all speak CFI..
13 *
14 * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/mtd/jedec.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/compatmac.h>
24
25static struct mtd_info *jedec_probe(struct map_info *);
26static int jedec_probe8(struct map_info *map,unsigned long base,
27 struct jedec_private *priv);
28static int jedec_probe16(struct map_info *map,unsigned long base,
29 struct jedec_private *priv);
30static int jedec_probe32(struct map_info *map,unsigned long base,
31 struct jedec_private *priv);
32static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
33 unsigned long len);
34static int flash_erase(struct mtd_info *mtd, struct erase_info *instr);
35static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
36 size_t *retlen, const u_char *buf);
37
38static unsigned long my_bank_size;
39
40/* Listing of parts and sizes. We need this table to learn the sector
41 size of the chip and the total length */
42static const struct JEDECTable JEDEC_table[] = {
43 {
44 .jedec = 0x013D,
45 .name = "AMD Am29F017D",
46 .size = 2*1024*1024,
47 .sectorsize = 64*1024,
48 .capabilities = MTD_CAP_NORFLASH
49 },
50 {
51 .jedec = 0x01AD,
52 .name = "AMD Am29F016",
53 .size = 2*1024*1024,
54 .sectorsize = 64*1024,
55 .capabilities = MTD_CAP_NORFLASH
56 },
57 {
58 .jedec = 0x01D5,
59 .name = "AMD Am29F080",
60 .size = 1*1024*1024,
61 .sectorsize = 64*1024,
62 .capabilities = MTD_CAP_NORFLASH
63 },
64 {
65 .jedec = 0x01A4,
66 .name = "AMD Am29F040",
67 .size = 512*1024,
68 .sectorsize = 64*1024,
69 .capabilities = MTD_CAP_NORFLASH
70 },
71 {
72 .jedec = 0x20E3,
73 .name = "AMD Am29W040B",
74 .size = 512*1024,
75 .sectorsize = 64*1024,
76 .capabilities = MTD_CAP_NORFLASH
77 },
78 {
79 .jedec = 0xC2AD,
80 .name = "Macronix MX29F016",
81 .size = 2*1024*1024,
82 .sectorsize = 64*1024,
83 .capabilities = MTD_CAP_NORFLASH
84 },
85 { .jedec = 0x0 }
86};
87
88static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
89static void jedec_sync(struct mtd_info *mtd) {};
90static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
91 size_t *retlen, u_char *buf);
92static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
93 size_t *retlen, u_char *buf);
94
95static struct mtd_info *jedec_probe(struct map_info *map);
96
97
98
99static struct mtd_chip_driver jedec_chipdrv = {
100 .probe = jedec_probe,
101 .name = "jedec",
102 .module = THIS_MODULE
103};
104
105/* Probe entry point */
106
107static struct mtd_info *jedec_probe(struct map_info *map)
108{
109 struct mtd_info *MTD;
110 struct jedec_private *priv;
111 unsigned long Base;
112 unsigned long SectorSize;
113 unsigned count;
114 unsigned I,Uniq;
115 char Part[200];
116 memset(&priv,0,sizeof(priv));
117
118 MTD = kmalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
119 if (!MTD)
120 return NULL;
121
122 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
123 priv = (struct jedec_private *)&MTD[1];
124
125 my_bank_size = map->size;
126
127 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
128 {
129 printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
130 kfree(MTD);
131 return NULL;
132 }
133
134 for (Base = 0; Base < map->size; Base += my_bank_size)
135 {
136 // Perhaps zero could designate all tests?
137 if (map->buswidth == 0)
138 map->buswidth = 1;
139
140 if (map->buswidth == 1){
141 if (jedec_probe8(map,Base,priv) == 0) {
142			printk("did not recognize jedec chip\n");
143 kfree(MTD);
144 return NULL;
145 }
146 }
147 if (map->buswidth == 2)
148 jedec_probe16(map,Base,priv);
149 if (map->buswidth == 4)
150 jedec_probe32(map,Base,priv);
151 }
152
153 // Get the biggest sector size
154 SectorSize = 0;
155 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
156 {
157 // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
158 // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
159 if (priv->chips[I].sectorsize > SectorSize)
160 SectorSize = priv->chips[I].sectorsize;
161 }
162
163 // Quickly ensure that the other sector sizes are factors of the largest
164 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
165 {
166 if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
167 {
168 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
169 kfree(MTD);
170 return NULL;
171 }
172 }
173
174 /* Generate a part name that includes the number of different chips and
175 other configuration information */
176 count = 1;
177 strlcpy(Part,map->name,sizeof(Part)-10);
178 strcat(Part," ");
179 Uniq = 0;
180 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
181 {
182 const struct JEDECTable *JEDEC;
183
184 if (priv->chips[I+1].jedec == priv->chips[I].jedec)
185 {
186 count++;
187 continue;
188 }
189
190 // Locate the chip in the jedec table
191 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
192 if (JEDEC == 0)
193 {
194 printk("mtd: Internal Error, JEDEC not set\n");
195 kfree(MTD);
196 return NULL;
197 }
198
199 if (Uniq != 0)
200 strcat(Part,",");
201 Uniq++;
202
203 if (count != 1)
204 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
205 else
206 sprintf(Part+strlen(Part),"%s",JEDEC->name);
207 if (strlen(Part) > sizeof(Part)*2/3)
208 break;
209 count = 1;
210 }
211
212 /* Determine if the chips are organized in a linear fashion, or if there
213 are empty banks. Note, the last bank does not count here, only the
214 first banks are important. Holes on non-bank boundaries can not exist
215 due to the way the detection algorithm works. */
216 if (priv->size < my_bank_size)
217 my_bank_size = priv->size;
218 priv->is_banked = 0;
219 //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
220 //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
221 if (!priv->size) {
222 printk("priv->size is zero\n");
223 kfree(MTD);
224 return NULL;
225 }
226 if (priv->size/my_bank_size) {
227 if (priv->size/my_bank_size == 1) {
228 priv->size = my_bank_size;
229 }
230 else {
231 for (I = 0; I != priv->size/my_bank_size - 1; I++)
232 {
233 if (priv->bank_fill[I] != my_bank_size)
234 priv->is_banked = 1;
235
236	 /* Even this could be eliminated, but new de-optimized read/write
237 functions have to be written */
238 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
239 if (priv->bank_fill[I] != priv->bank_fill[0])
240 {
241		 printk("mtd: Failed. Cannot handle asymmetric banking\n");
242 kfree(MTD);
243 return NULL;
244 }
245 }
246 }
247 }
248 if (priv->is_banked == 1)
249 strcat(Part,", banked");
250
251 // printk("Part: '%s'\n",Part);
252
253 memset(MTD,0,sizeof(*MTD));
254 // strlcpy(MTD->name,Part,sizeof(MTD->name));
255 MTD->name = map->name;
256 MTD->type = MTD_NORFLASH;
257 MTD->flags = MTD_CAP_NORFLASH;
258 MTD->erasesize = SectorSize*(map->buswidth);
259 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
260 MTD->size = priv->size;
261 // printk("MTD->size is %x\n",(unsigned int)MTD->size);
262 //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
263 MTD->erase = flash_erase;
264 if (priv->is_banked == 1)
265 MTD->read = jedec_read_banked;
266 else
267 MTD->read = jedec_read;
268 MTD->write = flash_write;
269 MTD->sync = jedec_sync;
270 MTD->priv = map;
271 map->fldrv_priv = priv;
272 map->fldrv = &jedec_chipdrv;
273 __module_get(THIS_MODULE);
274 return MTD;
275}
276
277/* Helper for the JEDEC probe; JEDEC numbers all have odd parity */
278static int checkparity(u_char C)
279{
280 u_char parity = 0;
281 while (C != 0)
282 {
283 parity ^= C & 1;
284 C >>= 1;
285 }
286
287 return parity == 1;
288}
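/*
 * Illustrative standalone sketch (not from the original source): the same
 * odd-parity test checkparity() above applies to JEDEC manufacturer and
 * device IDs.  0x01 (one bit set) and 0xc2 (three bits set) pass, while
 * 0x03 (two bits set) would be rejected as a bogus ID.  The IDs are just
 * example bytes; compile separately from the driver.
 */
#include <stdio.h>

static int odd_parity(unsigned char c)
{
        unsigned char parity = 0;

        while (c != 0) {
                parity ^= c & 1;
                c >>= 1;
        }
        return parity == 1;
}

int main(void)
{
        unsigned char ids[] = { 0x01, 0xc2, 0x03 };
        unsigned int i;

        for (i = 0; i < sizeof(ids); i++)
                printf("0x%02x: %s parity\n", ids[i],
                       odd_parity(ids[i]) ? "odd" : "even");
        return 0;
}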
289
290
291/* Take an array of JEDEC numbers that represent interleaved flash chips
292 and process them. Check to make sure they are good JEDEC numbers, look
293 them up and then add them to the chip list */
294static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
295 unsigned long base,struct jedec_private *priv)
296{
297 unsigned I,J;
298 unsigned long Size;
299 unsigned long SectorSize;
300 const struct JEDECTable *JEDEC;
301
302 // Test #2 JEDEC numbers exhibit odd parity
303 for (I = 0; I != Count; I++)
304 {
305 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
306 return 0;
307 }
308
309 // Finally, just make sure all the chip sizes are the same
310 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
311
312 if (JEDEC == 0)
313 {
314      printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Id[0]);
315 return 0;
316 }
317
318 Size = JEDEC->size;
319 SectorSize = JEDEC->sectorsize;
320 for (I = 0; I != Count; I++)
321 {
322 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
323 if (JEDEC == 0)
324 {
325	 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Id[0]);
326 return 0;
327 }
328
329 if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize)
330 {
331	 printk("mtd: Failed. Interleaved flash does not have matching characteristics\n");
332 return 0;
333 }
334 }
335
336 // Load the Chips
337 for (I = 0; I != MAX_JEDEC_CHIPS; I++)
338 {
339 if (priv->chips[I].jedec == 0)
340 break;
341 }
342
343 if (I + Count > MAX_JEDEC_CHIPS)
344 {
345 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
346 return 0;
347 }
348
349 // Add them to the table
350 for (J = 0; J != Count; J++)
351 {
352 unsigned long Bank;
353
354 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
355 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
356 priv->chips[I].size = JEDEC->size;
357 priv->chips[I].sectorsize = JEDEC->sectorsize;
358 priv->chips[I].base = base + J;
359 priv->chips[I].datashift = J*8;
360 priv->chips[I].capabilities = JEDEC->capabilities;
361 priv->chips[I].offset = priv->size + J;
362
363 // log2 n :|
364 priv->chips[I].addrshift = 0;
365 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
366
367 // Determine how filled this bank is.
368 Bank = base & (~(my_bank_size-1));
369 if (priv->bank_fill[Bank/my_bank_size] < base +
370 (JEDEC->size << priv->chips[I].addrshift) - Bank)
371 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
372 I++;
373 }
374
375 priv->size += priv->chips[I-1].size*Count;
376
377 return priv->chips[I-1].size;
378}
379
380/* Lookup the chip information from the JEDEC ID table. */
381static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
382{
383 __u16 Id = (mfr << 8) | id;
384 unsigned long I = 0;
385 for (I = 0; JEDEC_table[I].jedec != 0; I++)
386 if (JEDEC_table[I].jedec == Id)
387 return JEDEC_table + I;
388 return NULL;
389}
390
391// Look for flash using an 8 bit bus interface
392static int jedec_probe8(struct map_info *map,unsigned long base,
393 struct jedec_private *priv)
394{
395 #define flread(x) map_read8(map,base+x)
396 #define flwrite(v,x) map_write8(map,v,base+x)
397
398 const unsigned long AutoSel1 = 0xAA;
399 const unsigned long AutoSel2 = 0x55;
400 const unsigned long AutoSel3 = 0x90;
401 const unsigned long Reset = 0xF0;
402 __u32 OldVal;
403 __u8 Mfg[1];
404 __u8 Id[1];
405 unsigned I;
406 unsigned long Size;
407
408 // Wait for any write/erase operation to settle
409 OldVal = flread(base);
410 for (I = 0; OldVal != flread(base) && I < 10000; I++)
411 OldVal = flread(base);
412
413 // Reset the chip
414 flwrite(Reset,0x555);
415
416 // Send the sequence
417 flwrite(AutoSel1,0x555);
418 flwrite(AutoSel2,0x2AA);
419 flwrite(AutoSel3,0x555);
420
421 // Get the JEDEC numbers
422 Mfg[0] = flread(0);
423 Id[0] = flread(1);
424 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
425
426 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
427 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
428 if (Size == 0)
429 {
430 flwrite(Reset,0x555);
431 return 0;
432 }
433
434
435 // Reset.
436 flwrite(Reset,0x555);
437
438 return 1;
439
440 #undef flread
441 #undef flwrite
442}
443
444// Look for flash using a 16 bit bus interface (ie 2 8-bit chips)
445static int jedec_probe16(struct map_info *map,unsigned long base,
446 struct jedec_private *priv)
447{
448 return 0;
449}
450
451// Look for flash using a 32 bit bus interface (ie 4 8-bit chips)
452static int jedec_probe32(struct map_info *map,unsigned long base,
453 struct jedec_private *priv)
454{
455 #define flread(x) map_read32(map,base+((x)<<2))
456 #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
457
458 const unsigned long AutoSel1 = 0xAAAAAAAA;
459 const unsigned long AutoSel2 = 0x55555555;
460 const unsigned long AutoSel3 = 0x90909090;
461 const unsigned long Reset = 0xF0F0F0F0;
462 __u32 OldVal;
463 __u8 Mfg[4];
464 __u8 Id[4];
465 unsigned I;
466 unsigned long Size;
467
468 // Wait for any write/erase operation to settle
469 OldVal = flread(base);
470 for (I = 0; OldVal != flread(base) && I < 10000; I++)
471 OldVal = flread(base);
472
473 // Reset the chip
474 flwrite(Reset,0x555);
475
476 // Send the sequence
477 flwrite(AutoSel1,0x555);
478 flwrite(AutoSel2,0x2AA);
479 flwrite(AutoSel3,0x555);
480
481 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
482 if (flread(0) != flread(0x100) ||
483 flread(1) != flread(0x101))
484 {
485 flwrite(Reset,0x555);
486 return 0;
487 }
488
489 // Split up the JEDEC numbers
490 OldVal = flread(0);
491 for (I = 0; I != 4; I++)
492 Mfg[I] = (OldVal >> (I*8));
493 OldVal = flread(1);
494 for (I = 0; I != 4; I++)
495 Id[I] = (OldVal >> (I*8));
496
497 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
498 if (Size == 0)
499 {
500 flwrite(Reset,0x555);
501 return 0;
502 }
503
504   /* Check if there is address wrap-around within a single bank; if this
505      probe still returns JEDEC numbers then we assume that it is wrap-around.
506      Notice we call this routine with the JEDEC return still enabled, so if
507      two or more flashes have a truncated address space the probe test will
508      still work */
509 if (base + (Size<<2)+0x555 < map->size &&
510 base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
511 {
512 if (flread(base+Size) != flread(base+Size + 0x100) ||
513 flread(base+Size + 1) != flread(base+Size + 0x101))
514 {
515 jedec_probe32(map,base+Size,priv);
516 }
517 }
518
519 // Reset.
520 flwrite(0xF0F0F0F0,0x555);
521
522 return 1;
523
524 #undef flread
525 #undef flwrite
526}
527
528/* Linear read. */
529static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
530 size_t *retlen, u_char *buf)
531{
532 struct map_info *map = mtd->priv;
533
534 map_copy_from(map, buf, from, len);
535 *retlen = len;
536 return 0;
537}
538
539/* Banked read. Take special care to jump past the holes in the bank
540   mapping. This version assumes symmetry in the holes. */
541static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
542 size_t *retlen, u_char *buf)
543{
544 struct map_info *map = mtd->priv;
545 struct jedec_private *priv = map->fldrv_priv;
546
547 *retlen = 0;
548 while (len > 0)
549 {
550 // Determine what bank and offset into that bank the first byte is
551 unsigned long bank = from & (~(priv->bank_fill[0]-1));
552 unsigned long offset = from & (priv->bank_fill[0]-1);
553 unsigned long get = len;
554 if (priv->bank_fill[0] - offset < len)
555 get = priv->bank_fill[0] - offset;
556
557 bank /= priv->bank_fill[0];
558 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
559
560 len -= get;
561 *retlen += get;
562 from += get;
563 }
564 return 0;
565}
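/*
 * Illustrative standalone sketch (not from the original source): the
 * logical-to-physical translation jedec_read_banked() above performs when
 * each bank is only partially populated.  bank_fill (flash per bank) and
 * my_bank_size (bank spacing in the map) are made-up example values;
 * compile separately from the driver.
 */
#include <stdio.h>

int main(void)
{
        unsigned long bank_fill = 0x80000;      /* 512 KiB of flash per bank  */
        unsigned long my_bank_size = 0x100000;  /* banks are 1 MiB apart      */
        unsigned long from = 0xc0000;           /* logical (hole-free) offset */

        unsigned long bank = from & ~(bank_fill - 1);
        unsigned long offset = from & (bank_fill - 1);

        bank /= bank_fill;                      /* which bank, then re-space it */
        printf("logical 0x%lx -> map offset 0x%lx\n",
               from, bank * my_bank_size + offset);
        return 0;
}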
566
567/* Pass the flags value that the flash returned before it re-entered read
568 mode. */
569static void jedec_flash_failed(unsigned char code)
570{
571 /* Bit 5 being high indicates that there was an internal device
572 failure, erasure time limits exceeded or something */
573 if ((code & (1 << 5)) != 0)
574 {
575 printk("mtd: Internal Flash failure\n");
576 return;
577 }
578 printk("mtd: Programming didn't take\n");
579}
580
581/* This uses the erasure function described in the AMD Flash Handbook;
582   it will work only for flashes with a fixed sector size. Flashes with
583   a selection of sector sizes (i.e. the AMD Am29F800B) will need a different
584   routine. This routine tries to parallelize erasing multiple chips/sectors
585 where possible */
586static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
587{
588 // Does IO to the currently selected chip
589 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
590 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
591
592 unsigned long Time = 0;
593 unsigned long NoTime = 0;
594 unsigned long start = instr->addr, len = instr->len;
595 unsigned int I;
596 struct map_info *map = mtd->priv;
597 struct jedec_private *priv = map->fldrv_priv;
598
599 // Verify the arguments..
600 if (start + len > mtd->size ||
601 (start % mtd->erasesize) != 0 ||
602 (len % mtd->erasesize) != 0 ||
603 (len/mtd->erasesize) == 0)
604 return -EINVAL;
605
606 jedec_flash_chip_scan(priv,start,len);
607
608 // Start the erase sequence on each chip
609 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
610 {
611 unsigned long off;
612 struct jedec_flash_chip *chip = priv->chips + I;
613
614 if (chip->length == 0)
615 continue;
616
617 if (chip->start + chip->length > chip->size)
618 {
619 printk("DIE\n");
620 return -EIO;
621 }
622
623 flwrite(0xF0,chip->start + 0x555);
624 flwrite(0xAA,chip->start + 0x555);
625 flwrite(0x55,chip->start + 0x2AA);
626 flwrite(0x80,chip->start + 0x555);
627 flwrite(0xAA,chip->start + 0x555);
628 flwrite(0x55,chip->start + 0x2AA);
629
630 /* Once we start selecting the erase sectors the delay between each
631 command must not exceed 50us or it will immediately start erasing
632 and ignore the other sectors */
633 for (off = 0; off < len; off += chip->sectorsize)
634 {
635 // Check to make sure we didn't timeout
636 flwrite(0x30,chip->start + off);
637 if (off == 0)
638 continue;
639 if ((flread(chip->start + off) & (1 << 3)) != 0)
640 {
641 printk("mtd: Ack! We timed out the erase timer!\n");
642 return -EIO;
643 }
644 }
645 }
646
647 /* We could split this into a timer routine and return early, performing
648      background erasure. Maybe later if the need warrants */
649
650 /* Poll the flash for erasure completion, specs say this can take as long
651 as 480 seconds to do all the sectors (for a 2 meg flash).
652 Erasure time is dependent on chip age, temp and wear.. */
653
654 /* This being a generic routine assumes a 32 bit bus. It does read32s
655      and bundles interleaved chips into the same grouping. This will work
656 for all bus widths */
657 Time = 0;
658 NoTime = 0;
659 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
660 {
661 struct jedec_flash_chip *chip = priv->chips + I;
662 unsigned long off = 0;
663 unsigned todo[4] = {0,0,0,0};
664 unsigned todo_left = 0;
665 unsigned J;
666
667 if (chip->length == 0)
668 continue;
669
670 /* Find all chips in this data line, realistically this is all
671	 or nothing up to the interleave count */
672 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
673 {
674 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
675 (chip->base & (~((1<<chip->addrshift)-1))))
676 {
677 todo_left++;
678 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
679 }
680 }
681
682 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
683 (short)todo[2],(short)todo[3]);
684 */
685 while (1)
686 {
687 __u32 Last[4];
688 unsigned long Count = 0;
689
690	 /* During erase, bit 7 is held low and bit 6 toggles; we watch this.
691	    Should it stop toggling or go high then the erase is completed,
692	    or this is not really flash ;> */
693 switch (map->buswidth) {
694 case 1:
695 Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
696 Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
697 Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
698 break;
699 case 2:
700 Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
701 Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
702 Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
703 break;
704	 case 4:
705 Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
706 Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
707 Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
708 break;
709 }
710 Count = 3;
711 while (todo_left != 0)
712 {
713 for (J = 0; J != 4; J++)
714 {
715 __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF;
716 __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF;
717 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
718 if (todo[J] == 0)
719 continue;
720
721 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
722 {
723// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
724 continue;
725 }
726
727 if (Byte1 == Byte2)
728 {
729 jedec_flash_failed(Byte3);
730 return -EIO;
731 }
732
733 todo[J] = 0;
734 todo_left--;
735 }
736
737/* if (NoTime == 0)
738 Time += HZ/10 - schedule_timeout(HZ/10);*/
739 NoTime = 0;
740
741 switch (map->buswidth) {
742 case 1:
743 Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
744 break;
745 case 2:
746 Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
747 break;
748 case 4:
749 Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
750 break;
751 }
752 Count++;
753
754/* // Count time, max of 15s per sector (according to AMD)
755 if (Time > 15*len/mtd->erasesize*HZ)
756 {
757 printk("mtd: Flash Erase Timed out\n");
758 return -EIO;
759 } */
760 }
761
762 // Skip to the next chip if we used chip erase
763 if (chip->length == chip->size)
764 off = chip->size;
765 else
766 off += chip->sectorsize;
767
768 if (off >= chip->length)
769 break;
770 NoTime = 1;
771 }
772
773 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
774 {
775 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
776 (chip->base & (~((1<<chip->addrshift)-1))))
777 priv->chips[J].length = 0;
778 }
779 }
780
781 //printk("done\n");
782 instr->state = MTD_ERASE_DONE;
783 mtd_erase_callback(instr);
784 return 0;
785
786 #undef flread
787 #undef flwrite
788}
789
790/* This is the simple flash writing function. It writes to every byte, in
791 sequence. It takes care of how to properly address the flash if
792   the flash is interleaved. It can only be used if all the chips in the
793 array are identical!*/
794static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
795 size_t *retlen, const u_char *buf)
796{
797 /* Does IO to the currently selected chip. It takes the bank addressing
798 base (which is divisible by the chip size) adds the necessary lower bits
799 of addrshift (interleave index) and then adds the control register index. */
800 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
801 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802
803 struct map_info *map = mtd->priv;
804 struct jedec_private *priv = map->fldrv_priv;
805 unsigned long base;
806 unsigned long off;
807 size_t save_len = len;
808
809 if (start + len > mtd->size)
810 return -EIO;
811
812 //printk("Here");
813
814 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
815 while (len != 0)
816 {
817 struct jedec_flash_chip *chip = priv->chips;
818 unsigned long bank;
819 unsigned long boffset;
820
821 // Compute the base of the flash.
822 off = ((unsigned long)start) % (chip->size << chip->addrshift);
823 base = start - off;
824
825 // Perform banked addressing translation.
826 bank = base & (~(priv->bank_fill[0]-1));
827 boffset = base & (priv->bank_fill[0]-1);
828 bank = (bank/priv->bank_fill[0])*my_bank_size;
829 base = bank + boffset;
830
831      // printk("Flashing %X %X %X\n",base,chip->size,len);
832 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
833
834 // Loop over this page
835 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
836 {
837 unsigned char oldbyte = map_read8(map,base+off);
838 unsigned char Last[4];
839 unsigned long Count = 0;
840
841 if (oldbyte == *buf) {
842 // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len);
843 continue;
844 }
845 if (((~oldbyte) & *buf) != 0)
846 printk("mtd: warn: Trying to set a 0 to a 1\n");
847
848 // Write
849 flwrite(0xAA,0x555);
850 flwrite(0x55,0x2AA);
851 flwrite(0xA0,0x555);
852 map_write8(map,*buf,base + off);
853 Last[0] = map_read8(map,base + off);
854 Last[1] = map_read8(map,base + off);
855 Last[2] = map_read8(map,base + off);
856
857 /* Wait for the flash to finish the operation. We store the last 4
858 status bytes that have been retrieved so we can determine why
859 it failed. The toggle bits keep toggling when there is a
860 failure */
861 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
862 Count < 10000; Count++)
863 Last[Count % 4] = map_read8(map,base + off);
864 if (Last[(Count - 1) % 4] != *buf)
865 {
866 jedec_flash_failed(Last[(Count - 3) % 4]);
867 return -EIO;
868 }
869 }
870 }
871 *retlen = save_len;
872 return 0;
873}
874
875/* This is used to enhance the speed of the erase routine;
876   when things are being done to multiple chips it is possible to
877   parallelize the operations, particularly full memory erases of
878   multi-chip memories benefit */
879static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
880 unsigned long len)
881{
882 unsigned int I;
883
884 // Zero the records
885	for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
886 priv->chips[I].start = priv->chips[I].length = 0;
887
888 // Intersect the region with each chip
889	for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
890 {
891 struct jedec_flash_chip *chip = priv->chips + I;
892 unsigned long ByteStart;
893 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
894
895 // End is before this chip or the start is after it
896 if (start+len < chip->offset ||
897 ChipEndByte - (1 << chip->addrshift) < start)
898 continue;
899
900 if (start < chip->offset)
901 {
902 ByteStart = chip->offset;
903 chip->start = 0;
904 }
905 else
906 {
907 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
908 ByteStart = start;
909 }
910
911 if (start + len >= ChipEndByte)
912 chip->length = (ChipEndByte - ByteStart) >> chip->addrshift;
913 else
914 chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
915 }
916}
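/*
 * Editor's note (worked example, not in the original source): assume two
 * chips with chip->size == 0x80000 interleaved byte-wide (addrshift == 1),
 * so each spans 0x100000 bytes of the map (offsets 0 and 0x100000).  For
 * start = 0xff000, len = 0x2000 the loop above yields
 *     chip[0]: start = 0x7f800, length = 0x800   (units of 1 << addrshift)
 *     chip[1]: start = 0,       length = 0x800
 * i.e. the byte range is split at the chip boundary and converted to
 * per-chip word offsets.
 */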
917
918int __init jedec_init(void)
919{
920 register_mtd_chip_driver(&jedec_chipdrv);
921 return 0;
922}
923
924static void __exit jedec_exit(void)
925{
926 unregister_mtd_chip_driver(&jedec_chipdrv);
927}
928
929module_init(jedec_init);
930module_exit(jedec_exit);
931
932MODULE_LICENSE("GPL");
933MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
934MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
new file mode 100644
index 000000000000..30325a25ab95
--- /dev/null
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -0,0 +1,2127 @@
1/*
2   JEDEC-compliant flash chip probe code.
3 (C) 2000 Red Hat. GPL'd.
4 $Id: jedec_probe.c,v 1.61 2004/11/19 20:52:16 thayne Exp $
5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
6 for the standard this probe goes back to.
7
8 Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
9*/
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <asm/io.h>
17#include <asm/byteorder.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/cfi.h>
26#include <linux/mtd/gen_probe.h>
27
28/* Manufacturers */
29#define MANUFACTURER_AMD 0x0001
30#define MANUFACTURER_ATMEL 0x001f
31#define MANUFACTURER_FUJITSU 0x0004
32#define MANUFACTURER_HYUNDAI 0x00AD
33#define MANUFACTURER_INTEL 0x0089
34#define MANUFACTURER_MACRONIX 0x00C2
35#define MANUFACTURER_NEC 0x0010
36#define MANUFACTURER_PMC 0x009D
37#define MANUFACTURER_SST 0x00BF
38#define MANUFACTURER_ST 0x0020
39#define MANUFACTURER_TOSHIBA 0x0098
40#define MANUFACTURER_WINBOND 0x00da
41
42
43/* AMD */
44#define AM29DL800BB 0x22C8
45#define AM29DL800BT 0x224A
46
47#define AM29F800BB 0x2258
48#define AM29F800BT 0x22D6
49#define AM29LV400BB 0x22BA
50#define AM29LV400BT 0x22B9
51#define AM29LV800BB 0x225B
52#define AM29LV800BT 0x22DA
53#define AM29LV160DT 0x22C4
54#define AM29LV160DB 0x2249
55#define AM29F017D 0x003D
56#define AM29F016D 0x00AD
57#define AM29F080 0x00D5
58#define AM29F040 0x00A4
59#define AM29LV040B 0x004F
60#define AM29F032B 0x0041
61#define AM29F002T 0x00B0
62
63/* Atmel */
64#define AT49BV512 0x0003
65#define AT29LV512 0x003d
66#define AT49BV16X 0x00C0
67#define AT49BV16XT 0x00C2
68#define AT49BV32X 0x00C8
69#define AT49BV32XT 0x00C9
70
71/* Fujitsu */
72#define MBM29F040C 0x00A4
73#define MBM29LV650UE 0x22D7
74#define MBM29LV320TE 0x22F6
75#define MBM29LV320BE 0x22F9
76#define MBM29LV160TE 0x22C4
77#define MBM29LV160BE 0x2249
78#define MBM29LV800BA 0x225B
79#define MBM29LV800TA 0x22DA
80#define MBM29LV400TC 0x22B9
81#define MBM29LV400BC 0x22BA
82
83/* Hyundai */
84#define HY29F002T 0x00B0
85
86/* Intel */
87#define I28F004B3T 0x00d4
88#define I28F004B3B 0x00d5
89#define I28F400B3T 0x8894
90#define I28F400B3B 0x8895
91#define I28F008S5 0x00a6
92#define I28F016S5 0x00a0
93#define I28F008SA 0x00a2
94#define I28F008B3T 0x00d2
95#define I28F008B3B 0x00d3
96#define I28F800B3T 0x8892
97#define I28F800B3B 0x8893
98#define I28F016S3 0x00aa
99#define I28F016B3T 0x00d0
100#define I28F016B3B 0x00d1
101#define I28F160B3T 0x8890
102#define I28F160B3B 0x8891
103#define I28F320B3T 0x8896
104#define I28F320B3B 0x8897
105#define I28F640B3T 0x8898
106#define I28F640B3B 0x8899
107#define I82802AB 0x00ad
108#define I82802AC 0x00ac
109
110/* Macronix */
111#define MX29LV040C 0x004F
112#define MX29LV160T 0x22C4
113#define MX29LV160B 0x2249
114#define MX29F016 0x00AD
115#define MX29F002T 0x00B0
116#define MX29F004T 0x0045
117#define MX29F004B 0x0046
118
119/* NEC */
120#define UPD29F064115 0x221C
121
122/* PMC */
123#define PM49FL002 0x006D
124#define PM49FL004 0x006E
125#define PM49FL008 0x006A
126
127/* ST - www.st.com */
128#define M29W800DT 0x00D7
129#define M29W800DB 0x005B
130#define M29W160DT 0x22C4
131#define M29W160DB 0x2249
132#define M29W040B 0x00E3
133#define M50FW040 0x002C
134#define M50FW080 0x002D
135#define M50FW016 0x002E
136#define M50LPW080 0x002F
137
138/* SST */
139#define SST29EE020 0x0010
140#define SST29LE020 0x0012
141#define SST29EE512 0x005d
142#define SST29LE512 0x003d
143#define SST39LF800 0x2781
144#define SST39LF160 0x2782
145#define SST39LF512 0x00D4
146#define SST39LF010 0x00D5
147#define SST39LF020 0x00D6
148#define SST39LF040 0x00D7
149#define SST39SF010A 0x00B5
150#define SST39SF020A 0x00B6
151#define SST49LF004B 0x0060
152#define SST49LF008A 0x005a
153#define SST49LF030A 0x001C
154#define SST49LF040A 0x0051
155#define SST49LF080A 0x005B
156
157/* Toshiba */
158#define TC58FVT160 0x00C2
159#define TC58FVB160 0x0043
160#define TC58FVT321 0x009A
161#define TC58FVB321 0x009C
162#define TC58FVT641 0x0093
163#define TC58FVB641 0x0095
164
165/* Winbond */
166#define W49V002A 0x00b0
167
168
169/*
170 * Unlock address sets for AMD command sets.
171 * Intel command sets use MTD_UADDR_UNNECESSARY.
172 * Each identifier, except MTD_UADDR_UNNECESSARY and
173 * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
174 * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
175 * initialization does not require spelling out the
176 * unlock addresses for every bit width.
177 */
178enum uaddr {
179 MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
180 MTD_UADDR_0x0555_0x02AA,
181 MTD_UADDR_0x0555_0x0AAA,
182 MTD_UADDR_0x5555_0x2AAA,
183 MTD_UADDR_0x0AAA_0x0555,
184 MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
185 MTD_UADDR_UNNECESSARY, /* Does not require any address */
186};
187
188
189struct unlock_addr {
190 u32 addr1;
191 u32 addr2;
192};
193
194
195/*
196 * I don't like the fact that the first entry in unlock_addrs[]
197 * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
198 * should not be used. The problem is that structures with
199 * initializers have extra fields initialized to 0. It is _very_
200 * desirable to have the unlock address entries for unsupported
201 * data widths automatically initialized - that means that
202 * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
203 * must go unused.
204 */
205static const struct unlock_addr unlock_addrs[] = {
206 [MTD_UADDR_NOT_SUPPORTED] = {
207 .addr1 = 0xffff,
208 .addr2 = 0xffff
209 },
210
211 [MTD_UADDR_0x0555_0x02AA] = {
212 .addr1 = 0x0555,
213 .addr2 = 0x02aa
214 },
215
216 [MTD_UADDR_0x0555_0x0AAA] = {
217 .addr1 = 0x0555,
218 .addr2 = 0x0aaa
219 },
220
221 [MTD_UADDR_0x5555_0x2AAA] = {
222 .addr1 = 0x5555,
223 .addr2 = 0x2aaa
224 },
225
226 [MTD_UADDR_0x0AAA_0x0555] = {
227 .addr1 = 0x0AAA,
228 .addr2 = 0x0555
229 },
230
231 [MTD_UADDR_DONT_CARE] = {
232 .addr1 = 0x0000, /* Doesn't matter which address */
233 .addr2 = 0x0000 /* is used - must be last entry */
234 },
235
236 [MTD_UADDR_UNNECESSARY] = {
237 .addr1 = 0x0000,
238 .addr2 = 0x0000
239 }
240};
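/*
 * Editor's note: a minimal sketch of why MTD_UADDR_NOT_SUPPORTED must be 0.
 * The jedec_table[] entries below initialize uaddr[] with designated
 * initializers, so any slot they do not mention is implicitly zero-filled
 * and therefore reads back as MTD_UADDR_NOT_SUPPORTED.  The array name here
 * is hypothetical.
 */
#if 0	/* illustration only */
static const __u8 example_uaddr[4] = {
	[0] = MTD_UADDR_0x0555_0x02AA,	/* x8 explicitly supported */
	/* [1]..[3] are omitted and become 0 == MTD_UADDR_NOT_SUPPORTED,
	 * so finfo_uaddr() below reports the wider widths as unsupported
	 * without any extra table entries. */
};
#endif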
241
242
243struct amd_flash_info {
244 const __u16 mfr_id;
245 const __u16 dev_id;
246 const char *name;
247 const int DevSize;
248 const int NumEraseRegions;
249 const int CmdSet;
250 const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
251 const ulong regions[6];
252};
253
254#define ERASEINFO(size,blocks) (((size) << 8) | ((blocks) - 1))
255
256#define SIZE_64KiB 16
257#define SIZE_128KiB 17
258#define SIZE_256KiB 18
259#define SIZE_512KiB 19
260#define SIZE_1MiB 20
261#define SIZE_2MiB 21
262#define SIZE_4MiB 22
263#define SIZE_8MiB 23
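/*
 * Editor's note: ERASEINFO() packs an erase region descriptor and the SIZE_*
 * constants are log2 of the device size in bytes.  A minimal decoding sketch
 * (variable names hypothetical, not used by the driver); note that
 * (size << 8) equals (size / 256) << 16 whenever size is a multiple of 256,
 * matching the CFI EraseRegionInfo layout that cfi_jedec_setup() below
 * copies these values into.
 */
#if 0	/* illustration only */
	unsigned long region = ERASEINFO(0x10000, 31);		/* == 0x0100001e */
	unsigned long nblocks = (region & 0xffff) + 1;		/* 31 erase blocks */
	unsigned long blksize = (region >> 16) * 256;		/* 0x10000 bytes each */
	unsigned long devbytes = 1UL << SIZE_2MiB;		/* 0x200000 bytes total */
#endif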
264
265
266/*
267 * Please keep this list ordered by manufacturer!
268 * Fortunately, the list isn't searched often and so a
269 * slow, linear search isn't so bad.
270 */
271static const struct amd_flash_info jedec_table[] = {
272 {
273 .mfr_id = MANUFACTURER_AMD,
274 .dev_id = AM29F032B,
275 .name = "AMD AM29F032B",
276 .uaddr = {
277 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
278 },
279 .DevSize = SIZE_4MiB,
280 .CmdSet = P_ID_AMD_STD,
281 .NumEraseRegions= 1,
282 .regions = {
283 ERASEINFO(0x10000,64)
284 }
285 }, {
286 .mfr_id = MANUFACTURER_AMD,
287 .dev_id = AM29LV160DT,
288 .name = "AMD AM29LV160DT",
289 .uaddr = {
290 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
291 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
292 },
293 .DevSize = SIZE_2MiB,
294 .CmdSet = P_ID_AMD_STD,
295 .NumEraseRegions= 4,
296 .regions = {
297 ERASEINFO(0x10000,31),
298 ERASEINFO(0x08000,1),
299 ERASEINFO(0x02000,2),
300 ERASEINFO(0x04000,1)
301 }
302 }, {
303 .mfr_id = MANUFACTURER_AMD,
304 .dev_id = AM29LV160DB,
305 .name = "AMD AM29LV160DB",
306 .uaddr = {
307 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
308 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
309 },
310 .DevSize = SIZE_2MiB,
311 .CmdSet = P_ID_AMD_STD,
312 .NumEraseRegions= 4,
313 .regions = {
314 ERASEINFO(0x04000,1),
315 ERASEINFO(0x02000,2),
316 ERASEINFO(0x08000,1),
317 ERASEINFO(0x10000,31)
318 }
319 }, {
320 .mfr_id = MANUFACTURER_AMD,
321 .dev_id = AM29LV400BB,
322 .name = "AMD AM29LV400BB",
323 .uaddr = {
324 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
325 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
326 },
327 .DevSize = SIZE_512KiB,
328 .CmdSet = P_ID_AMD_STD,
329 .NumEraseRegions= 4,
330 .regions = {
331 ERASEINFO(0x04000,1),
332 ERASEINFO(0x02000,2),
333 ERASEINFO(0x08000,1),
334 ERASEINFO(0x10000,7)
335 }
336 }, {
337 .mfr_id = MANUFACTURER_AMD,
338 .dev_id = AM29LV400BT,
339 .name = "AMD AM29LV400BT",
340 .uaddr = {
341 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
342 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
343 },
344 .DevSize = SIZE_512KiB,
345 .CmdSet = P_ID_AMD_STD,
346 .NumEraseRegions= 4,
347 .regions = {
348 ERASEINFO(0x10000,7),
349 ERASEINFO(0x08000,1),
350 ERASEINFO(0x02000,2),
351 ERASEINFO(0x04000,1)
352 }
353 }, {
354 .mfr_id = MANUFACTURER_AMD,
355 .dev_id = AM29LV800BB,
356 .name = "AMD AM29LV800BB",
357 .uaddr = {
358 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
359 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
360 },
361 .DevSize = SIZE_1MiB,
362 .CmdSet = P_ID_AMD_STD,
363 .NumEraseRegions= 4,
364 .regions = {
365 ERASEINFO(0x04000,1),
366 ERASEINFO(0x02000,2),
367 ERASEINFO(0x08000,1),
368 ERASEINFO(0x10000,15),
369 }
370 }, {
371/* add DL */
372 .mfr_id = MANUFACTURER_AMD,
373 .dev_id = AM29DL800BB,
374 .name = "AMD AM29DL800BB",
375 .uaddr = {
376 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
377 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
378 },
379 .DevSize = SIZE_1MiB,
380 .CmdSet = P_ID_AMD_STD,
381 .NumEraseRegions= 6,
382 .regions = {
383 ERASEINFO(0x04000,1),
384 ERASEINFO(0x08000,1),
385 ERASEINFO(0x02000,4),
386 ERASEINFO(0x08000,1),
387 ERASEINFO(0x04000,1),
388 ERASEINFO(0x10000,14)
389 }
390 }, {
391 .mfr_id = MANUFACTURER_AMD,
392 .dev_id = AM29DL800BT,
393 .name = "AMD AM29DL800BT",
394 .uaddr = {
395 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
396 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
397 },
398 .DevSize = SIZE_1MiB,
399 .CmdSet = P_ID_AMD_STD,
400 .NumEraseRegions= 6,
401 .regions = {
402 ERASEINFO(0x10000,14),
403 ERASEINFO(0x04000,1),
404 ERASEINFO(0x08000,1),
405 ERASEINFO(0x02000,4),
406 ERASEINFO(0x08000,1),
407 ERASEINFO(0x04000,1)
408 }
409 }, {
410 .mfr_id = MANUFACTURER_AMD,
411 .dev_id = AM29F800BB,
412 .name = "AMD AM29F800BB",
413 .uaddr = {
414 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
415 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
416 },
417 .DevSize = SIZE_1MiB,
418 .CmdSet = P_ID_AMD_STD,
419 .NumEraseRegions= 4,
420 .regions = {
421 ERASEINFO(0x04000,1),
422 ERASEINFO(0x02000,2),
423 ERASEINFO(0x08000,1),
424 ERASEINFO(0x10000,15),
425 }
426 }, {
427 .mfr_id = MANUFACTURER_AMD,
428 .dev_id = AM29LV800BT,
429 .name = "AMD AM29LV800BT",
430 .uaddr = {
431 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
432 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
433 },
434 .DevSize = SIZE_1MiB,
435 .CmdSet = P_ID_AMD_STD,
436 .NumEraseRegions= 4,
437 .regions = {
438 ERASEINFO(0x10000,15),
439 ERASEINFO(0x08000,1),
440 ERASEINFO(0x02000,2),
441 ERASEINFO(0x04000,1)
442 }
443 }, {
444 .mfr_id = MANUFACTURER_AMD,
445 .dev_id = AM29F800BT,
446 .name = "AMD AM29F800BT",
447 .uaddr = {
448 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
449 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
450 },
451 .DevSize = SIZE_1MiB,
452 .CmdSet = P_ID_AMD_STD,
453 .NumEraseRegions= 4,
454 .regions = {
455 ERASEINFO(0x10000,15),
456 ERASEINFO(0x08000,1),
457 ERASEINFO(0x02000,2),
458 ERASEINFO(0x04000,1)
459 }
460 }, {
461 .mfr_id = MANUFACTURER_AMD,
462 .dev_id = AM29F017D,
463 .name = "AMD AM29F017D",
464 .uaddr = {
465 [0] = MTD_UADDR_DONT_CARE /* x8 */
466 },
467 .DevSize = SIZE_2MiB,
468 .CmdSet = P_ID_AMD_STD,
469 .NumEraseRegions= 1,
470 .regions = {
471 ERASEINFO(0x10000,32),
472 }
473 }, {
474 .mfr_id = MANUFACTURER_AMD,
475 .dev_id = AM29F016D,
476 .name = "AMD AM29F016D",
477 .uaddr = {
478 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
479 },
480 .DevSize = SIZE_2MiB,
481 .CmdSet = P_ID_AMD_STD,
482 .NumEraseRegions= 1,
483 .regions = {
484 ERASEINFO(0x10000,32),
485 }
486 }, {
487 .mfr_id = MANUFACTURER_AMD,
488 .dev_id = AM29F080,
489 .name = "AMD AM29F080",
490 .uaddr = {
491 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
492 },
493 .DevSize = SIZE_1MiB,
494 .CmdSet = P_ID_AMD_STD,
495 .NumEraseRegions= 1,
496 .regions = {
497 ERASEINFO(0x10000,16),
498 }
499 }, {
500 .mfr_id = MANUFACTURER_AMD,
501 .dev_id = AM29F040,
502 .name = "AMD AM29F040",
503 .uaddr = {
504 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
505 },
506 .DevSize = SIZE_512KiB,
507 .CmdSet = P_ID_AMD_STD,
508 .NumEraseRegions= 1,
509 .regions = {
510 ERASEINFO(0x10000,8),
511 }
512 }, {
513 .mfr_id = MANUFACTURER_AMD,
514 .dev_id = AM29LV040B,
515 .name = "AMD AM29LV040B",
516 .uaddr = {
517 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
518 },
519 .DevSize = SIZE_512KiB,
520 .CmdSet = P_ID_AMD_STD,
521 .NumEraseRegions= 1,
522 .regions = {
523 ERASEINFO(0x10000,8),
524 }
525 }, {
526 .mfr_id = MANUFACTURER_AMD,
527 .dev_id = AM29F002T,
528 .name = "AMD AM29F002T",
529 .uaddr = {
530 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
531 },
532 .DevSize = SIZE_256KiB,
533 .CmdSet = P_ID_AMD_STD,
534 .NumEraseRegions= 4,
535 .regions = {
536 ERASEINFO(0x10000,3),
537 ERASEINFO(0x08000,1),
538 ERASEINFO(0x02000,2),
539 ERASEINFO(0x04000,1),
540 }
541 }, {
542 .mfr_id = MANUFACTURER_ATMEL,
543 .dev_id = AT49BV512,
544 .name = "Atmel AT49BV512",
545 .uaddr = {
546 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
547 },
548 .DevSize = SIZE_64KiB,
549 .CmdSet = P_ID_AMD_STD,
550 .NumEraseRegions= 1,
551 .regions = {
552 ERASEINFO(0x10000,1)
553 }
554 }, {
555 .mfr_id = MANUFACTURER_ATMEL,
556 .dev_id = AT29LV512,
557 .name = "Atmel AT29LV512",
558 .uaddr = {
559 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
560 },
561 .DevSize = SIZE_64KiB,
562 .CmdSet = P_ID_AMD_STD,
563 .NumEraseRegions= 1,
564 .regions = {
565 ERASEINFO(0x80,256),
566 ERASEINFO(0x80,256)
567 }
568 }, {
569 .mfr_id = MANUFACTURER_ATMEL,
570 .dev_id = AT49BV16X,
571 .name = "Atmel AT49BV16X",
572 .uaddr = {
573 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
574 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
575 },
576 .DevSize = SIZE_2MiB,
577 .CmdSet = P_ID_AMD_STD,
578 .NumEraseRegions= 2,
579 .regions = {
580 ERASEINFO(0x02000,8),
581 ERASEINFO(0x10000,31)
582 }
583 }, {
584 .mfr_id = MANUFACTURER_ATMEL,
585 .dev_id = AT49BV16XT,
586 .name = "Atmel AT49BV16XT",
587 .uaddr = {
588 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
589 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
590 },
591 .DevSize = SIZE_2MiB,
592 .CmdSet = P_ID_AMD_STD,
593 .NumEraseRegions= 2,
594 .regions = {
595 ERASEINFO(0x10000,31),
596 ERASEINFO(0x02000,8)
597 }
598 }, {
599 .mfr_id = MANUFACTURER_ATMEL,
600 .dev_id = AT49BV32X,
601 .name = "Atmel AT49BV32X",
602 .uaddr = {
603 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
604 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
605 },
606 .DevSize = SIZE_4MiB,
607 .CmdSet = P_ID_AMD_STD,
608 .NumEraseRegions= 2,
609 .regions = {
610 ERASEINFO(0x02000,8),
611 ERASEINFO(0x10000,63)
612 }
613 }, {
614 .mfr_id = MANUFACTURER_ATMEL,
615 .dev_id = AT49BV32XT,
616 .name = "Atmel AT49BV32XT",
617 .uaddr = {
618 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
619 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
620 },
621 .DevSize = SIZE_4MiB,
622 .CmdSet = P_ID_AMD_STD,
623 .NumEraseRegions= 2,
624 .regions = {
625 ERASEINFO(0x10000,63),
626 ERASEINFO(0x02000,8)
627 }
628 }, {
629 .mfr_id = MANUFACTURER_FUJITSU,
630 .dev_id = MBM29F040C,
631 .name = "Fujitsu MBM29F040C",
632 .uaddr = {
633 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
634 },
635 .DevSize = SIZE_512KiB,
636 .CmdSet = P_ID_AMD_STD,
637 .NumEraseRegions= 1,
638 .regions = {
639 ERASEINFO(0x10000,8)
640 }
641 }, {
642 .mfr_id = MANUFACTURER_FUJITSU,
643 .dev_id = MBM29LV650UE,
644 .name = "Fujitsu MBM29LV650UE",
645 .uaddr = {
646 [0] = MTD_UADDR_DONT_CARE /* x16 */
647 },
648 .DevSize = SIZE_8MiB,
649 .CmdSet = P_ID_AMD_STD,
650 .NumEraseRegions= 1,
651 .regions = {
652 ERASEINFO(0x10000,128)
653 }
654 }, {
655 .mfr_id = MANUFACTURER_FUJITSU,
656 .dev_id = MBM29LV320TE,
657 .name = "Fujitsu MBM29LV320TE",
658 .uaddr = {
659 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
660 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
661 },
662 .DevSize = SIZE_4MiB,
663 .CmdSet = P_ID_AMD_STD,
664 .NumEraseRegions= 2,
665 .regions = {
666 ERASEINFO(0x10000,63),
667 ERASEINFO(0x02000,8)
668 }
669 }, {
670 .mfr_id = MANUFACTURER_FUJITSU,
671 .dev_id = MBM29LV320BE,
672 .name = "Fujitsu MBM29LV320BE",
673 .uaddr = {
674 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
675 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
676 },
677 .DevSize = SIZE_4MiB,
678 .CmdSet = P_ID_AMD_STD,
679 .NumEraseRegions= 2,
680 .regions = {
681 ERASEINFO(0x02000,8),
682 ERASEINFO(0x10000,63)
683 }
684 }, {
685 .mfr_id = MANUFACTURER_FUJITSU,
686 .dev_id = MBM29LV160TE,
687 .name = "Fujitsu MBM29LV160TE",
688 .uaddr = {
689 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
690 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
691 },
692 .DevSize = SIZE_2MiB,
693 .CmdSet = P_ID_AMD_STD,
694 .NumEraseRegions= 4,
695 .regions = {
696 ERASEINFO(0x10000,31),
697 ERASEINFO(0x08000,1),
698 ERASEINFO(0x02000,2),
699 ERASEINFO(0x04000,1)
700 }
701 }, {
702 .mfr_id = MANUFACTURER_FUJITSU,
703 .dev_id = MBM29LV160BE,
704 .name = "Fujitsu MBM29LV160BE",
705 .uaddr = {
706 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
707 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
708 },
709 .DevSize = SIZE_2MiB,
710 .CmdSet = P_ID_AMD_STD,
711 .NumEraseRegions= 4,
712 .regions = {
713 ERASEINFO(0x04000,1),
714 ERASEINFO(0x02000,2),
715 ERASEINFO(0x08000,1),
716 ERASEINFO(0x10000,31)
717 }
718 }, {
719 .mfr_id = MANUFACTURER_FUJITSU,
720 .dev_id = MBM29LV800BA,
721 .name = "Fujitsu MBM29LV800BA",
722 .uaddr = {
723 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
724 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
725 },
726 .DevSize = SIZE_1MiB,
727 .CmdSet = P_ID_AMD_STD,
728 .NumEraseRegions= 4,
729 .regions = {
730 ERASEINFO(0x04000,1),
731 ERASEINFO(0x02000,2),
732 ERASEINFO(0x08000,1),
733 ERASEINFO(0x10000,15)
734 }
735 }, {
736 .mfr_id = MANUFACTURER_FUJITSU,
737 .dev_id = MBM29LV800TA,
738 .name = "Fujitsu MBM29LV800TA",
739 .uaddr = {
740 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
741 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
742 },
743 .DevSize = SIZE_1MiB,
744 .CmdSet = P_ID_AMD_STD,
745 .NumEraseRegions= 4,
746 .regions = {
747 ERASEINFO(0x10000,15),
748 ERASEINFO(0x08000,1),
749 ERASEINFO(0x02000,2),
750 ERASEINFO(0x04000,1)
751 }
752 }, {
753 .mfr_id = MANUFACTURER_FUJITSU,
754 .dev_id = MBM29LV400BC,
755 .name = "Fujitsu MBM29LV400BC",
756 .uaddr = {
757 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
758 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
759 },
760 .DevSize = SIZE_512KiB,
761 .CmdSet = P_ID_AMD_STD,
762 .NumEraseRegions= 4,
763 .regions = {
764 ERASEINFO(0x04000,1),
765 ERASEINFO(0x02000,2),
766 ERASEINFO(0x08000,1),
767 ERASEINFO(0x10000,7)
768 }
769 }, {
770 .mfr_id = MANUFACTURER_FUJITSU,
771 .dev_id = MBM29LV400TC,
772 .name = "Fujitsu MBM29LV400TC",
773 .uaddr = {
774 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
775 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
776 },
777 .DevSize = SIZE_512KiB,
778 .CmdSet = P_ID_AMD_STD,
779 .NumEraseRegions= 4,
780 .regions = {
781 ERASEINFO(0x10000,7),
782 ERASEINFO(0x08000,1),
783 ERASEINFO(0x02000,2),
784 ERASEINFO(0x04000,1)
785 }
786 }, {
787 .mfr_id = MANUFACTURER_HYUNDAI,
788 .dev_id = HY29F002T,
789 .name = "Hyundai HY29F002T",
790 .uaddr = {
791 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
792 },
793 .DevSize = SIZE_256KiB,
794 .CmdSet = P_ID_AMD_STD,
795 .NumEraseRegions= 4,
796 .regions = {
797 ERASEINFO(0x10000,3),
798 ERASEINFO(0x08000,1),
799 ERASEINFO(0x02000,2),
800 ERASEINFO(0x04000,1),
801 }
802 }, {
803 .mfr_id = MANUFACTURER_INTEL,
804 .dev_id = I28F004B3B,
805 .name = "Intel 28F004B3B",
806 .uaddr = {
807 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
808 },
809 .DevSize = SIZE_512KiB,
810 .CmdSet = P_ID_INTEL_STD,
811 .NumEraseRegions= 2,
812 .regions = {
813 ERASEINFO(0x02000, 8),
814 ERASEINFO(0x10000, 7),
815 }
816 }, {
817 .mfr_id = MANUFACTURER_INTEL,
818 .dev_id = I28F004B3T,
819 .name = "Intel 28F004B3T",
820 .uaddr = {
821 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
822 },
823 .DevSize = SIZE_512KiB,
824 .CmdSet = P_ID_INTEL_STD,
825 .NumEraseRegions= 2,
826 .regions = {
827 ERASEINFO(0x10000, 7),
828 ERASEINFO(0x02000, 8),
829 }
830 }, {
831 .mfr_id = MANUFACTURER_INTEL,
832 .dev_id = I28F400B3B,
833 .name = "Intel 28F400B3B",
834 .uaddr = {
835 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
836 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
837 },
838 .DevSize = SIZE_512KiB,
839 .CmdSet = P_ID_INTEL_STD,
840 .NumEraseRegions= 2,
841 .regions = {
842 ERASEINFO(0x02000, 8),
843 ERASEINFO(0x10000, 7),
844 }
845 }, {
846 .mfr_id = MANUFACTURER_INTEL,
847 .dev_id = I28F400B3T,
848 .name = "Intel 28F400B3T",
849 .uaddr = {
850 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
851 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
852 },
853 .DevSize = SIZE_512KiB,
854 .CmdSet = P_ID_INTEL_STD,
855 .NumEraseRegions= 2,
856 .regions = {
857 ERASEINFO(0x10000, 7),
858 ERASEINFO(0x02000, 8),
859 }
860 }, {
861 .mfr_id = MANUFACTURER_INTEL,
862 .dev_id = I28F008B3B,
863 .name = "Intel 28F008B3B",
864 .uaddr = {
865 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
866 },
867 .DevSize = SIZE_1MiB,
868 .CmdSet = P_ID_INTEL_STD,
869 .NumEraseRegions= 2,
870 .regions = {
871 ERASEINFO(0x02000, 8),
872 ERASEINFO(0x10000, 15),
873 }
874 }, {
875 .mfr_id = MANUFACTURER_INTEL,
876 .dev_id = I28F008B3T,
877 .name = "Intel 28F008B3T",
878 .uaddr = {
879 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
880 },
881 .DevSize = SIZE_1MiB,
882 .CmdSet = P_ID_INTEL_STD,
883 .NumEraseRegions= 2,
884 .regions = {
885 ERASEINFO(0x10000, 15),
886 ERASEINFO(0x02000, 8),
887 }
888 }, {
889 .mfr_id = MANUFACTURER_INTEL,
890 .dev_id = I28F008S5,
891 .name = "Intel 28F008S5",
892 .uaddr = {
893 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
894 },
895 .DevSize = SIZE_1MiB,
896 .CmdSet = P_ID_INTEL_EXT,
897 .NumEraseRegions= 1,
898 .regions = {
899 ERASEINFO(0x10000,16),
900 }
901 }, {
902 .mfr_id = MANUFACTURER_INTEL,
903 .dev_id = I28F016S5,
904 .name = "Intel 28F016S5",
905 .uaddr = {
906 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
907 },
908 .DevSize = SIZE_2MiB,
909 .CmdSet = P_ID_INTEL_EXT,
910 .NumEraseRegions= 1,
911 .regions = {
912 ERASEINFO(0x10000,32),
913 }
914 }, {
915 .mfr_id = MANUFACTURER_INTEL,
916 .dev_id = I28F008SA,
917 .name = "Intel 28F008SA",
918 .uaddr = {
919 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
920 },
921 .DevSize = SIZE_1MiB,
922 .CmdSet = P_ID_INTEL_STD,
923 .NumEraseRegions= 1,
924 .regions = {
925 ERASEINFO(0x10000, 16),
926 }
927 }, {
928 .mfr_id = MANUFACTURER_INTEL,
929 .dev_id = I28F800B3B,
930 .name = "Intel 28F800B3B",
931 .uaddr = {
932 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
933 },
934 .DevSize = SIZE_1MiB,
935 .CmdSet = P_ID_INTEL_STD,
936 .NumEraseRegions= 2,
937 .regions = {
938 ERASEINFO(0x02000, 8),
939 ERASEINFO(0x10000, 15),
940 }
941 }, {
942 .mfr_id = MANUFACTURER_INTEL,
943 .dev_id = I28F800B3T,
944 .name = "Intel 28F800B3T",
945 .uaddr = {
946 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
947 },
948 .DevSize = SIZE_1MiB,
949 .CmdSet = P_ID_INTEL_STD,
950 .NumEraseRegions= 2,
951 .regions = {
952 ERASEINFO(0x10000, 15),
953 ERASEINFO(0x02000, 8),
954 }
955 }, {
956 .mfr_id = MANUFACTURER_INTEL,
957 .dev_id = I28F016B3B,
958 .name = "Intel 28F016B3B",
959 .uaddr = {
960 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
961 },
962 .DevSize = SIZE_2MiB,
963 .CmdSet = P_ID_INTEL_STD,
964 .NumEraseRegions= 2,
965 .regions = {
966 ERASEINFO(0x02000, 8),
967 ERASEINFO(0x10000, 31),
968 }
969 }, {
970 .mfr_id = MANUFACTURER_INTEL,
971 .dev_id = I28F016S3,
972 .name = "Intel I28F016S3",
973 .uaddr = {
974 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
975 },
976 .DevSize = SIZE_2MiB,
977 .CmdSet = P_ID_INTEL_STD,
978 .NumEraseRegions= 1,
979 .regions = {
980 ERASEINFO(0x10000, 32),
981 }
982 }, {
983 .mfr_id = MANUFACTURER_INTEL,
984 .dev_id = I28F016B3T,
985 .name = "Intel 28F016B3T",
986 .uaddr = {
987 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
988 },
989 .DevSize = SIZE_2MiB,
990 .CmdSet = P_ID_INTEL_STD,
991 .NumEraseRegions= 2,
992 .regions = {
993 ERASEINFO(0x10000, 31),
994 ERASEINFO(0x02000, 8),
995 }
996 }, {
997 .mfr_id = MANUFACTURER_INTEL,
998 .dev_id = I28F160B3B,
999 .name = "Intel 28F160B3B",
1000 .uaddr = {
1001 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1002 },
1003 .DevSize = SIZE_2MiB,
1004 .CmdSet = P_ID_INTEL_STD,
1005 .NumEraseRegions= 2,
1006 .regions = {
1007 ERASEINFO(0x02000, 8),
1008 ERASEINFO(0x10000, 31),
1009 }
1010 }, {
1011 .mfr_id = MANUFACTURER_INTEL,
1012 .dev_id = I28F160B3T,
1013 .name = "Intel 28F160B3T",
1014 .uaddr = {
1015 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1016 },
1017 .DevSize = SIZE_2MiB,
1018 .CmdSet = P_ID_INTEL_STD,
1019 .NumEraseRegions= 2,
1020 .regions = {
1021 ERASEINFO(0x10000, 31),
1022 ERASEINFO(0x02000, 8),
1023 }
1024 }, {
1025 .mfr_id = MANUFACTURER_INTEL,
1026 .dev_id = I28F320B3B,
1027 .name = "Intel 28F320B3B",
1028 .uaddr = {
1029 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1030 },
1031 .DevSize = SIZE_4MiB,
1032 .CmdSet = P_ID_INTEL_STD,
1033 .NumEraseRegions= 2,
1034 .regions = {
1035 ERASEINFO(0x02000, 8),
1036 ERASEINFO(0x10000, 63),
1037 }
1038 }, {
1039 .mfr_id = MANUFACTURER_INTEL,
1040 .dev_id = I28F320B3T,
1041 .name = "Intel 28F320B3T",
1042 .uaddr = {
1043 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1044 },
1045 .DevSize = SIZE_4MiB,
1046 .CmdSet = P_ID_INTEL_STD,
1047 .NumEraseRegions= 2,
1048 .regions = {
1049 ERASEINFO(0x10000, 63),
1050 ERASEINFO(0x02000, 8),
1051 }
1052 }, {
1053 .mfr_id = MANUFACTURER_INTEL,
1054 .dev_id = I28F640B3B,
1055 .name = "Intel 28F640B3B",
1056 .uaddr = {
1057 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1058 },
1059 .DevSize = SIZE_8MiB,
1060 .CmdSet = P_ID_INTEL_STD,
1061 .NumEraseRegions= 2,
1062 .regions = {
1063 ERASEINFO(0x02000, 8),
1064 ERASEINFO(0x10000, 127),
1065 }
1066 }, {
1067 .mfr_id = MANUFACTURER_INTEL,
1068 .dev_id = I28F640B3T,
1069 .name = "Intel 28F640B3T",
1070 .uaddr = {
1071 [1] = MTD_UADDR_UNNECESSARY, /* x16 */
1072 },
1073 .DevSize = SIZE_8MiB,
1074 .CmdSet = P_ID_INTEL_STD,
1075 .NumEraseRegions= 2,
1076 .regions = {
1077 ERASEINFO(0x10000, 127),
1078 ERASEINFO(0x02000, 8),
1079 }
1080 }, {
1081 .mfr_id = MANUFACTURER_INTEL,
1082 .dev_id = I82802AB,
1083 .name = "Intel 82802AB",
1084 .uaddr = {
1085 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1086 },
1087 .DevSize = SIZE_512KiB,
1088 .CmdSet = P_ID_INTEL_EXT,
1089 .NumEraseRegions= 1,
1090 .regions = {
1091 ERASEINFO(0x10000,8),
1092 }
1093 }, {
1094 .mfr_id = MANUFACTURER_INTEL,
1095 .dev_id = I82802AC,
1096 .name = "Intel 82802AC",
1097 .uaddr = {
1098 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1099 },
1100 .DevSize = SIZE_1MiB,
1101 .CmdSet = P_ID_INTEL_EXT,
1102 .NumEraseRegions= 1,
1103 .regions = {
1104 ERASEINFO(0x10000,16),
1105 }
1106 }, {
1107 .mfr_id = MANUFACTURER_MACRONIX,
1108 .dev_id = MX29LV040C,
1109 .name = "Macronix MX29LV040C",
1110 .uaddr = {
1111 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1112 },
1113 .DevSize = SIZE_512KiB,
1114 .CmdSet = P_ID_AMD_STD,
1115 .NumEraseRegions= 1,
1116 .regions = {
1117 ERASEINFO(0x10000,8),
1118 }
1119 }, {
1120 .mfr_id = MANUFACTURER_MACRONIX,
1121 .dev_id = MX29LV160T,
1122 .name = "MXIC MX29LV160T",
1123 .uaddr = {
1124 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1125 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1126 },
1127 .DevSize = SIZE_2MiB,
1128 .CmdSet = P_ID_AMD_STD,
1129 .NumEraseRegions= 4,
1130 .regions = {
1131 ERASEINFO(0x10000,31),
1132 ERASEINFO(0x08000,1),
1133 ERASEINFO(0x02000,2),
1134 ERASEINFO(0x04000,1)
1135 }
1136 }, {
1137 .mfr_id = MANUFACTURER_NEC,
1138 .dev_id = UPD29F064115,
1139 .name = "NEC uPD29F064115",
1140 .uaddr = {
1141 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1142 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1143 },
1144 .DevSize = SIZE_8MiB,
1145 .CmdSet = P_ID_AMD_STD,
1146 .NumEraseRegions= 3,
1147 .regions = {
1148 ERASEINFO(0x2000,8),
1149 ERASEINFO(0x10000,126),
1150 ERASEINFO(0x2000,8),
1151 }
1152 }, {
1153 .mfr_id = MANUFACTURER_MACRONIX,
1154 .dev_id = MX29LV160B,
1155 .name = "MXIC MX29LV160B",
1156 .uaddr = {
1157 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1158 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1159 },
1160 .DevSize = SIZE_2MiB,
1161 .CmdSet = P_ID_AMD_STD,
1162 .NumEraseRegions= 4,
1163 .regions = {
1164 ERASEINFO(0x04000,1),
1165 ERASEINFO(0x02000,2),
1166 ERASEINFO(0x08000,1),
1167 ERASEINFO(0x10000,31)
1168 }
1169 }, {
1170 .mfr_id = MANUFACTURER_MACRONIX,
1171 .dev_id = MX29F016,
1172 .name = "Macronix MX29F016",
1173 .uaddr = {
1174 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1175 },
1176 .DevSize = SIZE_2MiB,
1177 .CmdSet = P_ID_AMD_STD,
1178 .NumEraseRegions= 1,
1179 .regions = {
1180 ERASEINFO(0x10000,32),
1181 }
1182 }, {
1183 .mfr_id = MANUFACTURER_MACRONIX,
1184 .dev_id = MX29F004T,
1185 .name = "Macronix MX29F004T",
1186 .uaddr = {
1187 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1188 },
1189 .DevSize = SIZE_512KiB,
1190 .CmdSet = P_ID_AMD_STD,
1191 .NumEraseRegions= 4,
1192 .regions = {
1193 ERASEINFO(0x10000,7),
1194 ERASEINFO(0x08000,1),
1195 ERASEINFO(0x02000,2),
1196 ERASEINFO(0x04000,1),
1197 }
1198 }, {
1199 .mfr_id = MANUFACTURER_MACRONIX,
1200 .dev_id = MX29F004B,
1201 .name = "Macronix MX29F004B",
1202 .uaddr = {
1203 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1204 },
1205 .DevSize = SIZE_512KiB,
1206 .CmdSet = P_ID_AMD_STD,
1207 .NumEraseRegions= 4,
1208 .regions = {
1209 ERASEINFO(0x04000,1),
1210 ERASEINFO(0x02000,2),
1211 ERASEINFO(0x08000,1),
1212 ERASEINFO(0x10000,7),
1213 }
1214 }, {
1215 .mfr_id = MANUFACTURER_MACRONIX,
1216 .dev_id = MX29F002T,
1217 .name = "Macronix MX29F002T",
1218 .uaddr = {
1219 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1220 },
1221 .DevSize = SIZE_256KiB,
1222 .CmdSet = P_ID_AMD_STD,
1223 .NumEraseRegions= 4,
1224 .regions = {
1225 ERASEINFO(0x10000,3),
1226 ERASEINFO(0x08000,1),
1227 ERASEINFO(0x02000,2),
1228 ERASEINFO(0x04000,1),
1229 }
1230 }, {
1231 .mfr_id = MANUFACTURER_PMC,
1232 .dev_id = PM49FL002,
1233 .name = "PMC Pm49FL002",
1234 .uaddr = {
1235 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1236 },
1237 .DevSize = SIZE_256KiB,
1238 .CmdSet = P_ID_AMD_STD,
1239 .NumEraseRegions= 1,
1240 .regions = {
1241 ERASEINFO( 0x01000, 64 )
1242 }
1243 }, {
1244 .mfr_id = MANUFACTURER_PMC,
1245 .dev_id = PM49FL004,
1246 .name = "PMC Pm49FL004",
1247 .uaddr = {
1248 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1249 },
1250 .DevSize = SIZE_512KiB,
1251 .CmdSet = P_ID_AMD_STD,
1252 .NumEraseRegions= 1,
1253 .regions = {
1254 ERASEINFO( 0x01000, 128 )
1255 }
1256 }, {
1257 .mfr_id = MANUFACTURER_PMC,
1258 .dev_id = PM49FL008,
1259 .name = "PMC Pm49FL008",
1260 .uaddr = {
1261 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1262 },
1263 .DevSize = SIZE_1MiB,
1264 .CmdSet = P_ID_AMD_STD,
1265 .NumEraseRegions= 1,
1266 .regions = {
1267 ERASEINFO( 0x01000, 256 )
1268 }
1269 }, {
1270 .mfr_id = MANUFACTURER_SST,
1271 .dev_id = SST39LF512,
1272 .name = "SST 39LF512",
1273 .uaddr = {
1274 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1275 },
1276 .DevSize = SIZE_64KiB,
1277 .CmdSet = P_ID_AMD_STD,
1278 .NumEraseRegions= 1,
1279 .regions = {
1280 ERASEINFO(0x01000,16),
1281 }
1282 }, {
1283 .mfr_id = MANUFACTURER_SST,
1284 .dev_id = SST39LF010,
1285 .name = "SST 39LF010",
1286 .uaddr = {
1287 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1288 },
1289 .DevSize = SIZE_128KiB,
1290 .CmdSet = P_ID_AMD_STD,
1291 .NumEraseRegions= 1,
1292 .regions = {
1293 ERASEINFO(0x01000,32),
1294 }
1295 }, {
1296 .mfr_id = MANUFACTURER_SST,
1297 .dev_id = SST29EE020,
1298 .name = "SST 29EE020",
1299 .uaddr = {
1300 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1301 },
1302 .DevSize = SIZE_256KiB,
1303 .CmdSet = P_ID_SST_PAGE,
1304 .NumEraseRegions= 1,
1305 .regions = {ERASEINFO(0x01000,64),
1306 }
1307 }, {
1308 .mfr_id = MANUFACTURER_SST,
1309 .dev_id = SST29LE020,
1310 .name = "SST 29LE020",
1311 .uaddr = {
1312 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1313 },
1314 .DevSize = SIZE_256KiB,
1315 .CmdSet = P_ID_SST_PAGE,
1316 .NumEraseRegions= 1,
1317 .regions = {ERASEINFO(0x01000,64),
1318 }
1319 }, {
1320 .mfr_id = MANUFACTURER_SST,
1321 .dev_id = SST39LF020,
1322 .name = "SST 39LF020",
1323 .uaddr = {
1324 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1325 },
1326 .DevSize = SIZE_256KiB,
1327 .CmdSet = P_ID_AMD_STD,
1328 .NumEraseRegions= 1,
1329 .regions = {
1330 ERASEINFO(0x01000,64),
1331 }
1332 }, {
1333 .mfr_id = MANUFACTURER_SST,
1334 .dev_id = SST39LF040,
1335 .name = "SST 39LF040",
1336 .uaddr = {
1337 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1338 },
1339 .DevSize = SIZE_512KiB,
1340 .CmdSet = P_ID_AMD_STD,
1341 .NumEraseRegions= 1,
1342 .regions = {
1343 ERASEINFO(0x01000,128),
1344 }
1345 }, {
1346 .mfr_id = MANUFACTURER_SST,
1347 .dev_id = SST39SF010A,
1348 .name = "SST 39SF010A",
1349 .uaddr = {
1350 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1351 },
1352 .DevSize = SIZE_128KiB,
1353 .CmdSet = P_ID_AMD_STD,
1354 .NumEraseRegions= 1,
1355 .regions = {
1356 ERASEINFO(0x01000,32),
1357 }
1358 }, {
1359 .mfr_id = MANUFACTURER_SST,
1360 .dev_id = SST39SF020A,
1361 .name = "SST 39SF020A",
1362 .uaddr = {
1363 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1364 },
1365 .DevSize = SIZE_256KiB,
1366 .CmdSet = P_ID_AMD_STD,
1367 .NumEraseRegions= 1,
1368 .regions = {
1369 ERASEINFO(0x01000,64),
1370 }
1371 }, {
1372 .mfr_id = MANUFACTURER_SST,
1373 .dev_id = SST49LF004B,
1374 .name = "SST 49LF004B",
1375 .uaddr = {
1376 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1377 },
1378 .DevSize = SIZE_512KiB,
1379 .CmdSet = P_ID_AMD_STD,
1380 .NumEraseRegions= 1,
1381 .regions = {
1382 ERASEINFO(0x01000,128),
1383 }
1384 }, {
1385 .mfr_id = MANUFACTURER_SST,
1386 .dev_id = SST49LF008A,
1387 .name = "SST 49LF008A",
1388 .uaddr = {
1389 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1390 },
1391 .DevSize = SIZE_1MiB,
1392 .CmdSet = P_ID_AMD_STD,
1393 .NumEraseRegions= 1,
1394 .regions = {
1395 ERASEINFO(0x01000,256),
1396 }
1397 }, {
1398 .mfr_id = MANUFACTURER_SST,
1399 .dev_id = SST49LF030A,
1400 .name = "SST 49LF030A",
1401 .uaddr = {
1402 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1403 },
1404 .DevSize = SIZE_512KiB,
1405 .CmdSet = P_ID_AMD_STD,
1406 .NumEraseRegions= 1,
1407 .regions = {
1408 ERASEINFO(0x01000,96),
1409 }
1410 }, {
1411 .mfr_id = MANUFACTURER_SST,
1412 .dev_id = SST49LF040A,
1413 .name = "SST 49LF040A",
1414 .uaddr = {
1415 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1416 },
1417 .DevSize = SIZE_512KiB,
1418 .CmdSet = P_ID_AMD_STD,
1419 .NumEraseRegions= 1,
1420 .regions = {
1421 ERASEINFO(0x01000,128),
1422 }
1423 }, {
1424 .mfr_id = MANUFACTURER_SST,
1425 .dev_id = SST49LF080A,
1426 .name = "SST 49LF080A",
1427 .uaddr = {
1428 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1429 },
1430 .DevSize = SIZE_1MiB,
1431 .CmdSet = P_ID_AMD_STD,
1432 .NumEraseRegions= 1,
1433 .regions = {
1434 ERASEINFO(0x01000,256),
1435 }
1436 }, {
1437 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1438 .dev_id = SST39LF160,
1439 .name = "SST 39LF160",
1440 .uaddr = {
1441 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1442 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1443 },
1444 .DevSize = SIZE_2MiB,
1445 .CmdSet = P_ID_AMD_STD,
1446 .NumEraseRegions= 2,
1447 .regions = {
1448 ERASEINFO(0x1000,256),
1449 ERASEINFO(0x1000,256)
1450 }
1451
1452 }, {
1453 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1454 .dev_id = M29W800DT,
1455 .name = "ST M29W800DT",
1456 .uaddr = {
1457 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1458 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1459 },
1460 .DevSize = SIZE_1MiB,
1461 .CmdSet = P_ID_AMD_STD,
1462 .NumEraseRegions= 4,
1463 .regions = {
1464 ERASEINFO(0x10000,15),
1465 ERASEINFO(0x08000,1),
1466 ERASEINFO(0x02000,2),
1467 ERASEINFO(0x04000,1)
1468 }
1469 }, {
1470 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1471 .dev_id = M29W800DB,
1472 .name = "ST M29W800DB",
1473 .uaddr = {
1474 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
1475 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
1476 },
1477 .DevSize = SIZE_1MiB,
1478 .CmdSet = P_ID_AMD_STD,
1479 .NumEraseRegions= 4,
1480 .regions = {
1481 ERASEINFO(0x04000,1),
1482 ERASEINFO(0x02000,2),
1483 ERASEINFO(0x08000,1),
1484 ERASEINFO(0x10000,15)
1485 }
1486 }, {
1487 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1488 .dev_id = M29W160DT,
1489 .name = "ST M29W160DT",
1490 .uaddr = {
1491 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1492 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1493 },
1494 .DevSize = SIZE_2MiB,
1495 .CmdSet = P_ID_AMD_STD,
1496 .NumEraseRegions= 4,
1497 .regions = {
1498 ERASEINFO(0x10000,31),
1499 ERASEINFO(0x08000,1),
1500 ERASEINFO(0x02000,2),
1501 ERASEINFO(0x04000,1)
1502 }
1503 }, {
1504 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1505 .dev_id = M29W160DB,
1506 .name = "ST M29W160DB",
1507 .uaddr = {
1508 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
1509 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1510 },
1511 .DevSize = SIZE_2MiB,
1512 .CmdSet = P_ID_AMD_STD,
1513 .NumEraseRegions= 4,
1514 .regions = {
1515 ERASEINFO(0x04000,1),
1516 ERASEINFO(0x02000,2),
1517 ERASEINFO(0x08000,1),
1518 ERASEINFO(0x10000,31)
1519 }
1520 }, {
1521 .mfr_id = MANUFACTURER_ST,
1522 .dev_id = M29W040B,
1523 .name = "ST M29W040B",
1524 .uaddr = {
1525 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1526 },
1527 .DevSize = SIZE_512KiB,
1528 .CmdSet = P_ID_AMD_STD,
1529 .NumEraseRegions= 1,
1530 .regions = {
1531 ERASEINFO(0x10000,8),
1532 }
1533 }, {
1534 .mfr_id = MANUFACTURER_ST,
1535 .dev_id = M50FW040,
1536 .name = "ST M50FW040",
1537 .uaddr = {
1538 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1539 },
1540 .DevSize = SIZE_512KiB,
1541 .CmdSet = P_ID_INTEL_EXT,
1542 .NumEraseRegions= 1,
1543 .regions = {
1544 ERASEINFO(0x10000,8),
1545 }
1546 }, {
1547 .mfr_id = MANUFACTURER_ST,
1548 .dev_id = M50FW080,
1549 .name = "ST M50FW080",
1550 .uaddr = {
1551 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1552 },
1553 .DevSize = SIZE_1MiB,
1554 .CmdSet = P_ID_INTEL_EXT,
1555 .NumEraseRegions= 1,
1556 .regions = {
1557 ERASEINFO(0x10000,16),
1558 }
1559 }, {
1560 .mfr_id = MANUFACTURER_ST,
1561 .dev_id = M50FW016,
1562 .name = "ST M50FW016",
1563 .uaddr = {
1564 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1565 },
1566 .DevSize = SIZE_2MiB,
1567 .CmdSet = P_ID_INTEL_EXT,
1568 .NumEraseRegions= 1,
1569 .regions = {
1570 ERASEINFO(0x10000,32),
1571 }
1572 }, {
1573 .mfr_id = MANUFACTURER_ST,
1574 .dev_id = M50LPW080,
1575 .name = "ST M50LPW080",
1576 .uaddr = {
1577 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1578 },
1579 .DevSize = SIZE_1MiB,
1580 .CmdSet = P_ID_INTEL_EXT,
1581 .NumEraseRegions= 1,
1582 .regions = {
1583 ERASEINFO(0x10000,16),
1584 }
1585 }, {
1586 .mfr_id = MANUFACTURER_TOSHIBA,
1587 .dev_id = TC58FVT160,
1588 .name = "Toshiba TC58FVT160",
1589 .uaddr = {
1590 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1591 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1592 },
1593 .DevSize = SIZE_2MiB,
1594 .CmdSet = P_ID_AMD_STD,
1595 .NumEraseRegions= 4,
1596 .regions = {
1597 ERASEINFO(0x10000,31),
1598 ERASEINFO(0x08000,1),
1599 ERASEINFO(0x02000,2),
1600 ERASEINFO(0x04000,1)
1601 }
1602 }, {
1603 .mfr_id = MANUFACTURER_TOSHIBA,
1604 .dev_id = TC58FVB160,
1605 .name = "Toshiba TC58FVB160",
1606 .uaddr = {
1607 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1608 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1609 },
1610 .DevSize = SIZE_2MiB,
1611 .CmdSet = P_ID_AMD_STD,
1612 .NumEraseRegions= 4,
1613 .regions = {
1614 ERASEINFO(0x04000,1),
1615 ERASEINFO(0x02000,2),
1616 ERASEINFO(0x08000,1),
1617 ERASEINFO(0x10000,31)
1618 }
1619 }, {
1620 .mfr_id = MANUFACTURER_TOSHIBA,
1621 .dev_id = TC58FVB321,
1622 .name = "Toshiba TC58FVB321",
1623 .uaddr = {
1624 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1625 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1626 },
1627 .DevSize = SIZE_4MiB,
1628 .CmdSet = P_ID_AMD_STD,
1629 .NumEraseRegions= 2,
1630 .regions = {
1631 ERASEINFO(0x02000,8),
1632 ERASEINFO(0x10000,63)
1633 }
1634 }, {
1635 .mfr_id = MANUFACTURER_TOSHIBA,
1636 .dev_id = TC58FVT321,
1637 .name = "Toshiba TC58FVT321",
1638 .uaddr = {
1639 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1640 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
1641 },
1642 .DevSize = SIZE_4MiB,
1643 .CmdSet = P_ID_AMD_STD,
1644 .NumEraseRegions= 2,
1645 .regions = {
1646 ERASEINFO(0x10000,63),
1647 ERASEINFO(0x02000,8)
1648 }
1649 }, {
1650 .mfr_id = MANUFACTURER_TOSHIBA,
1651 .dev_id = TC58FVB641,
1652 .name = "Toshiba TC58FVB641",
1653 .uaddr = {
1654 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1655 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1656 },
1657 .DevSize = SIZE_8MiB,
1658 .CmdSet = P_ID_AMD_STD,
1659 .NumEraseRegions= 2,
1660 .regions = {
1661 ERASEINFO(0x02000,8),
1662 ERASEINFO(0x10000,127)
1663 }
1664 }, {
1665 .mfr_id = MANUFACTURER_TOSHIBA,
1666 .dev_id = TC58FVT641,
1667 .name = "Toshiba TC58FVT641",
1668 .uaddr = {
1669 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
1670 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
1671 },
1672 .DevSize = SIZE_8MiB,
1673 .CmdSet = P_ID_AMD_STD,
1674 .NumEraseRegions= 2,
1675 .regions = {
1676 ERASEINFO(0x10000,127),
1677 ERASEINFO(0x02000,8)
1678 }
1679 }, {
1680 .mfr_id = MANUFACTURER_WINBOND,
1681 .dev_id = W49V002A,
1682 .name = "Winbond W49V002A",
1683 .uaddr = {
1684 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
1685 },
1686 .DevSize = SIZE_256KiB,
1687 .CmdSet = P_ID_AMD_STD,
1688 .NumEraseRegions= 4,
1689 .regions = {
1690 ERASEINFO(0x10000, 3),
1691 ERASEINFO(0x08000, 1),
1692 ERASEINFO(0x02000, 2),
1693 ERASEINFO(0x04000, 1),
1694 }
1695 }
1696};
1697
1698
1699static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
1700
1701static int jedec_probe_chip(struct map_info *map, __u32 base,
1702 unsigned long *chip_map, struct cfi_private *cfi);
1703
1704static struct mtd_info *jedec_probe(struct map_info *map);
1705
1706static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1707 struct cfi_private *cfi)
1708{
1709 map_word result;
1710 unsigned long mask;
1711 u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type);
1712 mask = (1 << (cfi->device_type * 8)) -1;
1713 result = map_read(map, base + ofs);
1714 return result.x[0] & mask;
1715}
1716
1717static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1718 struct cfi_private *cfi)
1719{
1720 map_word result;
1721 unsigned long mask;
1722 u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
1723 mask = (1 << (cfi->device_type * 8)) -1;
1724 result = map_read(map, base + ofs);
1725 return result.x[0] & mask;
1726}
1727
1728static inline void jedec_reset(u32 base, struct map_info *map,
1729 struct cfi_private *cfi)
1730{
1731 /* Reset */
1732
1733	/* After checking the datasheets for SST, Macronix and Atmel
1734	 * (and, incidentally, the JEDEC spec - 3.5.3.3), the reset
1735	 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
1736	 * 0x2aaa and 0xF0 at 0x5555. This will not affect the AMD chips,
1737	 * as they ignore the writes and don't care what address
1738	 * the 0xF0 is written to. */
1739 if(cfi->addr_unlock1) {
1740 DEBUG( MTD_DEBUG_LEVEL3,
1741 "reset unlock called %x %x \n",
1742 cfi->addr_unlock1,cfi->addr_unlock2);
1743 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1744 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1745 }
1746
1747 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1748	/* Some misdesigned Intel chips do not respond to 0xF0 as a reset,
1749	 * so ensure we're in read mode by sending both the Intel and the AMD
1750	 * command. Intel uses 0xFF for this, AMD treats 0xFF as a NOP, so
1751 * this should be safe.
1752 */
1753 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
1754 /* FIXME - should have reset delay before continuing */
1755}
1756
1757
1758static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
1759{
1760 int uaddr_idx;
1761 __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
1762
1763 switch ( device_type ) {
1764 case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
1765 case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
1766 case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
1767 default:
1768 printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
1769 __func__, device_type);
1770 goto uaddr_done;
1771 }
1772
1773 uaddr = finfo->uaddr[uaddr_idx];
1774
1775 if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
1776 /* ASSERT("The unlock addresses for non-8-bit mode
1777 are bollocks. We don't really need an array."); */
1778 uaddr = finfo->uaddr[0];
1779 }
1780
1781 uaddr_done:
1782 return uaddr;
1783}
1784
1785
1786static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1787{
1788 int i,num_erase_regions;
1789 __u8 uaddr;
1790
1791 printk("Found: %s\n",jedec_table[index].name);
1792
1793 num_erase_regions = jedec_table[index].NumEraseRegions;
1794
1795 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1796 if (!p_cfi->cfiq) {
1797 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1798 return 0;
1799 }
1800
1801 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1802
1803 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
1804 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
1805 p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
1806 p_cfi->cfi_mode = CFI_MODE_JEDEC;
1807
1808 for (i=0; i<num_erase_regions; i++){
1809 p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
1810 }
1811 p_cfi->cmdset_priv = NULL;
1812
1813 /* This may be redundant for some cases, but it doesn't hurt */
1814 p_cfi->mfr = jedec_table[index].mfr_id;
1815 p_cfi->id = jedec_table[index].dev_id;
1816
1817 uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
1818 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1819 kfree( p_cfi->cfiq );
1820 return 0;
1821 }
1822
1823 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
1824 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
1825
1826 return 1; /* ok */
1827}
1828
1829
1830/*
1831 * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
1832 * the mapped address, unlock addresses, and proper chip ID. This function
1833 * attempts to minimize errors. It is doubtful that this probe will ever
1834 * be perfect - consequently there should be some module parameters that
1835 * could be manually specified to force the chip info.
1836 */
1837static inline int jedec_match( __u32 base,
1838 struct map_info *map,
1839 struct cfi_private *cfi,
1840 const struct amd_flash_info *finfo )
1841{
1842 int rc = 0; /* failure until all tests pass */
1843 u32 mfr, id;
1844 __u8 uaddr;
1845
1846 /*
1847 * The IDs must match. For X16 and X32 devices operating in
1848 * a lower width ( X8 or X16 ), the device ID's are usually just
1849 * the lower byte(s) of the larger device ID for wider mode. If
1850 * a part is found that doesn't fit this assumption (device id for
1851	 * smaller width mode is completely unrelated to full-width mode)
1852 * then the jedec_table[] will have to be augmented with the IDs
1853 * for different widths.
1854 */
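	/* Editor's note (example, not in the original source): the x16
	 * AM29LV160DT in the table above has dev_id 0x22C4; read through an
	 * 8-bit bus only the low byte, 0xC4, is compared, which is what the
	 * (__u8)/(__u16) truncating casts below implement. */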
1855 switch (cfi->device_type) {
1856 case CFI_DEVICETYPE_X8:
1857 mfr = (__u8)finfo->mfr_id;
1858 id = (__u8)finfo->dev_id;
1859 break;
1860 case CFI_DEVICETYPE_X16:
1861 mfr = (__u16)finfo->mfr_id;
1862 id = (__u16)finfo->dev_id;
1863 break;
1864 case CFI_DEVICETYPE_X32:
1865 mfr = (__u16)finfo->mfr_id;
1866 id = (__u32)finfo->dev_id;
1867 break;
1868 default:
1869 printk(KERN_WARNING
1870 "MTD %s(): Unsupported device type %d\n",
1871 __func__, cfi->device_type);
1872 goto match_done;
1873 }
1874 if ( cfi->mfr != mfr || cfi->id != id ) {
1875 goto match_done;
1876 }
1877
1878 /* the part size must fit in the memory window */
1879 DEBUG( MTD_DEBUG_LEVEL3,
1880 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
1881 __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
1882 if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
1883 DEBUG( MTD_DEBUG_LEVEL3,
1884 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
1885 __func__, finfo->mfr_id, finfo->dev_id,
1886 1 << finfo->DevSize );
1887 goto match_done;
1888 }
1889
1890 uaddr = finfo_uaddr(finfo, cfi->device_type);
1891 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1892 goto match_done;
1893 }
1894
1895 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
1896 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
1897 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
1898 && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
1899 unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
1900 DEBUG( MTD_DEBUG_LEVEL3,
1901 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
1902 __func__,
1903 unlock_addrs[uaddr].addr1,
1904 unlock_addrs[uaddr].addr2);
1905 goto match_done;
1906 }
1907
1908 /*
1909	 * Make sure the IDs disappear when the device is taken out of
1910	 * ID mode. The only time this check fails when it should succeed
1911	 * is when the ID values happen to be stored as data at the same
1912	 * addresses. For this rare and unfortunate case the chip
1913 * cannot be probed correctly.
1914 * FIXME - write a driver that takes all of the chip info as
1915 * module parameters, doesn't probe but forces a load.
1916 */
1917 DEBUG( MTD_DEBUG_LEVEL3,
1918 "MTD %s(): check ID's disappear when not in ID mode\n",
1919 __func__ );
1920 jedec_reset( base, map, cfi );
1921 mfr = jedec_read_mfr( map, base, cfi );
1922 id = jedec_read_id( map, base, cfi );
1923 if ( mfr == cfi->mfr && id == cfi->id ) {
1924 DEBUG( MTD_DEBUG_LEVEL3,
1925 "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
1926 "You might need to manually specify JEDEC parameters.\n",
1927 __func__, cfi->mfr, cfi->id );
1928 goto match_done;
1929 }
1930
1931 /* all tests passed - mark as success */
1932 rc = 1;
1933
1934 /*
1935 * Put the device back in ID mode - only need to do this if we
1936 * were truly frobbing a real device.
1937 */
1938 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
1939 if(cfi->addr_unlock1) {
1940 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1941 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1942 }
1943 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1944 /* FIXME - should have a delay before continuing */
1945
1946 match_done:
1947 return rc;
1948}
1949
1950
1951static int jedec_probe_chip(struct map_info *map, __u32 base,
1952 unsigned long *chip_map, struct cfi_private *cfi)
1953{
1954 int i;
1955 enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
1956 u32 probe_offset1, probe_offset2;
1957
1958 retry:
1959 if (!cfi->numchips) {
1960 uaddr_idx++;
1961
1962 if (MTD_UADDR_UNNECESSARY == uaddr_idx)
1963 return 0;
1964
1965 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
1966 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
1967 }
1968
1969 /* Make certain we aren't probing past the end of map */
1970 if (base >= map->size) {
1971 printk(KERN_NOTICE
1972 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
1973 base, map->size -1);
1974 return 0;
1975
1976 }
1977 /* Ensure the unlock addresses we try stay inside the map */
1978 probe_offset1 = cfi_build_cmd_addr(
1979 cfi->addr_unlock1,
1980 cfi_interleave(cfi),
1981 cfi->device_type);
1982 probe_offset2 = cfi_build_cmd_addr(
1983		cfi->addr_unlock2,
1984 cfi_interleave(cfi),
1985 cfi->device_type);
1986 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
1987 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
1988 {
1989 goto retry;
1990 }
1991
1992 /* Reset */
1993 jedec_reset(base, map, cfi);
1994
1995 /* Autoselect Mode */
1996 if(cfi->addr_unlock1) {
1997 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1998 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
1999 }
2000 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2001 /* FIXME - should have a delay before continuing */
2002
2003 if (!cfi->numchips) {
2004 /* This is the first time we're called. Set up the CFI
2005 stuff accordingly and return */
2006
2007 cfi->mfr = jedec_read_mfr(map, base, cfi);
2008 cfi->id = jedec_read_id(map, base, cfi);
2009 DEBUG(MTD_DEBUG_LEVEL3,
2010 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2011 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2012 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
2013 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
2014 DEBUG( MTD_DEBUG_LEVEL3,
2015 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2016 __func__, cfi->mfr, cfi->id,
2017 cfi->addr_unlock1, cfi->addr_unlock2 );
2018 if (!cfi_jedec_setup(cfi, i))
2019 return 0;
2020 goto ok_out;
2021 }
2022 }
2023 goto retry;
2024 } else {
2025 __u16 mfr;
2026 __u16 id;
2027
2028 /* Make sure it is a chip of the same manufacturer and id */
2029 mfr = jedec_read_mfr(map, base, cfi);
2030 id = jedec_read_id(map, base, cfi);
2031
2032 if ((mfr != cfi->mfr) || (id != cfi->id)) {
2033 printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
2034 map->name, mfr, id, base);
2035 jedec_reset(base, map, cfi);
2036 return 0;
2037 }
2038 }
2039
2040	/* Check each previous chip location to see if it's an alias */
2041 for (i=0; i < (base >> cfi->chipshift); i++) {
2042 unsigned long start;
2043 if(!test_bit(i, chip_map)) {
2044 continue; /* Skip location; no valid chip at this address */
2045 }
2046 start = i << cfi->chipshift;
2047 if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
2048 jedec_read_id(map, start, cfi) == cfi->id) {
2049 /* Eep. This chip also looks like it's in autoselect mode.
2050 Is it an alias for the new one? */
2051 jedec_reset(start, map, cfi);
2052
2053 /* If the device IDs go away, it's an alias */
2054 if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
2055 jedec_read_id(map, base, cfi) != cfi->id) {
2056 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
2057 map->name, base, start);
2058 return 0;
2059 }
2060
2061 /* Yes, it's actually got the device IDs as data. Most
2062 * unfortunate. Stick the new chip in read mode
2063 * too and if it's the same, assume it's an alias. */
2064 /* FIXME: Use other modes to do a proper check */
2065 jedec_reset(base, map, cfi);
2066 if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
2067 jedec_read_id(map, base, cfi) == cfi->id) {
2068 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
2069 map->name, base, start);
2070 return 0;
2071 }
2072 }
2073 }
2074
2075 /* OK, if we got to here, then none of the previous chips appear to
2076 be aliases for the current one. */
2077 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
2078 cfi->numchips++;
2079
2080ok_out:
2081 /* Put it back into Read Mode */
2082 jedec_reset(base, map, cfi);
2083
2084 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
2085 map->name, cfi_interleave(cfi), cfi->device_type*8, base,
2086 map->bankwidth*8);
2087
2088 return 1;
2089}
2090
2091static struct chip_probe jedec_chip_probe = {
2092 .name = "JEDEC",
2093 .probe_chip = jedec_probe_chip
2094};
2095
2096static struct mtd_info *jedec_probe(struct map_info *map)
2097{
2098 /*
2099 * Just use the generic probe stuff to call our CFI-specific
2100 * chip_probe routine in all the possible permutations, etc.
2101 */
2102 return mtd_do_chip_probe(map, &jedec_chip_probe);
2103}
2104
2105static struct mtd_chip_driver jedec_chipdrv = {
2106 .probe = jedec_probe,
2107 .name = "jedec_probe",
2108 .module = THIS_MODULE
2109};
2110
2111static int __init jedec_probe_init(void)
2112{
2113 register_mtd_chip_driver(&jedec_chipdrv);
2114 return 0;
2115}
2116
2117static void __exit jedec_probe_exit(void)
2118{
2119 unregister_mtd_chip_driver(&jedec_chipdrv);
2120}
2121
2122module_init(jedec_probe_init);
2123module_exit(jedec_probe_exit);
2124
2125MODULE_LICENSE("GPL");
2126MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
2127MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
new file mode 100644
index 000000000000..c6c83833cc32
--- /dev/null
+++ b/drivers/mtd/chips/map_absent.c
@@ -0,0 +1,117 @@
1/*
2 * Common code to handle absent "placeholder" devices
3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
4 * $Id: map_absent.c,v 1.5 2004/11/16 18:29:00 dwmw2 Exp $
5 *
6 * This map driver is used to allocate "placeholder" MTD
7 * devices on systems that have socketed/removable media.
8 * Use of this driver as a fallback preserves the expected
9 * registration of MTD device nodes regardless of probe outcome.
10 * A usage example is as follows:
11 *
12 * my_dev[i] = do_map_probe("cfi", &my_map[i]);
13 * if(NULL == my_dev[i]) {
14 * my_dev[i] = do_map_probe("map_absent", &my_map[i]);
15 * }
16 *
17 * Any device 'probed' with this driver will return -ENODEV
18 * upon open.
19 */
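/*
 * A minimal sketch (hypothetical map-driver code, not part of this file)
 * of the fallback chain described above: try the real chip probes first
 * and fall back to "map_absent" so that an MTD node is always registered.
 */
#if 0
static const char *my_probe_types[] = { "cfi_probe", "jedec_probe", "map_absent", NULL };

static struct mtd_info *my_probe_with_fallback(struct map_info *map)
{
	const char **type;
	struct mtd_info *mtd = NULL;

	for (type = my_probe_types; !mtd && *type; type++)
		mtd = do_map_probe(*type, map);

	/* "map_absent" only fails if kmalloc() fails, so mtd is normally
	   non-NULL here even when no real chip was found. */
	return mtd;
}
#endif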
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/map.h>
29#include <linux/mtd/compatmac.h>
30
31static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
32static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
33static int map_absent_erase (struct mtd_info *, struct erase_info *);
34static void map_absent_sync (struct mtd_info *);
35static struct mtd_info *map_absent_probe(struct map_info *map);
36static void map_absent_destroy (struct mtd_info *);
37
38
39static struct mtd_chip_driver map_absent_chipdrv = {
40 .probe = map_absent_probe,
41 .destroy = map_absent_destroy,
42 .name = "map_absent",
43 .module = THIS_MODULE
44};
45
46static struct mtd_info *map_absent_probe(struct map_info *map)
47{
48 struct mtd_info *mtd;
49
50 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
51 if (!mtd) {
52 return NULL;
53 }
54
55 memset(mtd, 0, sizeof(*mtd));
56
57 map->fldrv = &map_absent_chipdrv;
58 mtd->priv = map;
59 mtd->name = map->name;
60 mtd->type = MTD_ABSENT;
61 mtd->size = map->size;
62 mtd->erase = map_absent_erase;
63 mtd->read = map_absent_read;
64 mtd->write = map_absent_write;
65 mtd->sync = map_absent_sync;
66 mtd->flags = 0;
67 mtd->erasesize = PAGE_SIZE;
68
69 __module_get(THIS_MODULE);
70 return mtd;
71}
72
73
74static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
75{
76 *retlen = 0;
77 return -ENODEV;
78}
79
80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
81{
82 *retlen = 0;
83 return -ENODEV;
84}
85
86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
87{
88 return -ENODEV;
89}
90
91static void map_absent_sync(struct mtd_info *mtd)
92{
93 /* nop */
94}
95
96static void map_absent_destroy(struct mtd_info *mtd)
97{
98 /* nop */
99}
100
101static int __init map_absent_init(void)
102{
103 register_mtd_chip_driver(&map_absent_chipdrv);
104 return 0;
105}
106
107static void __exit map_absent_exit(void)
108{
109 unregister_mtd_chip_driver(&map_absent_chipdrv);
110}
111
112module_init(map_absent_init);
113module_exit(map_absent_exit);
114
115MODULE_LICENSE("GPL");
116MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>");
117MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
new file mode 100644
index 000000000000..bd2e876a814b
--- /dev/null
+++ b/drivers/mtd/chips/map_ram.c
@@ -0,0 +1,143 @@
1/*
2 * Common code to handle map devices which are simple RAM
3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <asm/io.h>
11#include <asm/byteorder.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/compatmac.h>
18
19
20static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
21static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
22static int mapram_erase (struct mtd_info *, struct erase_info *);
23static void mapram_nop (struct mtd_info *);
24static struct mtd_info *map_ram_probe(struct map_info *map);
25
26
27static struct mtd_chip_driver mapram_chipdrv = {
28 .probe = map_ram_probe,
29 .name = "map_ram",
30 .module = THIS_MODULE
31};
32
33static struct mtd_info *map_ram_probe(struct map_info *map)
34{
35 struct mtd_info *mtd;
36
37 /* Check the first byte is RAM */
38#if 0
39 map_write8(map, 0x55, 0);
40 if (map_read8(map, 0) != 0x55)
41 return NULL;
42
43 map_write8(map, 0xAA, 0);
44 if (map_read8(map, 0) != 0xAA)
45 return NULL;
46
47 /* Check the last byte is RAM */
48 map_write8(map, 0x55, map->size-1);
49 if (map_read8(map, map->size-1) != 0x55)
50 return NULL;
51
52 map_write8(map, 0xAA, map->size-1);
53 if (map_read8(map, map->size-1) != 0xAA)
54 return NULL;
55#endif
56 /* OK. It seems to be RAM. */
57
58 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
59 if (!mtd)
60 return NULL;
61
62 memset(mtd, 0, sizeof(*mtd));
63
64 map->fldrv = &mapram_chipdrv;
65 mtd->priv = map;
66 mtd->name = map->name;
67 mtd->type = MTD_RAM;
68 mtd->size = map->size;
69 mtd->erase = mapram_erase;
70 mtd->read = mapram_read;
71 mtd->write = mapram_write;
72 mtd->sync = mapram_nop;
73 mtd->flags = MTD_CAP_RAM | MTD_VOLATILE;
74
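	/* Report the largest power-of-two erase size, starting at PAGE_SIZE
	   and shrinking until it evenly divides the device size. */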
75 mtd->erasesize = PAGE_SIZE;
76 while(mtd->size & (mtd->erasesize - 1))
77 mtd->erasesize >>= 1;
78
79 __module_get(THIS_MODULE);
80 return mtd;
81}
82
83
84static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
85{
86 struct map_info *map = mtd->priv;
87
88 map_copy_from(map, buf, from, len);
89 *retlen = len;
90 return 0;
91}
92
93static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
94{
95 struct map_info *map = mtd->priv;
96
97 map_copy_to(map, to, buf, len);
98 *retlen = len;
99 return 0;
100}
101
102static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
103{
104 /* Yeah, it's inefficient. Who cares? It's faster than a _real_
105 flash erase. */
106 struct map_info *map = mtd->priv;
107 map_word allff;
108 unsigned long i;
109
110 allff = map_word_ff(map);
111
112 for (i=0; i<instr->len; i += map_bankwidth(map))
113 map_write(map, allff, instr->addr + i);
114
115 instr->state = MTD_ERASE_DONE;
116
117 mtd_erase_callback(instr);
118
119 return 0;
120}
121
122static void mapram_nop(struct mtd_info *mtd)
123{
124 /* Nothing to see here */
125}
126
127static int __init map_ram_init(void)
128{
129 register_mtd_chip_driver(&mapram_chipdrv);
130 return 0;
131}
132
133static void __exit map_ram_exit(void)
134{
135 unregister_mtd_chip_driver(&mapram_chipdrv);
136}
137
138module_init(map_ram_init);
139module_exit(map_ram_exit);
140
141MODULE_LICENSE("GPL");
142MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
143MODULE_DESCRIPTION("MTD chip driver for RAM chips");
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
new file mode 100644
index 000000000000..624c12c232c8
--- /dev/null
+++ b/drivers/mtd/chips/map_rom.c
@@ -0,0 +1,94 @@
1/*
2 * Common code to handle map devices which are simple ROM
3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <asm/io.h>
11#include <asm/byteorder.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/compatmac.h>
18
19static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
20static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
21static void maprom_nop (struct mtd_info *);
22static struct mtd_info *map_rom_probe(struct map_info *map);
23
24static struct mtd_chip_driver maprom_chipdrv = {
25 .probe = map_rom_probe,
26 .name = "map_rom",
27 .module = THIS_MODULE
28};
29
30static struct mtd_info *map_rom_probe(struct map_info *map)
31{
32 struct mtd_info *mtd;
33
34 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
35 if (!mtd)
36 return NULL;
37
38 memset(mtd, 0, sizeof(*mtd));
39
40 map->fldrv = &maprom_chipdrv;
41 mtd->priv = map;
42 mtd->name = map->name;
43 mtd->type = MTD_ROM;
44 mtd->size = map->size;
45 mtd->read = maprom_read;
46 mtd->write = maprom_write;
47 mtd->sync = maprom_nop;
48 mtd->flags = MTD_CAP_ROM;
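	/* Report a nominal erase size: start at 128KiB and shrink to the
	   largest power of two that evenly divides the ROM size. */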
49 mtd->erasesize = 131072;
50 while(mtd->size & (mtd->erasesize - 1))
51 mtd->erasesize >>= 1;
52
53 __module_get(THIS_MODULE);
54 return mtd;
55}
56
57
58static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
59{
60 struct map_info *map = mtd->priv;
61
62 map_copy_from(map, buf, from, len);
63 *retlen = len;
64 return 0;
65}
66
67static void maprom_nop(struct mtd_info *mtd)
68{
69 /* Nothing to see here */
70}
71
72static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
73{
74 printk(KERN_NOTICE "maprom_write called\n");
75 return -EIO;
76}
77
78static int __init map_rom_init(void)
79{
80 register_mtd_chip_driver(&maprom_chipdrv);
81 return 0;
82}
83
84static void __exit map_rom_exit(void)
85{
86 unregister_mtd_chip_driver(&maprom_chipdrv);
87}
88
89module_init(map_rom_init);
90module_exit(map_rom_exit);
91
92MODULE_LICENSE("GPL");
93MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
94MODULE_DESCRIPTION("MTD chip driver for ROM chips");
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
new file mode 100644
index 000000000000..c3cf0f63bc93
--- /dev/null
+++ b/drivers/mtd/chips/sharp.c
@@ -0,0 +1,596 @@
1/*
2 * MTD chip driver for pre-CFI Sharp flash chips
3 *
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc.
6 *
7 * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $
8 *
9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
11 * LH28F008SCT Symmetrical block flash memory, 1Mx8
12 *
13 * Documentation:
14 * http://www.sharpmeg.com/datasheets/memic/flashcmp/
15 * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf
17 *
18 * Limitations:
19 *	This driver only supports a 4x1 arrangement of chips.
20 * Not tested on anything but PowerPC.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/mtd/map.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/cfi.h>
32#include <linux/delay.h>
33#include <linux/init.h>
34
35#define CMD_RESET 0xffffffff
36#define CMD_READ_ID 0x90909090
37#define CMD_READ_STATUS 0x70707070
38#define CMD_CLEAR_STATUS 0x50505050
39#define CMD_BLOCK_ERASE_1 0x20202020
40#define CMD_BLOCK_ERASE_2 0xd0d0d0d0
41#define CMD_BYTE_WRITE 0x40404040
42#define CMD_SUSPEND 0xb0b0b0b0
43#define CMD_RESUME 0xd0d0d0d0
44#define CMD_SET_BLOCK_LOCK_1 0x60606060
45#define CMD_SET_BLOCK_LOCK_2 0x01010101
46#define CMD_SET_MASTER_LOCK_1 0x60606060
47#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1
48#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060
49#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0
50
51#define SR_READY 0x80808080 // 1 = ready
52#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended
53#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits
54#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit
55#define SR_VPP 0x08080808 // 1 = Vpp is low
56#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended
57#define SR_PROTECT 0x02020202 // 1 = lock bit set
58#define SR_RESERVED 0x01010101
59
60#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT)
61
62/* Configuration options */
63
64#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
65
66struct mtd_info *sharp_probe(struct map_info *);
67
68static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
69
70static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
71 size_t *retlen, u_char *buf);
72static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len,
73 size_t *retlen, const u_char *buf);
74static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr);
75static void sharp_sync(struct mtd_info *mtd);
76static int sharp_suspend(struct mtd_info *mtd);
77static void sharp_resume(struct mtd_info *mtd);
78static void sharp_destroy(struct mtd_info *mtd);
79
80static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
81 unsigned long adr, __u32 datum);
82static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
83 unsigned long adr);
84#ifdef AUTOUNLOCK
85static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
86 unsigned long adr);
87#endif
88
89
90struct sharp_info{
91 struct flchip *chip;
92 int bogus;
93 int chipshift;
94 int numchips;
95 struct flchip chips[1];
96};
97
98struct mtd_info *sharp_probe(struct map_info *map);
99static void sharp_destroy(struct mtd_info *mtd);
100
101static struct mtd_chip_driver sharp_chipdrv = {
102 .probe = sharp_probe,
103 .destroy = sharp_destroy,
104 .name = "sharp",
105 .module = THIS_MODULE
106};
107
108
109struct mtd_info *sharp_probe(struct map_info *map)
110{
111 struct mtd_info *mtd = NULL;
112 struct sharp_info *sharp = NULL;
113 int width;
114
115 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
116 if(!mtd)
117 return NULL;
118
119 sharp = kmalloc(sizeof(*sharp), GFP_KERNEL);
120 if(!sharp) {
121 kfree(mtd);
122 return NULL;
123 }
124
125 memset(mtd, 0, sizeof(*mtd));
126
127 width = sharp_probe_map(map,mtd);
128 if(!width){
129 kfree(mtd);
130 kfree(sharp);
131 return NULL;
132 }
133
134 mtd->priv = map;
135 mtd->type = MTD_NORFLASH;
136 mtd->erase = sharp_erase;
137 mtd->read = sharp_read;
138 mtd->write = sharp_write;
139 mtd->sync = sharp_sync;
140 mtd->suspend = sharp_suspend;
141 mtd->resume = sharp_resume;
142 mtd->flags = MTD_CAP_NORFLASH;
143 mtd->name = map->name;
144
145 memset(sharp, 0, sizeof(*sharp));
146 sharp->chipshift = 23;
147 sharp->numchips = 1;
148 sharp->chips[0].start = 0;
149 sharp->chips[0].state = FL_READY;
150 sharp->chips[0].mutex = &sharp->chips[0]._spinlock;
151 sharp->chips[0].word_write_time = 0;
152 init_waitqueue_head(&sharp->chips[0].wq);
153 spin_lock_init(&sharp->chips[0]._spinlock);
154
155 map->fldrv = &sharp_chipdrv;
156 map->fldrv_priv = sharp;
157
158 __module_get(THIS_MODULE);
159 return mtd;
160}
161
162static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
163{
164 unsigned long tmp;
165 unsigned long base = 0;
166 u32 read0, read4;
167 int width = 4;
168
169 tmp = map_read32(map, base+0);
170
171 map_write32(map, CMD_READ_ID, base+0);
172
173 read0=map_read32(map, base+0);
174 read4=map_read32(map, base+4);
175 if(read0 == 0x89898989){
176 printk("Looks like sharp flash\n");
177 switch(read4){
178 case 0xaaaaaaaa:
179 case 0xa0a0a0a0:
180 /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
181 /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/
182 mtd->erasesize = 0x10000 * width;
183 mtd->size = 0x200000 * width;
184 return width;
185 case 0xa6a6a6a6:
186 /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/
187 /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/
188 mtd->erasesize = 0x10000 * width;
189 mtd->size = 0x100000 * width;
190 return width;
191#if 0
192 case 0x00000000: /* unknown */
193 /* XX - LH28F004SCT 512kx8, 8 64k blocks*/
194 mtd->erasesize = 0x10000 * width;
195 mtd->size = 0x80000 * width;
196 return width;
197#endif
198 default:
199 printk("Sort-of looks like sharp flash, 0x%08x 0x%08x\n",
200 read0,read4);
201 }
202 }else if((map_read32(map, base+0) == CMD_READ_ID)){
203 /* RAM, probably */
204 printk("Looks like RAM\n");
205 map_write32(map, tmp, base+0);
206 }else{
207 printk("Doesn't look like sharp flash, 0x%08x 0x%08x\n",
208 read0,read4);
209 }
210
211 return 0;
212}
213
214/* This function returns with the chip->mutex lock held. */
215static int sharp_wait(struct map_info *map, struct flchip *chip)
216{
217	__u32 status;	/* the status register is read 32 bits wide (four 8-bit chips) */
218 unsigned long timeo = jiffies + HZ;
219 DECLARE_WAITQUEUE(wait, current);
220 int adr = 0;
221
222retry:
223 spin_lock_bh(chip->mutex);
224
225 switch(chip->state){
226 case FL_READY:
227 map_write32(map,CMD_READ_STATUS,adr);
228 chip->state = FL_STATUS;
229 case FL_STATUS:
230 status = map_read32(map,adr);
231//printk("status=%08x\n",status);
232
233 udelay(100);
234 if((status & SR_READY)!=SR_READY){
235//printk(".status=%08x\n",status);
236 udelay(100);
237 }
238 break;
239 default:
240 printk("Waiting for chip\n");
241
242 set_current_state(TASK_INTERRUPTIBLE);
243 add_wait_queue(&chip->wq, &wait);
244
245 spin_unlock_bh(chip->mutex);
246
247 schedule();
248 remove_wait_queue(&chip->wq, &wait);
249
250 if(signal_pending(current))
251 return -EINTR;
252
253 timeo = jiffies + HZ;
254
255 goto retry;
256 }
257
258 map_write32(map,CMD_RESET, adr);
259
260 chip->state = FL_READY;
261
262 return 0;
263}
264
265static void sharp_release(struct flchip *chip)
266{
267 wake_up(&chip->wq);
268 spin_unlock_bh(chip->mutex);
269}
270
271static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
272 size_t *retlen, u_char *buf)
273{
274 struct map_info *map = mtd->priv;
275 struct sharp_info *sharp = map->fldrv_priv;
276 int chipnum;
277 int ret = 0;
278 int ofs = 0;
279
280 chipnum = (from >> sharp->chipshift);
281 ofs = from & ((1 << sharp->chipshift)-1);
282
283 *retlen = 0;
284
285 while(len){
286 unsigned long thislen;
287
288 if(chipnum>=sharp->numchips)
289 break;
290
291 thislen = len;
292 if(ofs+thislen >= (1<<sharp->chipshift))
293 thislen = (1<<sharp->chipshift) - ofs;
294
295 ret = sharp_wait(map,&sharp->chips[chipnum]);
296 if(ret<0)
297 break;
298
299 map_copy_from(map,buf,ofs,thislen);
300
301 sharp_release(&sharp->chips[chipnum]);
302
303 *retlen += thislen;
304 len -= thislen;
305 buf += thislen;
306
307 ofs = 0;
308 chipnum++;
309 }
310 return ret;
311}
312
313static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len,
314 size_t *retlen, const u_char *buf)
315{
316 struct map_info *map = mtd->priv;
317 struct sharp_info *sharp = map->fldrv_priv;
318 int ret = 0;
319 int i,j;
320 int chipnum;
321 unsigned long ofs;
322 union { u32 l; unsigned char uc[4]; } tbuf;
323
324 *retlen = 0;
325
326 while(len){
327 tbuf.l = 0xffffffff;
328 chipnum = to >> sharp->chipshift;
329 ofs = to & ((1<<sharp->chipshift)-1);
330
331 j=0;
332 for(i=ofs&3;i<4 && len;i++){
333 tbuf.uc[i] = *buf;
334 buf++;
335 to++;
336 len--;
337 j++;
338 }
339		ret = sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l);
340 if(ret<0)
341 return ret;
342 (*retlen)+=j;
343 }
344
345 return 0;
346}
347
348static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
349 unsigned long adr, __u32 datum)
350{
351 int ret;
352 int timeo;
353 int try;
354 int i;
355 int status = 0;
356
357 ret = sharp_wait(map,chip);
358
359 for(try=0;try<10;try++){
360 map_write32(map,CMD_BYTE_WRITE,adr);
361 /* cpu_to_le32 -> hack to fix the writel be->le conversion */
362 map_write32(map,cpu_to_le32(datum),adr);
363
364 chip->state = FL_WRITING;
365
366 timeo = jiffies + (HZ/2);
367
368 map_write32(map,CMD_READ_STATUS,adr);
369 for(i=0;i<100;i++){
370 status = map_read32(map,adr);
371 if((status & SR_READY)==SR_READY)
372 break;
373 }
374 if(i==100){
375 printk("sharp: timed out writing\n");
376 }
377
378 if(!(status&SR_ERRORS))
379 break;
380
381 printk("sharp: error writing byte at addr=%08lx status=%08x\n",adr,status);
382
383 map_write32(map,CMD_CLEAR_STATUS,adr);
384 }
385 map_write32(map,CMD_RESET,adr);
386 chip->state = FL_READY;
387
388 wake_up(&chip->wq);
389 spin_unlock_bh(chip->mutex);
390
391 return 0;
392}
393
394static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
395{
396 struct map_info *map = mtd->priv;
397 struct sharp_info *sharp = map->fldrv_priv;
398 unsigned long adr,len;
399 int chipnum, ret=0;
400
401//printk("sharp_erase()\n");
402 if(instr->addr & (mtd->erasesize - 1))
403 return -EINVAL;
404 if(instr->len & (mtd->erasesize - 1))
405 return -EINVAL;
406 if(instr->len + instr->addr > mtd->size)
407 return -EINVAL;
408
409 chipnum = instr->addr >> sharp->chipshift;
410 adr = instr->addr & ((1<<sharp->chipshift)-1);
411 len = instr->len;
412
413 while(len){
414 ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr);
415 if(ret)return ret;
416
417 adr += mtd->erasesize;
418 len -= mtd->erasesize;
419 if(adr >> sharp->chipshift){
420 adr = 0;
421 chipnum++;
422 if(chipnum>=sharp->numchips)
423 break;
424 }
425 }
426
427 instr->state = MTD_ERASE_DONE;
428 mtd_erase_callback(instr);
429
430 return 0;
431}
432
433static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
434 unsigned long adr)
435{
436 int ret;
437 unsigned long timeo;
438 int status;
439 DECLARE_WAITQUEUE(wait, current);
440
441 map_write32(map,CMD_READ_STATUS,adr);
442 status = map_read32(map,adr);
443
444 timeo = jiffies + HZ;
445
446 while(time_before(jiffies, timeo)){
447 map_write32(map,CMD_READ_STATUS,adr);
448 status = map_read32(map,adr);
449 if((status & SR_READY)==SR_READY){
450 ret = 0;
451 goto out;
452 }
453 set_current_state(TASK_INTERRUPTIBLE);
454 add_wait_queue(&chip->wq, &wait);
455
456 //spin_unlock_bh(chip->mutex);
457
458 schedule_timeout(1);
459 schedule();
460 remove_wait_queue(&chip->wq, &wait);
461
462 //spin_lock_bh(chip->mutex);
463
464 if (signal_pending(current)){
465 ret = -EINTR;
466 goto out;
467 }
468
469 }
470 ret = -ETIME;
471out:
472 return ret;
473}
474
475static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
476 unsigned long adr)
477{
478 int ret;
479 //int timeo;
480 int status;
481 //int i;
482
483//printk("sharp_erase_oneblock()\n");
484
485#ifdef AUTOUNLOCK
486 /* This seems like a good place to do an unlock */
487 sharp_unlock_oneblock(map,chip,adr);
488#endif
489
490 map_write32(map,CMD_BLOCK_ERASE_1,adr);
491 map_write32(map,CMD_BLOCK_ERASE_2,adr);
492
493 chip->state = FL_ERASING;
494
495 ret = sharp_do_wait_for_ready(map,chip,adr);
496 if(ret<0)return ret;
497
498 map_write32(map,CMD_READ_STATUS,adr);
499 status = map_read32(map,adr);
500
501 if(!(status&SR_ERRORS)){
502 map_write32(map,CMD_RESET,adr);
503 chip->state = FL_READY;
504 //spin_unlock_bh(chip->mutex);
505 return 0;
506 }
507
508 printk("sharp: error erasing block at addr=%08lx status=%08x\n",adr,status);
509 map_write32(map,CMD_CLEAR_STATUS,adr);
510
511 //spin_unlock_bh(chip->mutex);
512
513 return -EIO;
514}
515
516#ifdef AUTOUNLOCK
517static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
518 unsigned long adr)
519{
520 int i;
521 int status;
522
523 map_write32(map,CMD_CLEAR_BLOCK_LOCKS_1,adr);
524 map_write32(map,CMD_CLEAR_BLOCK_LOCKS_2,adr);
525
526 udelay(100);
527
528 status = map_read32(map,adr);
529 printk("status=%08x\n",status);
530
531 for(i=0;i<1000;i++){
532 //map_write32(map,CMD_READ_STATUS,adr);
533 status = map_read32(map,adr);
534 if((status & SR_READY)==SR_READY)
535 break;
536 udelay(100);
537 }
538 if(i==1000){
539 printk("sharp: timed out unlocking block\n");
540 }
541
542 if(!(status&SR_ERRORS)){
543 map_write32(map,CMD_RESET,adr);
544 chip->state = FL_READY;
545 return;
546 }
547
548 printk("sharp: error unlocking block at addr=%08lx status=%08x\n",adr,status);
549 map_write32(map,CMD_CLEAR_STATUS,adr);
550}
551#endif
552
553static void sharp_sync(struct mtd_info *mtd)
554{
555 //printk("sharp_sync()\n");
556}
557
558static int sharp_suspend(struct mtd_info *mtd)
559{
560 printk("sharp_suspend()\n");
561 return -EINVAL;
562}
563
564static void sharp_resume(struct mtd_info *mtd)
565{
566 printk("sharp_resume()\n");
567
568}
569
570static void sharp_destroy(struct mtd_info *mtd)
571{
572 printk("sharp_destroy()\n");
573
574}
575
576int __init sharp_probe_init(void)
577{
578 printk("MTD Sharp chip driver <ds@lineo.com>\n");
579
580 register_mtd_chip_driver(&sharp_chipdrv);
581
582 return 0;
583}
584
585static void __exit sharp_probe_exit(void)
586{
587 unregister_mtd_chip_driver(&sharp_chipdrv);
588}
589
590module_init(sharp_probe_init);
591module_exit(sharp_probe_exit);
592
593
594MODULE_LICENSE("GPL");
595MODULE_AUTHOR("David Schleef <ds@schleef.org>");
596MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
new file mode 100644
index 000000000000..60ab4b89a2f9
--- /dev/null
+++ b/drivers/mtd/cmdlinepart.c
@@ -0,0 +1,367 @@
1/*
2 * $Id: cmdlinepart.c,v 1.17 2004/11/26 11:18:47 lavinen Exp $
3 *
4 * Read flash partition table from command line
5 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH
7 *
8 * The format for the command line is as follows:
9 *
10 * mtdparts=<mtddef>[;<mtddef>]
11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
12 * <partdef> := <size>[@offset][<name>][ro]
13 * <mtd-id> := unique name used in mapping driver/device (mtd->name)
14 * <size> := standard linux memsize OR "-" to denote all remaining space
15 * <name> := '(' NAME ')'
16 *
17 * Examples:
18 *
19 * 1 NOR Flash, with 1 single writable partition:
20 * edb7312-nor:-
21 *
22 * 1 NOR Flash with 2 partitions and 1 NAND Flash with a single partition:
23 * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
24 */
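/*
 * Illustration (an assumption derived from the grammar above, not taken
 * from the original driver): the "256k(ARMboot)ro,-(root)" spec in the
 * second example would hand the MTD named "edb7312-nor" a table roughly
 * equivalent to
 *
 *	static struct mtd_partition example_parts[] = {
 *		{ .name = "ARMboot", .offset = 0, .size = 0x40000,
 *		  .mask_flags = MTD_WRITEABLE },	<- "ro": writes masked off
 *		{ .name = "root", .offset = 0x40000, .size = SIZE_REMAINING },
 *	};
 *
 * where SIZE_REMAINING is later resolved to (master->size - 0x40000) by
 * parse_cmdline_partitions() below.
 */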
25
26#include <linux/kernel.h>
27#include <linux/slab.h>
28
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h>
31#include <linux/bootmem.h>
32
33/* error message prefix */
34#define ERRP "mtd: "
35
36/* debug macro */
37#if 0
38#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
39#else
40#define dbg(x)
41#endif
42
43
44/* special size referring to all the remaining space in a partition */
45#define SIZE_REMAINING 0xffffffff
46
47struct cmdline_mtd_partition {
48 struct cmdline_mtd_partition *next;
49 char *mtd_id;
50 int num_parts;
51 struct mtd_partition *parts;
52};
53
54/* mtdpart_setup() parses into here */
55static struct cmdline_mtd_partition *partitions;
56
57/* the command line passed to mtdpart_setup() */
58static char *cmdline;
59static int cmdline_parsed = 0;
60
61/*
62 * Parse one partition definition for an MTD. Since there can be many
63 * comma separated partition definitions, this function calls itself
64 * recursively until no more partition definitions are found. Nice side
65 * effect: the memory to keep the mtd_partition structs and the names
66 * is allocated upon the last definition being found. At that point the
67 * syntax has been verified ok.
68 */
69static struct mtd_partition * newpart(char *s,
70 char **retptr,
71 int *num_parts,
72 int this_part,
73 unsigned char **extra_mem_ptr,
74 int extra_mem_size)
75{
76 struct mtd_partition *parts;
77 unsigned long size;
78 unsigned long offset = 0;
79 char *name;
80 int name_len;
81 unsigned char *extra_mem;
82 char delim;
83 unsigned int mask_flags;
84
85 /* fetch the partition size */
86 if (*s == '-')
87 { /* assign all remaining space to this partition */
88 size = SIZE_REMAINING;
89 s++;
90 }
91 else
92 {
93 size = memparse(s, &s);
94 if (size < PAGE_SIZE)
95 {
96 printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
97 return NULL;
98 }
99 }
100
101 /* fetch partition name and flags */
102 mask_flags = 0; /* this is going to be a regular partition */
103 delim = 0;
104 /* check for offset */
105 if (*s == '@')
106 {
107 s++;
108 offset = memparse(s, &s);
109 }
110 /* now look for name */
111 if (*s == '(')
112 {
113 delim = ')';
114 }
115
116 if (delim)
117 {
118 char *p;
119
120 name = ++s;
121 if ((p = strchr(name, delim)) == 0)
122 {
123 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
124 return NULL;
125 }
126 name_len = p - name;
127 s = p + 1;
128 }
129 else
130 {
131 name = NULL;
132 name_len = 13; /* Partition_000 */
133 }
134
135 /* record name length for memory allocation later */
136 extra_mem_size += name_len + 1;
137
138 /* test for options */
139 if (strncmp(s, "ro", 2) == 0)
140 {
141 mask_flags |= MTD_WRITEABLE;
142 s += 2;
143 }
144
145 /* test if more partitions are following */
146 if (*s == ',')
147 {
148 if (size == SIZE_REMAINING)
149 {
150 printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
151 return NULL;
152 }
153 /* more partitions follow, parse them */
154 if ((parts = newpart(s + 1, &s, num_parts,
155 this_part + 1, &extra_mem, extra_mem_size)) == 0)
156 return NULL;
157 }
158 else
159 { /* this is the last partition: allocate space for all */
160 int alloc_size;
161
162 *num_parts = this_part + 1;
163 alloc_size = *num_parts * sizeof(struct mtd_partition) +
164 extra_mem_size;
165 parts = kmalloc(alloc_size, GFP_KERNEL);
166 if (!parts)
167 {
168 printk(KERN_ERR ERRP "out of memory\n");
169 return NULL;
170 }
171 memset(parts, 0, alloc_size);
172 extra_mem = (unsigned char *)(parts + *num_parts);
173 }
174 /* enter this partition (offset will be calculated later if it is zero at this point) */
175 parts[this_part].size = size;
176 parts[this_part].offset = offset;
177 parts[this_part].mask_flags = mask_flags;
178 if (name)
179 {
180 strlcpy(extra_mem, name, name_len + 1);
181 }
182 else
183 {
184 sprintf(extra_mem, "Partition_%03d", this_part);
185 }
186 parts[this_part].name = extra_mem;
187 extra_mem += name_len + 1;
188
189 dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
190 this_part,
191 parts[this_part].name,
192 parts[this_part].offset,
193 parts[this_part].size,
194 parts[this_part].mask_flags));
195
196 /* return (updated) pointer to extra_mem memory */
197 if (extra_mem_ptr)
198 *extra_mem_ptr = extra_mem;
199
200	/* return (updated) pointer to the command line string */
201 *retptr = s;
202
203 /* return partition table */
204 return parts;
205}
206
207/*
208 * Parse the command line.
209 */
210static int mtdpart_setup_real(char *s)
211{
212 cmdline_parsed = 1;
213
214 for( ; s != NULL; )
215 {
216 struct cmdline_mtd_partition *this_mtd;
217 struct mtd_partition *parts;
218 int mtd_id_len;
219 int num_parts;
220 char *p, *mtd_id;
221
222 mtd_id = s;
223 /* fetch <mtd-id> */
224 if (!(p = strchr(s, ':')))
225 {
226 printk(KERN_ERR ERRP "no mtd-id\n");
227 return 0;
228 }
229 mtd_id_len = p - mtd_id;
230
231 dbg(("parsing <%s>\n", p+1));
232
233 /*
234 * parse one mtd. have it reserve memory for the
235 * struct cmdline_mtd_partition and the mtd-id string.
236 */
237 parts = newpart(p + 1, /* cmdline */
238 &s, /* out: updated cmdline ptr */
239 &num_parts, /* out: number of parts */
240 0, /* first partition */
241 (unsigned char**)&this_mtd, /* out: extra mem */
242 mtd_id_len + 1 + sizeof(*this_mtd));
243 if(!parts)
244 {
245 /*
246 * An error occurred. We're either:
247 * a) out of memory, or
248 * b) in the middle of the partition spec
249 * Either way, this mtd is hosed and we're
250 * unlikely to succeed in parsing any more
251 */
252 return 0;
253 }
254
255 /* enter results */
256 this_mtd->parts = parts;
257 this_mtd->num_parts = num_parts;
258 this_mtd->mtd_id = (char*)(this_mtd + 1);
259 strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
260
261 /* link into chain */
262 this_mtd->next = partitions;
263 partitions = this_mtd;
264
265 dbg(("mtdid=<%s> num_parts=<%d>\n",
266 this_mtd->mtd_id, this_mtd->num_parts));
267
268
269 /* EOS - we're done */
270 if (*s == 0)
271 break;
272
273 /* does another spec follow? */
274 if (*s != ';')
275 {
276 printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
277 return 0;
278 }
279 s++;
280 }
281 return 1;
282}
283
284/*
285 * Main function to be called from the MTD mapping driver/device to
286 * obtain the partitioning information. At this point the command line
287 * arguments will actually be parsed and turned to struct mtd_partition
288 * information. It returns partitions for the requested mtd device, or
289 * the first one in the chain if a NULL mtd_id is passed in.
290 */
291static int parse_cmdline_partitions(struct mtd_info *master,
292 struct mtd_partition **pparts,
293 unsigned long origin)
294{
295 unsigned long offset;
296 int i;
297 struct cmdline_mtd_partition *part;
298 char *mtd_id = master->name;
299
300 if(!cmdline)
301 return -EINVAL;
302
303 /* parse command line */
304 if (!cmdline_parsed)
305 mtdpart_setup_real(cmdline);
306
307 for(part = partitions; part; part = part->next)
308 {
309 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
310 {
311 for(i = 0, offset = 0; i < part->num_parts; i++)
312 {
313 if (!part->parts[i].offset)
314 part->parts[i].offset = offset;
315 else
316 offset = part->parts[i].offset;
317 if (part->parts[i].size == SIZE_REMAINING)
318 part->parts[i].size = master->size - offset;
319 if (offset + part->parts[i].size > master->size)
320 {
321 printk(KERN_WARNING ERRP
322 "%s: partitioning exceeds flash size, truncating\n",
323 part->mtd_id);
324 part->parts[i].size = master->size - offset;
325 part->num_parts = i;
326 }
327 offset += part->parts[i].size;
328 }
329 *pparts = part->parts;
330 return part->num_parts;
331 }
332 }
333 return -EINVAL;
334}
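/*
 * Sketch (hypothetical map-driver code, assuming the generic parser API
 * from linux/mtd/partitions.h): parse_cmdline_partitions() above is
 * normally reached indirectly, by a mapping driver asking for the
 * "cmdlinepart" parser by name.
 */
#if 0
static const char *part_probes[] = { "cmdlinepart", NULL };

static void my_map_register(struct mtd_info *mtd)
{
	struct mtd_partition *parts;
	int nr;

	nr = parse_mtd_partitions(mtd, part_probes, &parts, 0);
	if (nr > 0)
		add_mtd_partitions(mtd, parts, nr);
	else
		add_mtd_device(mtd);	/* no partitions found: register the whole device */
}
#endif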
335
336
337/*
338 * This is the handler for our kernel parameter, called from
339 * main.c::checksetup(). Note that we cannot yet kmalloc() anything,
340 * so we only save the command line for later processing.
341 *
342 * This function needs to be visible for bootloaders.
343 */
344int mtdpart_setup(char *s)
345{
346 cmdline = s;
347 return 1;
348}
349
350__setup("mtdparts=", mtdpart_setup);
351
352static struct mtd_part_parser cmdline_parser = {
353 .owner = THIS_MODULE,
354 .parse_fn = parse_cmdline_partitions,
355 .name = "cmdlinepart",
356};
357
358static int __init cmdline_parser_init(void)
359{
360 return register_mtd_parser(&cmdline_parser);
361}
362
363module_init(cmdline_parser_init);
364
365MODULE_LICENSE("GPL");
366MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
367MODULE_DESCRIPTION("Command line configuration of MTD partitions");
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
new file mode 100644
index 000000000000..c4a56a4ac5e2
--- /dev/null
+++ b/drivers/mtd/devices/Kconfig
@@ -0,0 +1,259 @@
1# drivers/mtd/devices/Kconfig
2# $Id: Kconfig,v 1.15 2004/12/22 17:51:15 joern Exp $
3
4menu "Self-contained MTD device drivers"
5 depends on MTD!=n
6
7config MTD_PMC551
8 tristate "Ramix PMC551 PCI Mezzanine RAM card support"
9 depends on MTD && PCI
10 ---help---
11 This provides a MTD device driver for the Ramix PMC551 RAM PCI card
12 from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
13 These devices come in memory configurations from 32M - 1G. If you
14 have one, you probably want to enable this.
15
16 If this driver is compiled as a module you get the ability to select
17	  the size of the aperture window pointing into the device's memory.
18 What this means is that if you have a 1G card, normally the kernel
19 will use a 1G memory map as its view of the device. As a module,
20 you can select a 1M window into the memory and the driver will
21 "slide" the window around the PMC551's memory. This was
22 particularly useful on the 2.2 kernels on PPC architectures as there
23 was limited kernel space to deal with.
24
25config MTD_PMC551_BUGFIX
26 bool "PMC551 256M DRAM Bugfix"
27 depends on MTD_PMC551
28 help
29 Some of Ramix's PMC551 boards with 256M configurations have invalid
30 column and row mux values. This option will fix them, but will
31 break other memory configurations. If unsure say N.
32
33config MTD_PMC551_DEBUG
34 bool "PMC551 Debugging"
35 depends on MTD_PMC551
36 help
37 This option makes the PMC551 more verbose during its operation and
38 is only really useful if you are developing on this driver or
39 suspect a possible hardware or driver bug. If unsure say N.
40
41config MTD_MS02NV
42 tristate "DEC MS02-NV NVRAM module support"
43 depends on MTD && MACH_DECSTATION
44 help
45 This is an MTD driver for the DEC's MS02-NV (54-20948-01) battery
46 backed-up NVRAM module. The module was originally meant as an NFS
47 accelerator. Say Y here if you have a DECstation 5000/2x0 or a
48 DECsystem 5900 equipped with such a module.
49
50config MTD_SLRAM
51 tristate "Uncached system RAM"
52 depends on MTD
53 help
54 If your CPU cannot cache all of the physical memory in your machine,
55 you can still use it for storage or swap by using this driver to
56 present it to the system as a Memory Technology Device.
57
58config MTD_PHRAM
59 tristate "Physical system RAM"
60 depends on MTD
61 help
62 This is a re-implementation of the slram driver above.
63
64 Use this driver to access physical memory that the kernel proper
65 doesn't have access to, memory beyond the mem=xxx limit, nvram,
66 memory on the video card, etc...
67
68config MTD_LART
69 tristate "28F160xx flash driver for LART"
70 depends on SA1100_LART && MTD
71 help
72 This enables the flash driver for LART. Please note that you do
73 not need any mapping/chip driver for LART. This one does it all
74 for you, so go disable all of those if you enabled some of them (:
75
76config MTD_MTDRAM
77 tristate "Test driver using RAM"
78 depends on MTD
79 help
80 This enables a test MTD device driver which uses vmalloc() to
81 provide storage. You probably want to say 'N' unless you're
82 testing stuff.
83
84config MTDRAM_TOTAL_SIZE
85 int "MTDRAM device size in KiB"
86 depends on MTD_MTDRAM
87 default "4096"
88 help
89 This allows you to configure the total size of the MTD device
90 emulated by the MTDRAM driver. If the MTDRAM driver is built
91 as a module, it is also possible to specify this as a parameter when
92 loading the module.
93
94config MTDRAM_ERASE_SIZE
95 int "MTDRAM erase block size in KiB"
96 depends on MTD_MTDRAM
97 default "128"
98 help
99 This allows you to configure the size of the erase blocks in the
100 device emulated by the MTDRAM driver. If the MTDRAM driver is built
101 as a module, it is also possible to specify this as a parameter when
102 loading the module.
103
104#If not a module (I don't want to test it as a module)
105config MTDRAM_ABS_POS
106 hex "SRAM Hexadecimal Absolute position or 0"
107 depends on MTD_MTDRAM=y
108 default "0"
109 help
110 If you have system RAM accessible by the CPU but not used by Linux
111 in normal operation, you can give the physical address at which the
112 available RAM starts, and the MTDRAM driver will use it instead of
113 allocating space from Linux's available memory. Otherwise, leave
114 this set to zero. Most people will want to leave this as zero.
115
116config MTD_BLKMTD
117 tristate "MTD emulation using block device"
118 depends on MTD
119 help
120 This driver allows a block device to appear as an MTD. It would
121 generally be used in the following cases:
122
123	  Using Compact Flash as an MTD; these cards usually present
124	  themselves to the system as an ATA drive.
125	  Testing MTD users (e.g. JFFS2) on large media and on media that
126	  might be removed during a write (using the floppy drive).
127
128config MTD_BLOCK2MTD
129 tristate "MTD using block device (rewrite)"
130 depends on MTD && EXPERIMENTAL
131 help
132	  This driver is basically the same as MTD_BLKMTD above, but it has
133	  undergone some interface changes and serious speedups. In the
134	  long term, it should replace MTD_BLKMTD. Right now, you
135	  shouldn't entrust important data to it yet.
136
137comment "Disk-On-Chip Device Drivers"
138
139config MTD_DOC2000
140 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
141 depends on MTD
142 select MTD_DOCPROBE
143 select MTD_NAND_IDS
144 ---help---
145 This provides an MTD device driver for the M-Systems DiskOnChip
146 2000 and Millennium devices. Originally designed for the DiskOnChip
147 2000, it also now includes support for the DiskOnChip Millennium.
148 If you have problems with this driver and the DiskOnChip Millennium,
149 you may wish to try the alternative Millennium driver below. To use
150 the alternative driver, you will need to undefine DOC_SINGLE_DRIVER
151 in the <file:drivers/mtd/devices/docprobe.c> source code.
152
153 If you use this device, you probably also want to enable the NFTL
154 'NAND Flash Translation Layer' option below, which is used to
155 emulate a block device by using a kind of file system on the flash
156 chips.
157
158 NOTE: This driver is deprecated and will probably be removed soon.
159 Please try the new DiskOnChip driver under "NAND Flash Device
160 Drivers".
161
162config MTD_DOC2001
163 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
164 depends on MTD
165 select MTD_DOCPROBE
166 select MTD_NAND_IDS
167 ---help---
168 This provides an alternative MTD device driver for the M-Systems
169 DiskOnChip Millennium devices. Use this if you have problems with
170 the combined DiskOnChip 2000 and Millennium driver above. To get
171 the DiskOnChip probe code to load and use this driver instead of
172 the other one, you will need to undefine DOC_SINGLE_DRIVER near
173 the beginning of <file:drivers/mtd/devices/docprobe.c>.
174
175 If you use this device, you probably also want to enable the NFTL
176 'NAND Flash Translation Layer' option below, which is used to
177 emulate a block device by using a kind of file system on the flash
178 chips.
179
180 NOTE: This driver is deprecated and will probably be removed soon.
181 Please try the new DiskOnChip driver under "NAND Flash Device
182 Drivers".
183
184config MTD_DOC2001PLUS
185 tristate "M-Systems Disk-On-Chip Millennium Plus"
186 depends on MTD
187 select MTD_DOCPROBE
188 select MTD_NAND_IDS
189 ---help---
190 This provides an MTD device driver for the M-Systems DiskOnChip
191 Millennium Plus devices.
192
193 If you use this device, you probably also want to enable the INFTL
194 'Inverse NAND Flash Translation Layer' option below, which is used
195 to emulate a block device by using a kind of file system on the
196 flash chips.
197
198 NOTE: This driver will soon be replaced by the new DiskOnChip driver
199 under "NAND Flash Device Drivers" (currently that driver does not
200 support all Millennium Plus devices).
201
202config MTD_DOCPROBE
203 tristate
204 select MTD_DOCECC
205
206config MTD_DOCECC
207 tristate
208
209config MTD_DOCPROBE_ADVANCED
210 bool "Advanced detection options for DiskOnChip"
211 depends on MTD_DOCPROBE
212 help
213	  This option allows you to specify a nonstandard address at which to
214 probe for a DiskOnChip, or to change the detection options. You
215 are unlikely to need any of this unless you are using LinuxBIOS.
216 Say 'N'.
217
218config MTD_DOCPROBE_ADDRESS
219 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
220 depends on MTD_DOCPROBE
221 default "0x0000" if MTD_DOCPROBE_ADVANCED
222 default "0" if !MTD_DOCPROBE_ADVANCED
223 ---help---
224 By default, the probe for DiskOnChip devices will look for a
225 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
226 This option allows you to specify a single address at which to probe
227 for the device, which is useful if you have other devices in that
228 range which get upset when they are probed.
229
230 (Note that on PowerPC, the normal probe will only check at
231 0xE4000000.)
232
233 Normally, you should leave this set to zero, to allow the probe at
234 the normal addresses.
235
236config MTD_DOCPROBE_HIGH
237 bool "Probe high addresses"
238 depends on MTD_DOCPROBE_ADVANCED
239 help
240 By default, the probe for DiskOnChip devices will look for a
241 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
242	  This option changes the probe to check between 0xFFFC8000 and
243 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
244 useful to you. Say 'N'.
245
246config MTD_DOCPROBE_55AA
247 bool "Probe for 0x55 0xAA BIOS Extension Signature"
248 depends on MTD_DOCPROBE_ADVANCED
249 help
250 Check for the 0x55 0xAA signature of a DiskOnChip, and do not
251 continue with probing if it is absent. The signature will always be
252 present for a DiskOnChip 2000 or a normal DiskOnChip Millennium.
253 Only if you have overwritten the first block of a DiskOnChip
254 Millennium will it be absent. Enable this option if you are using
255 LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
256 you have managed to wipe the first block.
257
258endmenu
259
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
new file mode 100644
index 000000000000..e38db348057d
--- /dev/null
+++ b/drivers/mtd/devices/Makefile
@@ -0,0 +1,25 @@
1#
2# linux/drivers/mtd/devices/Makefile
3#
4# $Id: Makefile.common,v 1.7 2004/12/22 17:51:15 joern Exp $
5
6# *** BIG UGLY NOTE ***
7#
8# The removal of get_module_symbol() and replacement with
9# inter_module_register() et al has introduced a link order dependency
10# here where previously there was none. We now have to ensure that
11# doc200[01].o are linked before docprobe.o
12
13obj-$(CONFIG_MTD_DOC2000) += doc2000.o
14obj-$(CONFIG_MTD_DOC2001) += doc2001.o
15obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
16obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
17obj-$(CONFIG_MTD_DOCECC) += docecc.o
18obj-$(CONFIG_MTD_SLRAM) += slram.o
19obj-$(CONFIG_MTD_PHRAM) += phram.o
20obj-$(CONFIG_MTD_PMC551) += pmc551.o
21obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
22obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
23obj-$(CONFIG_MTD_LART) += lart.o
24obj-$(CONFIG_MTD_BLKMTD) += blkmtd.o
25obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
new file mode 100644
index 000000000000..662e807801ed
--- /dev/null
+++ b/drivers/mtd/devices/blkmtd.c
@@ -0,0 +1,823 @@
1/*
2 * $Id: blkmtd.c,v 1.24 2004/11/16 18:29:01 dwmw2 Exp $
3 *
4 * blkmtd.c - use a block device as a fake MTD
5 *
6 * Author: Simon Evans <spse@secret.org.uk>
7 *
8 * Copyright (C) 2001,2002 Simon Evans
9 *
10 * Licence: GPL
11 *
12 * How it works:
13 *	The driver uses raw I/O to read/write the device and the page
14 *	cache to cache accesses. Writes update the page cache with the
15 *	new data, mark the page dirty and add it to a BIO which is
16 *	then written out.
17 *
18 * It can be loaded Read-Only to prevent erases and writes to the
19 * medium.
20 *
21 */
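/*
 * Example invocation (an illustration based on the module parameters
 * declared below, not taken from the original source):
 *
 *	modprobe blkmtd device=/dev/sdb erasesz=128 ro=0
 *
 * presents /dev/sdb as an MTD with a 128KiB erase size; an "erase" then
 * simply fills the affected pages with 0xff through the page cache and
 * writes them back.
 */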
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <linux/fs.h>
26#include <linux/blkdev.h>
27#include <linux/bio.h>
28#include <linux/pagemap.h>
29#include <linux/list.h>
30#include <linux/init.h>
31#include <linux/mtd/mtd.h>
32
33
34#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
35#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
36#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
37#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)
38
39
40/* Default erase size in K, always make it a multiple of PAGE_SIZE */
41#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
42#define VERSION "$Revision: 1.24 $"
43
44/* Info for the block device */
45struct blkmtd_dev {
46 struct list_head list;
47 struct block_device *blkdev;
48 struct mtd_info mtd_info;
49 struct semaphore wrbuf_mutex;
50};
51
52
53/* Static info about the MTD, used in cleanup_module */
54static LIST_HEAD(blkmtd_device_list);
55
56
57static void blkmtd_sync(struct mtd_info *mtd);
58
59#define MAX_DEVICES 4
60
61/* Module parameters passed by insmod/modprobe */
62static char *device[MAX_DEVICES]; /* the block device to use */
63static int erasesz[MAX_DEVICES]; /* optional default erase size */
64static int ro[MAX_DEVICES]; /* optional read only flag */
65static int sync;
66
67
68MODULE_LICENSE("GPL");
69MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
70MODULE_DESCRIPTION("Emulate an MTD using a block device");
71module_param_array(device, charp, NULL, 0);
72MODULE_PARM_DESC(device, "block device to use");
73module_param_array(erasesz, int, NULL, 0);
74MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
75module_param_array(ro, bool, NULL, 0);
76MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
77module_param(sync, bool, 0);
78MODULE_PARM_DESC(sync, "1=Synchronous writes");
79
80
81/* completion handler for BIO reads */
82static int bi_read_complete(struct bio *bio, unsigned int bytes_done, int error)
83{
84 if (bio->bi_size)
85 return 1;
86
87 complete((struct completion*)bio->bi_private);
88 return 0;
89}
90
91
92/* completion handler for BIO writes */
93static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
94{
95 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
96 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
97
98 if (bio->bi_size)
99 return 1;
100
101 if(!uptodate)
102 err("bi_write_complete: not uptodate\n");
103
104 do {
105 struct page *page = bvec->bv_page;
106 DEBUG(3, "Cleaning up page %ld\n", page->index);
107 if (--bvec >= bio->bi_io_vec)
108 prefetchw(&bvec->bv_page->flags);
109
110 if (uptodate) {
111 SetPageUptodate(page);
112 } else {
113 ClearPageUptodate(page);
114 SetPageError(page);
115 }
116 ClearPageDirty(page);
117 unlock_page(page);
118 page_cache_release(page);
119 } while (bvec >= bio->bi_io_vec);
120
121 complete((struct completion*)bio->bi_private);
122 return 0;
123}
124
125
126/* read one page from the block device */
127static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
128{
129 struct bio *bio;
130 struct completion event;
131 int err = -ENOMEM;
132
133 if(PageUptodate(page)) {
134		DEBUG(2, "blkmtd: readpage page %ld is already up to date\n", page->index);
135 unlock_page(page);
136 return 0;
137 }
138
139 ClearPageUptodate(page);
140 ClearPageError(page);
141
142 bio = bio_alloc(GFP_KERNEL, 1);
143 if(bio) {
144 init_completion(&event);
145 bio->bi_bdev = dev->blkdev;
146 bio->bi_sector = page->index << (PAGE_SHIFT-9);
147 bio->bi_private = &event;
148 bio->bi_end_io = bi_read_complete;
149 if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
150 submit_bio(READ_SYNC, bio);
151 wait_for_completion(&event);
152 err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
153 bio_put(bio);
154 }
155 }
156
157 if(err)
158 SetPageError(page);
159 else
160 SetPageUptodate(page);
161 flush_dcache_page(page);
162 unlock_page(page);
163 return err;
164}
165
166
167/* write out the current BIO and wait for it to finish */
168static int blkmtd_write_out(struct bio *bio)
169{
170 struct completion event;
171 int err;
172
173 if(!bio->bi_vcnt) {
174 bio_put(bio);
175 return 0;
176 }
177
178 init_completion(&event);
179 bio->bi_private = &event;
180 bio->bi_end_io = bi_write_complete;
181 submit_bio(WRITE_SYNC, bio);
182 wait_for_completion(&event);
183 DEBUG(3, "submit_bio completed, bi_vcnt = %d\n", bio->bi_vcnt);
184 err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
185 bio_put(bio);
186 return err;
187}
188
189
190/**
191 * blkmtd_add_page - add a page to the current BIO
192 * @bio: bio to add to (NULL to alloc initial bio)
193 * @blkdev: block device
194 * @page: page to add
195 * @pagecnt: pages left to add
196 *
197 * Adds a page to the current bio, allocating it if necessary. If it cannot be
198 * added, the current bio is written out and a new one is allocated. Returns
199 * the new bio to add or NULL on error
200 */
201static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev,
202 struct page *page, int pagecnt)
203{
204
205 retry:
206 if(!bio) {
207 bio = bio_alloc(GFP_KERNEL, pagecnt);
208 if(!bio)
209 return NULL;
210 bio->bi_sector = page->index << (PAGE_SHIFT-9);
211 bio->bi_bdev = blkdev;
212 }
213
214 if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
215 blkmtd_write_out(bio);
216 bio = NULL;
217 goto retry;
218 }
219 return bio;
220}
221
222
223/**
224 * write_pages - write block of data to device via the page cache
225 * @dev: device to write to
226 * @buf: data source or NULL if erase (output is set to 0xff)
227 * @to: offset into output device
228 * @len: amount to data to write
229 * @retlen: amount of data written
230 *
231 * Grab pages from the page cache and fill them with the source data.
232 * Non page aligned start and end result in a readin of the page and
233 * part of the page being modified. Pages are added to the bio and then written
234 * out.
235 */
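/*
 * Worked example (illustration only): to = 0x11800 and len = 0x3000 with
 * PAGE_SIZE = 4096 give pagenr = 0x11 and offset = 0x800, so the write is
 * split into start_len = 0x800 (partial first page), 0x2000 of whole pages
 * (two pages) and end_len = 0x800 (partial last page), i.e. pagecnt = 4.
 */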
236static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
237 size_t len, size_t *retlen)
238{
239 int pagenr, offset;
240 size_t start_len = 0, end_len;
241 int pagecnt = 0;
242 int err = 0;
243 struct bio *bio = NULL;
244 size_t thislen = 0;
245
246 pagenr = to >> PAGE_SHIFT;
247 offset = to & ~PAGE_MASK;
248
249 DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
250 buf, (long)to, len, pagenr, offset);
251
252 /* see if we have to do a partial write at the start */
253 if(offset) {
254 start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
255 len -= start_len;
256 }
257
258 /* calculate the length of the other two regions */
259 end_len = len & ~PAGE_MASK;
260 len -= end_len;
261
262 if(start_len)
263 pagecnt++;
264
265 if(len)
266 pagecnt += len >> PAGE_SHIFT;
267
268 if(end_len)
269 pagecnt++;
270
271 down(&dev->wrbuf_mutex);
272
273 DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
274 start_len, len, end_len, pagecnt);
275
276 if(start_len) {
277 /* do partial start region */
278 struct page *page;
279
280 DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
281 pagenr, start_len, offset);
282
283 BUG_ON(!buf);
284 page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
285 lock_page(page);
286 if(PageDirty(page)) {
287 err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
288 to, start_len, len, end_len, pagenr);
289 BUG();
290 }
291 memcpy(page_address(page)+offset, buf, start_len);
292 SetPageDirty(page);
293 SetPageUptodate(page);
294 buf += start_len;
295 thislen = start_len;
296 bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
297 if(!bio) {
298 err = -ENOMEM;
299 err("bio_add_page failed\n");
300 goto write_err;
301 }
302 pagecnt--;
303 pagenr++;
304 }
305
306 /* Now do the main loop to a page aligned, n page sized output */
307 if(len) {
308 int pagesc = len >> PAGE_SHIFT;
309 DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
310 pagenr, pagesc);
311 while(pagesc) {
312 struct page *page;
313
314 /* see if page is in the page cache */
315 DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
316			page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
317			if(!page) {
318				warn("write: cannot grab cache page %d", pagenr);
319				err = -ENOMEM;
320				goto write_err;
321			}
322			if(PageDirty(page)) {
323				BUG();
324			}
325 if(!buf) {
326 memset(page_address(page), 0xff, PAGE_SIZE);
327 } else {
328 memcpy(page_address(page), buf, PAGE_SIZE);
329 buf += PAGE_SIZE;
330 }
331 bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
332 if(!bio) {
333 err = -ENOMEM;
334 err("bio_add_page failed\n");
335 goto write_err;
336 }
337 pagenr++;
338 pagecnt--;
339 SetPageDirty(page);
340 SetPageUptodate(page);
341 pagesc--;
342 thislen += PAGE_SIZE;
343 }
344 }
345
346 if(end_len) {
347 /* do the third region */
348 struct page *page;
349 DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
350 pagenr, end_len);
351 BUG_ON(!buf);
352 page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
353 lock_page(page);
354 if(PageDirty(page)) {
355 err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
356 to, start_len, len, end_len, pagenr);
357 BUG();
358 }
359 memcpy(page_address(page), buf, end_len);
360 SetPageDirty(page);
361 SetPageUptodate(page);
362 DEBUG(3, "blkmtd: write: writing out partial end\n");
363 thislen += end_len;
364 bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
365 if(!bio) {
366 err = -ENOMEM;
367 err("bio_add_page failed\n");
368 goto write_err;
369 }
370 pagenr++;
371 }
372
373 DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
374 write_err:
375 if(bio)
376 blkmtd_write_out(bio);
377
378 DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", thislen, err);
379 up(&dev->wrbuf_mutex);
380
381 if(retlen)
382 *retlen = thislen;
383 return err;
384}
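/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the three-region split that write_pages()
 * above performs on an unaligned write - a partial leading page, whole
 * pages, and a partial trailing page - reproduced as a standalone userspace
 * program.  The 4096-byte page size and the split_write() helper are
 * assumptions made for this example only.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096UL

static void split_write(unsigned long to, size_t len)
{
	unsigned long offset = to & (EX_PAGE_SIZE - 1);	/* offset into first page */
	size_t start_len = 0, end_len, middle;

	/* partial write at the start, up to the end of the first page */
	if (offset) {
		start_len = (offset + len > EX_PAGE_SIZE) ? EX_PAGE_SIZE - offset : len;
		len -= start_len;
	}
	/* partial write at the end */
	end_len = len & (EX_PAGE_SIZE - 1);
	/* whole pages in the middle */
	middle = len - end_len;

	printf("to=%lu: start=%zu whole-pages=%zu end=%zu\n",
	       to, start_len, (size_t)(middle / EX_PAGE_SIZE), end_len);
}

int main(void)
{
	split_write(5000, 10000);	/* unaligned start and end */
	split_write(0, 8192);		/* exactly two whole pages */
	return 0;
}
#endif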
385
386
387/* erase a specified part of the device */
388static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
389{
390 struct blkmtd_dev *dev = mtd->priv;
391 struct mtd_erase_region_info *einfo = mtd->eraseregions;
392 int numregions = mtd->numeraseregions;
393 size_t from;
394 u_long len;
395 int err = -EIO;
396 size_t retlen;
397
398 instr->state = MTD_ERASING;
399 from = instr->addr;
400 len = instr->len;
401
402 /* check erase region has valid start and length */
403 DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
404 mtd->name + strlen("blkmtd: "), from, len);
405 while(numregions) {
406 DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
407 einfo->offset, einfo->erasesize, einfo->numblocks);
408 if(from >= einfo->offset
409 && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
410 if(len == einfo->erasesize
411 && ( (from - einfo->offset) % einfo->erasesize == 0))
412 break;
413 }
414 numregions--;
415 einfo++;
416 }
417
418 if(!numregions) {
419 /* Not a valid erase block */
420 err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
421 instr->state = MTD_ERASE_FAILED;
422 err = -EIO;
423 }
424
425 if(instr->state != MTD_ERASE_FAILED) {
426 /* do the erase */
427 DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
428 err = write_pages(dev, NULL, from, len, &retlen);
429 if(err || retlen != len) {
430 err("erase failed err = %d", err);
431 instr->state = MTD_ERASE_FAILED;
432 } else {
433 instr->state = MTD_ERASE_DONE;
434 }
435 }
436
437 DEBUG(3, "blkmtd: erase: checking callback\n");
438 mtd_erase_callback(instr);
439 DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
440 return err;
441}
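/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the erase-request validation performed by
 * blkmtd_erase() above - the request must be exactly one erase block,
 * aligned within one of the regions - checked standalone.  The region table
 * and request values are invented for this example.
 */
#if 0
#include <stdio.h>

struct ex_region { unsigned long offset, erasesize, numblocks; };

/* Accept only a request that is exactly one aligned erase block in a region. */
static int ex_valid_erase(const struct ex_region *r, int nregions,
			  unsigned long from, unsigned long len)
{
	int i;

	for (i = 0; i < nregions; i++, r++) {
		unsigned long end = r->offset + r->erasesize * r->numblocks;

		if (from >= r->offset && from < end
		    && len == r->erasesize
		    && (from - r->offset) % r->erasesize == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct ex_region regions[] = {
		{ 0,        0x20000, 15 },	/* 15 x 128KiB blocks */
		{ 0x1e0000, 0x10000, 2  },	/* 2 x 64KiB tail blocks */
	};

	printf("%d\n", ex_valid_erase(regions, 2, 0x40000, 0x20000));	/* 1: valid */
	printf("%d\n", ex_valid_erase(regions, 2, 0x41000, 0x20000));	/* 0: unaligned */
	return 0;
}
#endif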
442
443
444/* read a range of the data via the page cache */
445static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
446 size_t *retlen, u_char *buf)
447{
448 struct blkmtd_dev *dev = mtd->priv;
449 int err = 0;
450 int offset;
451 int pagenr, pages;
452 size_t thislen = 0;
453
454 DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
455 mtd->name + strlen("blkmtd: "), from, len, buf);
456
457 if(from > mtd->size)
458 return -EINVAL;
459 if(from + len > mtd->size)
460 len = mtd->size - from;
461
462 pagenr = from >> PAGE_SHIFT;
463 offset = from - (pagenr << PAGE_SHIFT);
464
465 pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
466 DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
467 pagenr, offset, pages);
468
469 while(pages) {
470 struct page *page;
471 int cpylen;
472
473 DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
474 page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
475 if(IS_ERR(page)) {
476 err = -EIO;
477 goto readerr;
478 }
479
480 cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
481 if(offset+cpylen > PAGE_SIZE)
482 cpylen = PAGE_SIZE-offset;
483
484 memcpy(buf + thislen, page_address(page) + offset, cpylen);
485 offset = 0;
486 len -= cpylen;
487 thislen += cpylen;
488 pagenr++;
489 pages--;
490 if(!PageDirty(page))
491 page_cache_release(page);
492 }
493
494 readerr:
495 if(retlen)
496 *retlen = thislen;
497 DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
498 return err;
499}
500
501
502/* write data to the underlying device */
503static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
504 size_t *retlen, const u_char *buf)
505{
506 struct blkmtd_dev *dev = mtd->priv;
507 int err;
508
509 if(!len)
510 return 0;
511
512 DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
513 mtd->name + strlen("blkmtd: "), to, len, buf);
514
515 if(to >= mtd->size) {
516 return -ENOSPC;
517 }
518
519 if(to + len > mtd->size) {
520 len = mtd->size - to;
521 }
522
523 err = write_pages(dev, buf, to, len, retlen);
524 if(err > 0)
525 err = 0;
526 DEBUG(2, "blkmtd: write: end, err = %d\n", err);
527 return err;
528}
529
530
531/* sync the device - wait until the write queue is empty */
532static void blkmtd_sync(struct mtd_info *mtd)
533{
534 /* Currently all writes are synchronous */
535}
536
537
538static void free_device(struct blkmtd_dev *dev)
539{
540 DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
541 if(dev) {
542 if(dev->mtd_info.eraseregions)
543 kfree(dev->mtd_info.eraseregions);
544 if(dev->mtd_info.name)
545 kfree(dev->mtd_info.name);
546
547 if(dev->blkdev) {
548 invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
549 close_bdev_excl(dev->blkdev);
550 }
551 kfree(dev);
552 }
553}
554
555
556/* For a given size and initial erase size, calculate the number
557 * and size of each erase region. Goes round the loop twice:
558 * once to find out how many regions there are, then allocates space,
559 * then goes round the loop again to fill them in.
560 */
561static struct mtd_erase_region_info *calc_erase_regions(
562 size_t erase_size, size_t total_size, int *regions)
563{
564 struct mtd_erase_region_info *info = NULL;
565
566 DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
567 erase_size, total_size, *regions);
568 /* Make any user specified erasesize be a power of 2
569 and at least PAGE_SIZE */
570 if(erase_size) {
571 int es = erase_size;
572 erase_size = 1;
573 while(es != 1) {
574 es >>= 1;
575 erase_size <<= 1;
576 }
577 if(erase_size < PAGE_SIZE)
578 erase_size = PAGE_SIZE;
579 } else {
580 erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
581 }
582
583 *regions = 0;
584
585 do {
586 int tot_size = total_size;
587 int er_size = erase_size;
588 int count = 0, offset = 0, regcnt = 0;
589
590 while(tot_size) {
591 count = tot_size / er_size;
592 if(count) {
593 tot_size = tot_size % er_size;
594 if(info) {
595 DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
596 offset, er_size, count);
597 (info+regcnt)->offset = offset;
598 (info+regcnt)->erasesize = er_size;
599 (info+regcnt)->numblocks = count;
600 (*regions)++;
601 }
602 regcnt++;
603 offset += (count * er_size);
604 }
605 while(er_size > tot_size)
606 er_size >>= 1;
607 }
608 if(info == NULL) {
609 info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
610 if(!info)
611 break;
612 }
613 } while(!(*regions));
614 DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
615 erase_size, total_size, *regions);
616 return info;
617}
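/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the region-splitting loop used by
 * calc_erase_regions() above, run standalone for a device whose size is not
 * a multiple of the erase size.  The device and erase sizes are invented
 * for this example; the real function also rounds the erase size and
 * allocates the region array in a second pass.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long tot_size = (2 * 1024 + 96) * 1024;	/* 2MiB + 96KiB device */
	unsigned long er_size  = 128 * 1024;			/* 128KiB erase blocks */
	unsigned long offset = 0, count;

	while (tot_size) {
		count = tot_size / er_size;
		if (count) {
			tot_size %= er_size;
			printf("region: offset=0x%lx erasesize=0x%lx numblocks=%lu\n",
			       offset, er_size, count);
			offset += count * er_size;
		}
		/* shrink the erase size until it fits what is left */
		while (er_size > tot_size)
			er_size >>= 1;
	}
	return 0;
}
#endif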
618
619
620extern dev_t __init name_to_dev_t(const char *line);
621
622static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
623{
624 struct block_device *bdev;
625 int mode;
626 struct blkmtd_dev *dev;
627
628 if(!devname)
629 return NULL;
630
631 /* Get a handle on the device */
632
633
634#ifdef MODULE
635 mode = (readonly) ? O_RDONLY : O_RDWR;
636 bdev = open_bdev_excl(devname, mode, NULL);
637#else
638 mode = (readonly) ? FMODE_READ : FMODE_WRITE;
639 bdev = open_by_devnum(name_to_dev_t(devname), mode);
640#endif
641 if(IS_ERR(bdev)) {
642 err("error: cannot open device %s", devname);
643 DEBUG(2, "blkmtd: opening bdev returned %ld\n", PTR_ERR(bdev));
644 return NULL;
645 }
646
647 DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
648 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
649
650 if(MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
651 err("attempting to use an MTD device as a block device");
652 blkdev_put(bdev);
653 return NULL;
654 }
655
656 dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
657 if(dev == NULL) {
658 blkdev_put(bdev);
659 return NULL;
660 }
661
662 memset(dev, 0, sizeof(struct blkmtd_dev));
663 dev->blkdev = bdev;
664 if(!readonly) {
665 init_MUTEX(&dev->wrbuf_mutex);
666 }
667
668 dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
669
670 /* Setup the MTD structure */
671 /* include the block device name in the MTD name */
672 dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL);
673 if(dev->mtd_info.name == NULL)
674 goto devinit_err;
675
676 sprintf(dev->mtd_info.name, "blkmtd: %s", devname);
677 dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size,
678 &dev->mtd_info.numeraseregions);
679 if(dev->mtd_info.eraseregions == NULL)
680 goto devinit_err;
681
682 dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize;
683 DEBUG(1, "blkmtd: init: found %d erase regions\n",
684 dev->mtd_info.numeraseregions);
685
686 if(readonly) {
687 dev->mtd_info.type = MTD_ROM;
688 dev->mtd_info.flags = MTD_CAP_ROM;
689 } else {
690 dev->mtd_info.type = MTD_RAM;
691 dev->mtd_info.flags = MTD_CAP_RAM;
692 dev->mtd_info.erase = blkmtd_erase;
693 dev->mtd_info.write = blkmtd_write;
694 dev->mtd_info.writev = default_mtd_writev;
695 dev->mtd_info.sync = blkmtd_sync;
696 }
697 dev->mtd_info.read = blkmtd_read;
698 dev->mtd_info.readv = default_mtd_readv;
699 dev->mtd_info.priv = dev;
700 dev->mtd_info.owner = THIS_MODULE;
701
702 list_add(&dev->list, &blkmtd_device_list);
703 if (add_mtd_device(&dev->mtd_info)) {
704 /* Device didn't get added, so free the entry */
705 list_del(&dev->list);
706 goto devinit_err;
707 } else {
708 info("mtd%d: [%s] erase_size = %dKiB %s",
709 dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
710 dev->mtd_info.erasesize >> 10,
711 readonly ? "(read-only)" : "");
712 }
713
714 return dev;
715
716 devinit_err:
717 free_device(dev);
718 return NULL;
719}
720
721
722/* Cleanup and exit - sync the devices and remove the MTD entries */
723static void __devexit cleanup_blkmtd(void)
724{
725 struct list_head *temp1, *temp2;
726
727 /* Remove the MTD devices */
728 list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
729 struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
730 list);
731 blkmtd_sync(&dev->mtd_info);
732 del_mtd_device(&dev->mtd_info);
733 info("mtd%d: [%s] removed", dev->mtd_info.index,
734 dev->mtd_info.name + strlen("blkmtd: "));
735 list_del(&dev->list);
736 free_device(dev);
737 }
738}
739
740#ifndef MODULE
741
742/* Handle kernel boot params */
743
744
745static int __init param_blkmtd_device(char *str)
746{
747 int i;
748
749 for(i = 0; i < MAX_DEVICES; i++) {
750 device[i] = str;
751 DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
752 strsep(&str, ",");
753 }
754 return 1;
755}
756
757
758static int __init param_blkmtd_erasesz(char *str)
759{
760 int i;
761 for(i = 0; i < MAX_DEVICES; i++) {
762 char *val = strsep(&str, ",");
763 if(val)
764 erasesz[i] = simple_strtoul(val, NULL, 0);
765 DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
766 }
767
768 return 1;
769}
770
771
772static int __init param_blkmtd_ro(char *str)
773{
774 int i;
775 for(i = 0; i < MAX_DEVICES; i++) {
776 char *val = strsep(&str, ",");
777 if(val)
778 ro[i] = simple_strtoul(val, NULL, 0);
779 DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
780 }
781
782 return 1;
783}
784
785
786static int __init param_blkmtd_sync(char *str)
787{
788 if(str[0] == '1')
789 sync = 1;
790 return 1;
791}
792
793__setup("blkmtd_device=", param_blkmtd_device);
794__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
795__setup("blkmtd_ro=", param_blkmtd_ro);
796__setup("blkmtd_sync=", param_blkmtd_sync);
797
798#endif
799
800
801/* Startup */
802static int __init init_blkmtd(void)
803{
804 int i;
805
806 info("version " VERSION);
807 /* Check args - device[0] is the bare minimum*/
808 if(!device[0]) {
809 err("error: missing `device' name\n");
810 return -EINVAL;
811 }
812
813 for(i = 0; i < MAX_DEVICES; i++)
814 add_device(device[i], ro[i], erasesz[i] << 10);
815
816 if(list_empty(&blkmtd_device_list))
817 return -EINVAL;
818
819 return 0;
820}
821
822module_init(init_blkmtd);
823module_exit(cleanup_blkmtd);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
new file mode 100644
index 000000000000..cfe6ccf07972
--- /dev/null
+++ b/drivers/mtd/devices/block2mtd.c
@@ -0,0 +1,495 @@
1/*
2 * $Id: block2mtd.c,v 1.23 2005/01/05 17:05:46 dwmw2 Exp $
3 *
4 * block2mtd.c - create an mtd from a block device
5 *
6 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
7 * Copyright (C) 2004 Gareth Bult <Gareth@Encryptec.net>
8 * Copyright (C) 2004,2005 Jörn Engel <joern@wh.fh-wedel.de>
9 *
10 * Licence: GPL
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/blkdev.h>
16#include <linux/bio.h>
17#include <linux/pagemap.h>
18#include <linux/list.h>
19#include <linux/init.h>
20#include <linux/mtd/mtd.h>
21#include <linux/buffer_head.h>
22
23#define VERSION "$Revision: 1.23 $"
24
25
26#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
27#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
28
29
30/* Info for the block device */
31struct block2mtd_dev {
32 struct list_head list;
33 struct block_device *blkdev;
34 struct mtd_info mtd;
35 struct semaphore write_mutex;
36};
37
38
39/* Static info about the MTD, used in cleanup_module */
40static LIST_HEAD(blkmtd_device_list);
41
42
43#define PAGE_READAHEAD 64
44void cache_readahead(struct address_space *mapping, int index)
45{
46 filler_t *filler = (filler_t*)mapping->a_ops->readpage;
47 int i, pagei;
48 unsigned ret = 0;
49 unsigned long end_index;
50 struct page *page;
51 LIST_HEAD(page_pool);
52 struct inode *inode = mapping->host;
53 loff_t isize = i_size_read(inode);
54
55 if (!isize) {
56 INFO("isize == 0 in cache_readahead");
57 return;
58 }
59
60 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
61
62 read_lock_irq(&mapping->tree_lock);
63 for (i = 0; i < PAGE_READAHEAD; i++) {
64 pagei = index + i;
65 if (pagei > end_index) {
66 INFO("Overrun end of disk in cache readahead\n");
67 break;
68 }
69 page = radix_tree_lookup(&mapping->page_tree, pagei);
70 if (page && (!i))
71 break;
72 if (page)
73 continue;
74 read_unlock_irq(&mapping->tree_lock);
75 page = page_cache_alloc_cold(mapping);
76 read_lock_irq(&mapping->tree_lock);
77 if (!page)
78 break;
79 page->index = pagei;
80 list_add(&page->lru, &page_pool);
81 ret++;
82 }
83 read_unlock_irq(&mapping->tree_lock);
84 if (ret)
85 read_cache_pages(mapping, &page_pool, filler, NULL);
86}
87
88
89static struct page* page_readahead(struct address_space *mapping, int index)
90{
91 filler_t *filler = (filler_t*)mapping->a_ops->readpage;
92 //do_page_cache_readahead(mapping, index, XXX, 64);
93 cache_readahead(mapping, index);
94 return read_cache_page(mapping, index, filler, NULL);
95}
96
97
98/* erase a specified part of the device */
99static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
100{
101 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
102 struct page *page;
103 int index = to >> PAGE_SHIFT; // page index
104 int pages = len >> PAGE_SHIFT;
105 u_long *p;
106 u_long *max;
107
108 while (pages) {
109 page = page_readahead(mapping, index);
110 if (!page)
111 return -ENOMEM;
112 if (IS_ERR(page))
113 return PTR_ERR(page);
114
115 max = (u_long*)page_address(page) + PAGE_SIZE/sizeof(u_long);
116 for (p=(u_long*)page_address(page); p<max; p++)
117 if (*p != -1UL) {
118 lock_page(page);
119 memset(page_address(page), 0xff, PAGE_SIZE);
120 set_page_dirty(page);
121 unlock_page(page);
122 break;
123 }
124
125 page_cache_release(page);
126 pages--;
127 index++;
128 }
129 return 0;
130}
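/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the "only dirty a page that is not already
 * all-0xff" test used by _block2mtd_erase() above, demonstrated on a plain
 * buffer.  The buffer size and the ex_erase_page() helper are assumptions
 * made for this example.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define EX_PAGE_SIZE 4096

/* Return 1 if the page needed erasing, i.e. it contained a non-0xff word. */
static int ex_erase_page(unsigned long *page)
{
	unsigned long *p, *max = page + EX_PAGE_SIZE / sizeof(unsigned long);

	for (p = page; p < max; p++)
		if (*p != ~0UL) {
			memset(page, 0xff, EX_PAGE_SIZE);
			return 1;
		}
	return 0;
}

int main(void)
{
	static unsigned long page[EX_PAGE_SIZE / sizeof(unsigned long)];

	memset(page, 0xff, sizeof(page));
	printf("already erased: %d\n", ex_erase_page(page));	/* 0: nothing to do */
	page[10] = 0x1234;
	printf("needed erase:   %d\n", ex_erase_page(page));	/* 1: page rewritten */
	return 0;
}
#endif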
131static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
132{
133 struct block2mtd_dev *dev = mtd->priv;
134 size_t from = instr->addr;
135 size_t len = instr->len;
136 int err;
137
138 instr->state = MTD_ERASING;
139 down(&dev->write_mutex);
140 err = _block2mtd_erase(dev, from, len);
141 up(&dev->write_mutex);
142 if (err) {
143 ERROR("erase failed err = %d", err);
144 instr->state = MTD_ERASE_FAILED;
145 } else
146 instr->state = MTD_ERASE_DONE;
147
149 mtd_erase_callback(instr);
150 return err;
151}
152
153
154static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
155 size_t *retlen, u_char *buf)
156{
157 struct block2mtd_dev *dev = mtd->priv;
158 struct page *page;
159 int index = from >> PAGE_SHIFT;
160 int offset = from & (PAGE_SIZE-1);
161 int cpylen;
162
163 if (from > mtd->size)
164 return -EINVAL;
165 if (from + len > mtd->size)
166 len = mtd->size - from;
167
168 if (retlen)
169 *retlen = 0;
170
171 while (len) {
172 if ((offset + len) > PAGE_SIZE)
173 cpylen = PAGE_SIZE - offset; // multiple pages
174 else
175 cpylen = len; // this page
176 len = len - cpylen;
177
178 // Get page
179 page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
180 if (!page)
181 return -ENOMEM;
182 if (IS_ERR(page))
183 return PTR_ERR(page);
184
185 memcpy(buf, page_address(page) + offset, cpylen);
186 page_cache_release(page);
187
188 if (retlen)
189 *retlen += cpylen;
190 buf += cpylen;
191 offset = 0;
192 index++;
193 }
194 return 0;
195}
196
197
198/* write data to the underlying device */
199static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
200 loff_t to, size_t len, size_t *retlen)
201{
202 struct page *page;
203 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
204 int index = to >> PAGE_SHIFT; // page index
205 int offset = to & ~PAGE_MASK; // page offset
206 int cpylen;
207
208 if (retlen)
209 *retlen = 0;
210 while (len) {
211 if ((offset+len) > PAGE_SIZE)
212 cpylen = PAGE_SIZE - offset; // multiple pages
213 else
214 cpylen = len; // this page
215 len = len - cpylen;
216
217 // Get page
218 page = page_readahead(mapping, index);
219 if (!page)
220 return -ENOMEM;
221 if (IS_ERR(page))
222 return PTR_ERR(page);
223
224 if (memcmp(page_address(page)+offset, buf, cpylen)) {
225 lock_page(page);
226 memcpy(page_address(page) + offset, buf, cpylen);
227 set_page_dirty(page);
228 unlock_page(page);
229 }
230 page_cache_release(page);
231
232 if (retlen)
233 *retlen += cpylen;
234
235 buf += cpylen;
236 offset = 0;
237 index++;
238 }
239 return 0;
240}
241static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
242 size_t *retlen, const u_char *buf)
243{
244 struct block2mtd_dev *dev = mtd->priv;
245 int err;
246
247 if (!len)
248 return 0;
249 if (to >= mtd->size)
250 return -ENOSPC;
251 if (to + len > mtd->size)
252 len = mtd->size - to;
253
254 down(&dev->write_mutex);
255 err = _block2mtd_write(dev, buf, to, len, retlen);
256 up(&dev->write_mutex);
257 if (err > 0)
258 err = 0;
259 return err;
260}
261
262
263/* sync the device - wait until the write queue is empty */
264static void block2mtd_sync(struct mtd_info *mtd)
265{
266 struct block2mtd_dev *dev = mtd->priv;
267 sync_blockdev(dev->blkdev);
268 return;
269}
270
271
272static void block2mtd_free_device(struct block2mtd_dev *dev)
273{
274 if (!dev)
275 return;
276
277 kfree(dev->mtd.name);
278
279 if (dev->blkdev) {
280 invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
281 close_bdev_excl(dev->blkdev);
282 }
283
284 kfree(dev);
285}
286
287
288/* FIXME: ensure that mtd->size % erase_size == 0 */
289static struct block2mtd_dev *add_device(char *devname, int erase_size)
290{
291 struct block_device *bdev;
292 struct block2mtd_dev *dev;
293
294 if (!devname)
295 return NULL;
296
297 dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
298 if (!dev)
299 return NULL;
300 memset(dev, 0, sizeof(*dev));
301
302 /* Get a handle on the device */
303 bdev = open_bdev_excl(devname, O_RDWR, NULL);
304 if (IS_ERR(bdev)) {
305 ERROR("error: cannot open device %s", devname);
306 goto devinit_err;
307 }
308 dev->blkdev = bdev;
309
310 if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
311 ERROR("attempting to use an MTD device as a block device");
312 goto devinit_err;
313 }
314
315 init_MUTEX(&dev->write_mutex);
316
317 /* Setup the MTD structure */
318 /* include the block device name in the MTD name */
319 dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
320 GFP_KERNEL);
321 if (!dev->mtd.name)
322 goto devinit_err;
323
324 sprintf(dev->mtd.name, "block2mtd: %s", devname);
325
326 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
327 dev->mtd.erasesize = erase_size;
328 dev->mtd.type = MTD_RAM;
329 dev->mtd.flags = MTD_CAP_RAM;
330 dev->mtd.erase = block2mtd_erase;
331 dev->mtd.write = block2mtd_write;
332 dev->mtd.writev = default_mtd_writev;
333 dev->mtd.sync = block2mtd_sync;
334 dev->mtd.read = block2mtd_read;
335 dev->mtd.readv = default_mtd_readv;
336 dev->mtd.priv = dev;
337 dev->mtd.owner = THIS_MODULE;
338
339 if (add_mtd_device(&dev->mtd)) {
340 /* Device didn't get added, so free the entry */
341 goto devinit_err;
342 }
343 list_add(&dev->list, &blkmtd_device_list);
344 INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
345 dev->mtd.name + strlen("block2mtd: "),
346 dev->mtd.erasesize >> 10, dev->mtd.erasesize);
347 return dev;
348
349devinit_err:
350 block2mtd_free_device(dev);
351 return NULL;
352}
353
354
355static int ustrtoul(const char *cp, char **endp, unsigned int base)
356{
357 unsigned long result = simple_strtoul(cp, endp, base);
358 switch (**endp) {
359 case 'G' :
360 result *= 1024;
361 case 'M':
362 result *= 1024;
363 case 'k':
364 result *= 1024;
365 /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
366 if ((*endp)[1] == 'i')
367 (*endp) += 2;
368 }
369 return result;
370}
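/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): how the k/M/G suffix handling in ustrtoul()
 * above behaves, using a local userspace copy built on strtoul().  The case
 * fall-through is what turns 'G' into 1024*1024*1024, and only the
 * "ki"/"Mi"/"Gi" spellings advance endp past the suffix, so a bare "64k"
 * leaves trailing text and is rejected by parse_num32().
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static unsigned long ex_ustrtoul(const char *cp, char **endp)
{
	unsigned long result = strtoul(cp, endp, 0);

	switch (**endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'k':
		result *= 1024;
		/* only "ki"/"Mi"/"Gi" move endp past the suffix */
		if ((*endp)[1] == 'i')
			(*endp) += 2;
	}
	return result;
}

int main(void)
{
	char *end;
	unsigned long v;

	v = ex_ustrtoul("64ki", &end);
	printf("64ki -> %lu, trailing \"%s\"\n", v, end);	/* 65536, "" */
	v = ex_ustrtoul("64k", &end);
	printf("64k  -> %lu, trailing \"%s\"\n", v, end);	/* 65536, "k" */
	return 0;
}
#endif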
371
372
373static int parse_num32(u32 *num32, const char *token)
374{
375 char *endp;
376 unsigned long n;
377
378 n = ustrtoul(token, &endp, 0);
379 if (*endp)
380 return -EINVAL;
381
382 *num32 = n;
383 return 0;
384}
385
386
387static int parse_name(char **pname, const char *token, size_t limit)
388{
389 size_t len;
390 char *name;
391
392 len = strlen(token) + 1;
393 if (len > limit)
394 return -ENOSPC;
395
396 name = kmalloc(len, GFP_KERNEL);
397 if (!name)
398 return -ENOMEM;
399
400 strcpy(name, token);
401
402 *pname = name;
403 return 0;
404}
405
406
407static inline void kill_final_newline(char *str)
408{
409 char *newline = strrchr(str, '\n');
410 if (newline && !newline[1])
411 *newline = 0;
412}
413
414
415#define parse_err(fmt, args...) do { \
416 ERROR("block2mtd: " fmt "\n", ## args); \
417 return 0; \
418} while (0)
419
420static int block2mtd_setup(const char *val, struct kernel_param *kp)
421{
422 char buf[80+12], *str=buf; /* 80 for device, 12 for erase size */
423 char *token[2];
424 char *name;
425 u32 erase_size = PAGE_SIZE;
426 int i, ret;
427
428 if (strnlen(val, sizeof(buf)) >= sizeof(buf))
429 parse_err("parameter too long");
430
431 strcpy(str, val);
432 kill_final_newline(str);
433
434 for (i=0; i<2; i++)
435 token[i] = strsep(&str, ",");
436
437 if (str)
438 parse_err("too many arguments");
439
440 if (!token[0])
441 parse_err("no argument");
442
443 ret = parse_name(&name, token[0], 80);
444 if (ret == -ENOMEM)
445 parse_err("out of memory");
446 if (ret == -ENOSPC)
447 parse_err("name too long");
448 if (ret)
449 return 0;
450
451 if (token[1]) {
452 ret = parse_num32(&erase_size, token[1]);
453 if (ret)
454 parse_err("illegal erase size");
455 }
456
457 add_device(name, erase_size);
458
459 return 0;
460}
461
462
463module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
464MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
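/*
 * Editor's note: example invocations for the parameter registered above
 * (the device names and sizes are placeholders, not taken from this file):
 *
 *     modprobe block2mtd block2mtd=/dev/sdb1
 *     modprobe block2mtd block2mtd=/dev/sdb1,128ki
 *
 * or, if the driver is built in, something like block2mtd.block2mtd=...
 * on the kernel command line should reach block2mtd_setup() the same way.
 * The optional second field goes through parse_num32()/ustrtoul() above,
 * so sizes may be given in bytes or with the "ki"/"Mi"/"Gi" suffixes.
 */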
465
466static int __init block2mtd_init(void)
467{
468 INFO("version " VERSION);
469 return 0;
470}
471
472
473static void __devexit block2mtd_exit(void)
474{
475 struct list_head *pos, *next;
476
477 /* Remove the MTD devices */
478 list_for_each_safe(pos, next, &blkmtd_device_list) {
479 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
480 block2mtd_sync(&dev->mtd);
481 del_mtd_device(&dev->mtd);
482 INFO("mtd%d: [%s] removed", dev->mtd.index,
483 dev->mtd.name + strlen("block2mtd: "));
484 list_del(&dev->list);
485 block2mtd_free_device(dev);
486 }
487}
488
489
490module_init(block2mtd_init);
491module_exit(block2mtd_exit);
492
493MODULE_LICENSE("GPL");
494MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
495MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
new file mode 100644
index 000000000000..5fc532895a24
--- /dev/null
+++ b/drivers/mtd/devices/doc2000.c
@@ -0,0 +1,1309 @@
1
2/*
3 * Linux driver for Disk-On-Chip 2000 and Millennium
4 * (c) 1999 Machine Vision Holdings, Inc.
5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
6 *
7 * $Id: doc2000.c,v 1.66 2005/01/05 18:05:12 dwmw2 Exp $
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/errno.h>
13#include <asm/io.h>
14#include <asm/uaccess.h>
15#include <linux/miscdevice.h>
16#include <linux/pci.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <linux/types.h>
22#include <linux/bitops.h>
23
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/doc2000.h>
27
28#define DOC_SUPPORT_2000
29#define DOC_SUPPORT_2000TSOP
30#define DOC_SUPPORT_MILLENNIUM
31
32#ifdef DOC_SUPPORT_2000
33#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k)
34#else
35#define DoC_is_2000(doc) (0)
36#endif
37
38#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
39#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
40#else
41#define DoC_is_Millennium(doc) (0)
42#endif
43
44/* #define ECC_DEBUG */
45
46/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
47 * This may be due to different revisions of the built-in ASIC controller,
48 * or simply a QA/bug issue. Who knows? If you have trouble, please uncomment
49 * this:
50 #undef USE_MEMCPY
51*/
52
53static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
54 size_t *retlen, u_char *buf);
55static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
56 size_t *retlen, const u_char *buf);
57static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
58 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
59static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
60 size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
61static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
62 unsigned long count, loff_t to, size_t *retlen,
63 u_char *eccbuf, struct nand_oobinfo *oobsel);
64static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
65 size_t *retlen, u_char *buf);
66static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
67 size_t *retlen, const u_char *buf);
68static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
69 size_t *retlen, const u_char *buf);
70static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
71
72static struct mtd_info *doc2klist = NULL;
73
74/* Perform the required delay cycles by reading from the appropriate register */
75static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
76{
77 volatile char dummy;
78 int i;
79
80 for (i = 0; i < cycles; i++) {
81 if (DoC_is_Millennium(doc))
82 dummy = ReadDOC(doc->virtadr, NOP);
83 else
84 dummy = ReadDOC(doc->virtadr, DOCStatus);
85 }
86
87}
88
89/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
90static int _DoC_WaitReady(struct DiskOnChip *doc)
91{
92 void __iomem *docptr = doc->virtadr;
93 unsigned long timeo = jiffies + (HZ * 10);
94
95 DEBUG(MTD_DEBUG_LEVEL3,
96 "_DoC_WaitReady called for out-of-line wait\n");
97
98 /* Out-of-line routine to wait for chip response */
99 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
100 /* issue 2 reads from the NOP register after reading from the CDSNControl
101 register; see Software Requirement 11.4 item 2. */
102 DoC_Delay(doc, 2);
103
104 if (time_after(jiffies, timeo)) {
105 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
106 return -EIO;
107 }
108 udelay(1);
109 cond_resched();
110 }
111
112 return 0;
113}
114
115static inline int DoC_WaitReady(struct DiskOnChip *doc)
116{
117 void __iomem *docptr = doc->virtadr;
118
119 /* This is inline, to optimise the common case, where it's ready instantly */
120 int ret = 0;
121
122 /* 4 reads from the NOP register should be issued prior to the read from
123 CDSNControl; see Software Requirement 11.4 item 2. */
124 DoC_Delay(doc, 4);
125
126 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
127 /* Call the out-of-line routine to wait */
128 ret = _DoC_WaitReady(doc);
129
130 /* issue 2 reads from the NOP register after reading from the CDSNControl
131 register; see Software Requirement 11.4 item 2. */
132 DoC_Delay(doc, 2);
133
134 return ret;
135}
136
137/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to
138 bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
139 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
140
141static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command,
142 unsigned char xtraflags)
143{
144 void __iomem *docptr = doc->virtadr;
145
146 if (DoC_is_2000(doc))
147 xtraflags |= CDSN_CTRL_FLASH_IO;
148
149 /* Assert the CLE (Command Latch Enable) line to the flash chip */
150 WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
151 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
152
153 if (DoC_is_Millennium(doc))
154 WriteDOC(command, docptr, CDSNSlowIO);
155
156 /* Send the command */
157 WriteDOC_(command, docptr, doc->ioreg);
158 if (DoC_is_Millennium(doc))
159 WriteDOC(command, docptr, WritePipeTerm);
160
161 /* Lower the CLE line */
162 WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
163 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
164
165 /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */
166 return DoC_WaitReady(doc);
167}
168
169/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to
170 bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
171 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
172
173static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
174 unsigned char xtraflags1, unsigned char xtraflags2)
175{
176 int i;
177 void __iomem *docptr = doc->virtadr;
178
179 if (DoC_is_2000(doc))
180 xtraflags1 |= CDSN_CTRL_FLASH_IO;
181
182 /* Assert the ALE (Address Latch Enable) line to the flash chip */
183 WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
184
185 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
186
187 /* Send the address */
188 /* Devices with 256-byte page are addressed as:
189 Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
190 * there is no device on the market with page256
191 and more than 24 bits.
192 Devices with 512-byte page are addressed as:
193 Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
194 * 25-31 is sent only if the chip supports it.
195 * bit 8 changes the read command to be sent
196 (NAND_CMD_READ0 or NAND_CMD_READ1).
197 */
198
199 if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
200 if (DoC_is_Millennium(doc))
201 WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
202 WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
203 }
204
205 if (doc->page256) {
206 ofs = ofs >> 8;
207 } else {
208 ofs = ofs >> 9;
209 }
210
211 if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
212 for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
213 if (DoC_is_Millennium(doc))
214 WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
215 WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
216 }
217 }
218
219 if (DoC_is_Millennium(doc))
220 WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
221
222 DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
223
224 /* FIXME: The SlowIO's for millennium could be replaced by
225 a single WritePipeTerm here. mf. */
226
227 /* Lower the ALE line */
228 WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr,
229 CDSNControl);
230
231 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
232
233 /* Wait for the chip to respond - Software requirement 11.4.1 */
234 return DoC_WaitReady(doc);
235}
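/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the address bytes DoC_Address() above sends
 * for a 512-byte-page chip, as described in the comment inside the function.
 * The flash offset and the 3-byte page address length are example values.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ofs = 0x123456;	/* example flash offset */
	unsigned long page;
	int pageadrlen = 3, i;

	/* column byte: offset within the 512-byte page (bit 8 picks READ0/READ1) */
	printf("column byte: 0x%02lx\n", ofs & 0xff);

	/* page address: offset >> 9 for 512-byte pages, sent least significant byte first */
	page = ofs >> 9;
	for (i = 0; i < pageadrlen; i++, page >>= 8)
		printf("page byte %d: 0x%02lx\n", i, page & 0xff);
	return 0;
}
#endif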
236
237/* Read a buffer from DoC, taking care of Millennium oddities */
238static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
239{
240 volatile int dummy;
241 int modulus = 0xffff;
242 void __iomem *docptr = doc->virtadr;
243 int i;
244
245 if (len <= 0)
246 return;
247
248 if (DoC_is_Millennium(doc)) {
249 /* Read the data via the internal pipeline through CDSN IO register,
250 see Pipelined Read Operations 11.3 */
251 dummy = ReadDOC(docptr, ReadPipeInit);
252
253 /* Millennium should use the LastDataRead register - Pipeline Reads */
254 len--;
255
256 /* This is needed for correct ECC calculation */
257 modulus = 0xff;
258 }
259
260 for (i = 0; i < len; i++)
261 buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));
262
263 if (DoC_is_Millennium(doc)) {
264 buf[i] = ReadDOC(docptr, LastDataRead);
265 }
266}
267
268/* Write a buffer to DoC, taking care of Millennium oddities */
269static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
270{
271 void __iomem *docptr = doc->virtadr;
272 int i;
273
274 if (len <= 0)
275 return;
276
277 for (i = 0; i < len; i++)
278 WriteDOC_(buf[i], docptr, doc->ioreg + i);
279
280 if (DoC_is_Millennium(doc)) {
281 WriteDOC(0x00, docptr, WritePipeTerm);
282 }
283}
284
285
286/* DoC_SelectChip: Select a given flash chip within the current floor */
287
288static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
289{
290 void __iomem *docptr = doc->virtadr;
291
292 /* Software requirement 11.4.4 before writing DeviceSelect */
293 /* Deassert the CE line to eliminate glitches on the FCE# outputs */
294 WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
295 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
296
297 /* Select the individual flash chip requested */
298 WriteDOC(chip, docptr, CDSNDeviceSelect);
299 DoC_Delay(doc, 4);
300
301 /* Reassert the CE line */
302 WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
303 CDSNControl);
304 DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
305
306 /* Wait for it to be ready */
307 return DoC_WaitReady(doc);
308}
309
310/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
311
312static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
313{
314 void __iomem *docptr = doc->virtadr;
315
316 /* Select the floor (bank) of chips required */
317 WriteDOC(floor, docptr, FloorSelect);
318
319 /* Wait for the chip to be ready */
320 return DoC_WaitReady(doc);
321}
322
323/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
324
325static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
326{
327 int mfr, id, i, j;
328 volatile char dummy;
329
330 /* Page in the required floor/chip */
331 DoC_SelectFloor(doc, floor);
332 DoC_SelectChip(doc, chip);
333
334 /* Reset the chip */
335 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
336 DEBUG(MTD_DEBUG_LEVEL2,
337 "DoC_Command (reset) for %d,%d returned true\n",
338 floor, chip);
339 return 0;
340 }
341
342
343 /* Read the NAND chip ID: 1. Send ReadID command */
344 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
345 DEBUG(MTD_DEBUG_LEVEL2,
346 "DoC_Command (ReadID) for %d,%d returned true\n",
347 floor, chip);
348 return 0;
349 }
350
351 /* Read the NAND chip ID: 2. Send address byte zero */
352 DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);
353
354 /* Read the manufacturer and device id codes from the device */
355
356 if (DoC_is_Millennium(doc)) {
357 DoC_Delay(doc, 2);
358 dummy = ReadDOC(doc->virtadr, ReadPipeInit);
359 mfr = ReadDOC(doc->virtadr, LastDataRead);
360
361 DoC_Delay(doc, 2);
362 dummy = ReadDOC(doc->virtadr, ReadPipeInit);
363 id = ReadDOC(doc->virtadr, LastDataRead);
364 } else {
365 /* CDSN Slow IO register see Software Req 11.4 item 5. */
366 dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
367 DoC_Delay(doc, 2);
368 mfr = ReadDOC_(doc->virtadr, doc->ioreg);
369
370 /* CDSN Slow IO register see Software Req 11.4 item 5. */
371 dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
372 DoC_Delay(doc, 2);
373 id = ReadDOC_(doc->virtadr, doc->ioreg);
374 }
375
376 /* No response - return failure */
377 if (mfr == 0xff || mfr == 0)
378 return 0;
379
380 /* Check it's the same as the first chip we identified.
381 * M-Systems say that any given DiskOnChip device should only
382 * contain _one_ type of flash part, although that's not a
383 * hardware restriction. */
384 if (doc->mfr) {
385 if (doc->mfr == mfr && doc->id == id)
386 return 1; /* This is another chip of the same type as the first */
387 else
388 printk(KERN_WARNING
389 "Flash chip at floor %d, chip %d is different:\n",
390 floor, chip);
391 }
392
393 /* Print and store the manufacturer and ID codes. */
394 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
395 if (id == nand_flash_ids[i].id) {
396 /* Try to identify manufacturer */
397 for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
398 if (nand_manuf_ids[j].id == mfr)
399 break;
400 }
401 printk(KERN_INFO
402 "Flash chip found: Manufacturer ID: %2.2X, "
403 "Chip ID: %2.2X (%s:%s)\n", mfr, id,
404 nand_manuf_ids[j].name, nand_flash_ids[i].name);
405 if (!doc->mfr) {
406 doc->mfr = mfr;
407 doc->id = id;
408 doc->chipshift =
409 ffs((nand_flash_ids[i].chipsize << 20)) - 1;
410 doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
411 doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
412 doc->erasesize =
413 nand_flash_ids[i].erasesize;
414 return 1;
415 }
416 return 0;
417 }
418 }
419
420
421 /* We haven't fully identified the chip. Print as much as we know. */
422 printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
423 id, mfr);
424
425 printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
426 return 0;
427}
428
429/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
430
431static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
432{
433 int floor, chip;
434 int numchips[MAX_FLOORS];
435 int ret = 1;
436
437 this->numchips = 0;
438 this->mfr = 0;
439 this->id = 0;
440
441 /* For each floor, find the number of valid chips it contains */
442 for (floor = 0; floor < MAX_FLOORS; floor++) {
443 ret = 1;
444 numchips[floor] = 0;
445 for (chip = 0; chip < maxchips && ret != 0; chip++) {
446
447 ret = DoC_IdentChip(this, floor, chip);
448 if (ret) {
449 numchips[floor]++;
450 this->numchips++;
451 }
452 }
453 }
454
455 /* If there are none at all that we recognise, bail */
456 if (!this->numchips) {
457 printk(KERN_NOTICE "No flash chips recognised.\n");
458 return;
459 }
460
461 /* Allocate an array to hold the information for each chip */
462 this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
463 if (!this->chips) {
464 printk(KERN_NOTICE "No memory for allocating chip info structures\n");
465 return;
466 }
467
468 ret = 0;
469
470 /* Fill out the chip array with {floor, chipno} for each
471 * detected chip in the device. */
472 for (floor = 0; floor < MAX_FLOORS; floor++) {
473 for (chip = 0; chip < numchips[floor]; chip++) {
474 this->chips[ret].floor = floor;
475 this->chips[ret].chip = chip;
476 this->chips[ret].curadr = 0;
477 this->chips[ret].curmode = 0x50;
478 ret++;
479 }
480 }
481
482 /* Calculate and print the total size of the device */
483 this->totlen = this->numchips * (1 << this->chipshift);
484
485 printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
486 this->numchips, this->totlen >> 20);
487}
488
489static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
490{
491 int tmp1, tmp2, retval;
492 if (doc1->physadr == doc2->physadr)
493 return 1;
494
495 /* Use the alias resolution register which was set aside for this
496 * purpose. If its value is the same on both chips, they might
497 * be the same chip, and we write to one and check for a change in
498 * the other. It's unclear if this register is usable in the
499 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
500 tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
501 tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
502 if (tmp1 != tmp2)
503 return 0;
504
505 WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
506 tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
507 if (tmp2 == (tmp1 + 1) % 0xff)
508 retval = 1;
509 else
510 retval = 0;
511
512 /* Restore register contents. May not be necessary, but do it just to
513 * be safe. */
514 WriteDOC(tmp1, doc1->virtadr, AliasResolution);
515
516 return retval;
517}
518
519static const char im_name[] = "DoC2k_init";
520
521/* This routine is made available to other mtd code via
522 * inter_module_register. It must only be accessed through
523 * inter_module_get which will bump the use count of this module. The
524 * addresses passed back in mtd are valid as long as the use count of
525 * this module is non-zero, i.e. between inter_module_get and
526 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
527 */
528static void DoC2k_init(struct mtd_info *mtd)
529{
530 struct DiskOnChip *this = mtd->priv;
531 struct DiskOnChip *old = NULL;
532 int maxchips;
533
534 /* We must avoid being called twice for the same device. */
535
536 if (doc2klist)
537 old = doc2klist->priv;
538
539 while (old) {
540 if (DoC2k_is_alias(old, this)) {
541 printk(KERN_NOTICE
542 "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
543 this->physadr);
544 iounmap(this->virtadr);
545 kfree(mtd);
546 return;
547 }
548 if (old->nextdoc)
549 old = old->nextdoc->priv;
550 else
551 old = NULL;
552 }
553
554
555 switch (this->ChipID) {
556 case DOC_ChipID_Doc2kTSOP:
557 mtd->name = "DiskOnChip 2000 TSOP";
558 this->ioreg = DoC_Mil_CDSN_IO;
559 /* Pretend it's a Millennium */
560 this->ChipID = DOC_ChipID_DocMil;
561 maxchips = MAX_CHIPS;
562 break;
563 case DOC_ChipID_Doc2k:
564 mtd->name = "DiskOnChip 2000";
565 this->ioreg = DoC_2k_CDSN_IO;
566 maxchips = MAX_CHIPS;
567 break;
568 case DOC_ChipID_DocMil:
569 mtd->name = "DiskOnChip Millennium";
570 this->ioreg = DoC_Mil_CDSN_IO;
571 maxchips = MAX_CHIPS_MIL;
572 break;
573 default:
574 printk("Unknown ChipID 0x%02x\n", this->ChipID);
575 kfree(mtd);
576 iounmap(this->virtadr);
577 return;
578 }
579
580 printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
581 this->physadr);
582
583 mtd->type = MTD_NANDFLASH;
584 mtd->flags = MTD_CAP_NANDFLASH;
585 mtd->ecctype = MTD_ECC_RS_DiskOnChip;
586 mtd->size = 0;
587 mtd->erasesize = 0;
588 mtd->oobblock = 512;
589 mtd->oobsize = 16;
590 mtd->owner = THIS_MODULE;
591 mtd->erase = doc_erase;
592 mtd->point = NULL;
593 mtd->unpoint = NULL;
594 mtd->read = doc_read;
595 mtd->write = doc_write;
596 mtd->read_ecc = doc_read_ecc;
597 mtd->write_ecc = doc_write_ecc;
598 mtd->writev_ecc = doc_writev_ecc;
599 mtd->read_oob = doc_read_oob;
600 mtd->write_oob = doc_write_oob;
601 mtd->sync = NULL;
602
603 this->totlen = 0;
604 this->numchips = 0;
605
606 this->curfloor = -1;
607 this->curchip = -1;
608 init_MUTEX(&this->lock);
609
610 /* Ident all the chips present. */
611 DoC_ScanChips(this, maxchips);
612
613 if (!this->totlen) {
614 kfree(mtd);
615 iounmap(this->virtadr);
616 } else {
617 this->nextdoc = doc2klist;
618 doc2klist = mtd;
619 mtd->size = this->totlen;
620 mtd->erasesize = this->erasesize;
621 add_mtd_device(mtd);
622 return;
623 }
624}
625
626static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
627 size_t * retlen, u_char * buf)
628{
629 /* Just a special case of doc_read_ecc */
630 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
631}
632
633static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
634 size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
635{
636 struct DiskOnChip *this = mtd->priv;
637 void __iomem *docptr = this->virtadr;
638 struct Nand *mychip;
639 unsigned char syndrome[6];
640 volatile char dummy;
641 int i, len256 = 0, ret=0;
642 size_t left = len;
643
644 /* Don't allow read past end of device */
645 if (from >= this->totlen)
646 return -EINVAL;
647
648 down(&this->lock);
649
650 *retlen = 0;
651 while (left) {
652 len = left;
653
654 /* Don't allow a single read to cross a 512-byte block boundary */
655 if (from + len > ((from | 0x1ff) + 1))
656 len = ((from | 0x1ff) + 1) - from;
657
658 /* The ECC will not be calculated correctly if less than 512 is read */
659 if (len != 0x200 && eccbuf)
660 printk(KERN_WARNING
661 "ECC needs a full sector read (adr: %lx size %lx)\n",
662 (long) from, (long) len);
663
664 /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
665
666
667 /* Find the chip which is to be used and select it */
668 mychip = &this->chips[from >> (this->chipshift)];
669
670 if (this->curfloor != mychip->floor) {
671 DoC_SelectFloor(this, mychip->floor);
672 DoC_SelectChip(this, mychip->chip);
673 } else if (this->curchip != mychip->chip) {
674 DoC_SelectChip(this, mychip->chip);
675 }
676
677 this->curfloor = mychip->floor;
678 this->curchip = mychip->chip;
679
680 DoC_Command(this,
681 (!this->page256
682 && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
683 CDSN_CTRL_WP);
684 DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
685 CDSN_CTRL_ECC_IO);
686
687 if (eccbuf) {
688 /* Prime the ECC engine */
689 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
690 WriteDOC(DOC_ECC_EN, docptr, ECCConf);
691 } else {
692 /* disable the ECC engine */
693 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
694 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
695 }
696
697 /* treat crossing 256-byte sector for 2M x 8bits devices */
698 if (this->page256 && from + len > (from | 0xff) + 1) {
699 len256 = (from | 0xff) + 1 - from;
700 DoC_ReadBuf(this, buf, len256);
701
702 DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
703 DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
704 CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
705 }
706
707 DoC_ReadBuf(this, &buf[len256], len - len256);
708
709 /* Let the caller know we completed it */
710 *retlen += len;
711
712 if (eccbuf) {
713 /* Read the ECC data through the DiskOnChip ECC logic */
714 /* Note: this will work even with 2M x 8bit devices as */
715 /* they have 8 bytes of OOB per 256 page. mf. */
716 DoC_ReadBuf(this, eccbuf, 6);
717
718 /* Flush the pipeline */
719 if (DoC_is_Millennium(this)) {
720 dummy = ReadDOC(docptr, ECCConf);
721 dummy = ReadDOC(docptr, ECCConf);
722 i = ReadDOC(docptr, ECCConf);
723 } else {
724 dummy = ReadDOC(docptr, 2k_ECCStatus);
725 dummy = ReadDOC(docptr, 2k_ECCStatus);
726 i = ReadDOC(docptr, 2k_ECCStatus);
727 }
728
729 /* Check the ECC Status */
730 if (i & 0x80) {
731 int nb_errors;
732 /* There was an ECC error */
733#ifdef ECC_DEBUG
734 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
735#endif
736 /* Read the ECC syndrome through the DiskOnChip ECC logic.
737 These syndrome bytes will all be ZERO when there is no error */
738 for (i = 0; i < 6; i++) {
739 syndrome[i] =
740 ReadDOC(docptr, ECCSyndrome0 + i);
741 }
742 nb_errors = doc_decode_ecc(buf, syndrome);
743
744#ifdef ECC_DEBUG
745 printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
746#endif
747 if (nb_errors < 0) {
748 /* We return an error, but have actually done the read. This
749 can't be reported to user-space via sys_read(), but at least
750 MTD-aware code can learn about it by checking *retlen */
751 ret = -EIO;
752 }
753 }
754
755#ifdef PSYCHO_DEBUG
756 printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
757 (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
758 eccbuf[3], eccbuf[4], eccbuf[5]);
759#endif
760
761 /* disable the ECC engine */
762 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
763 }
764
765 /* according to 11.4.1, we need to wait for the busy line
766 * drop if we read to the end of the page. */
767 if(0 == ((from + len) & 0x1ff))
768 {
769 DoC_WaitReady(this);
770 }
771
772 from += len;
773 left -= len;
774 buf += len;
775 }
776
777 up(&this->lock);
778
779 return ret;
780}
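/*
 * Editor's illustrative sketch (not part of the driver, wrapped in #if 0 so
 * it does not affect the build): the "(from | 0x1ff) + 1" idiom doc_read_ecc()
 * above (and doc_write_ecc() below) uses to clamp each pass so it never
 * crosses a 512-byte flash page boundary.  The offsets are example values.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long cases[][2] = { { 0x000, 0x300 }, { 0x1f0, 0x100 }, { 0x200, 0x200 } };
	unsigned long from, len, clamped;
	int i;

	for (i = 0; i < 3; i++) {
		from = cases[i][0];
		len  = cases[i][1];
		clamped = len;
		/* (from | 0x1ff) + 1 is the next 512-byte boundary above 'from' */
		if (from + len > (from | 0x1ff) + 1)
			clamped = (from | 0x1ff) + 1 - from;
		printf("from=0x%03lx len=0x%03lx -> this pass transfers 0x%03lx\n",
		       from, len, clamped);
	}
	return 0;
}
#endif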
781
782static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
783 size_t * retlen, const u_char * buf)
784{
785 char eccbuf[6];
786 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
787}
788
789static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
790 size_t * retlen, const u_char * buf,
791 u_char * eccbuf, struct nand_oobinfo *oobsel)
792{
793 struct DiskOnChip *this = mtd->priv;
794 int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
795 void __iomem *docptr = this->virtadr;
796 volatile char dummy;
797 int len256 = 0;
798 struct Nand *mychip;
799 size_t left = len;
800 int status;
801
802 /* Don't allow write past end of device */
803 if (to >= this->totlen)
804 return -EINVAL;
805
806 down(&this->lock);
807
808 *retlen = 0;
809 while (left) {
810 len = left;
811
812 /* Don't allow a single write to cross a 512-byte block boundary */
813 if (to + len > ((to | 0x1ff) + 1))
814 len = ((to | 0x1ff) + 1) - to;
815
816 /* The ECC will not be calculated correctly if less than 512 is written */
817/* DBB-
818 if (len != 0x200 && eccbuf)
819 printk(KERN_WARNING
820 "ECC needs a full sector write (adr: %lx size %lx)\n",
821 (long) to, (long) len);
822 -DBB */
823
824 /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
825
826 /* Find the chip which is to be used and select it */
827 mychip = &this->chips[to >> (this->chipshift)];
828
829 if (this->curfloor != mychip->floor) {
830 DoC_SelectFloor(this, mychip->floor);
831 DoC_SelectChip(this, mychip->chip);
832 } else if (this->curchip != mychip->chip) {
833 DoC_SelectChip(this, mychip->chip);
834 }
835
836 this->curfloor = mychip->floor;
837 this->curchip = mychip->chip;
838
839 /* Set device to main plane of flash */
840 DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
841 DoC_Command(this,
842 (!this->page256
843 && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
844 CDSN_CTRL_WP);
845
846 DoC_Command(this, NAND_CMD_SEQIN, 0);
847 DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
848
849 if (eccbuf) {
850 /* Prime the ECC engine */
851 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
852 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
853 } else {
854 /* disable the ECC engine */
855 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
856 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
857 }
858
859 /* treat crossing 256-byte sector for 2M x 8bits devices */
860 if (this->page256 && to + len > (to | 0xff) + 1) {
861 len256 = (to | 0xff) + 1 - to;
862 DoC_WriteBuf(this, buf, len256);
863
864 DoC_Command(this, NAND_CMD_PAGEPROG, 0);
865
866 DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
867 /* There's an implicit DoC_WaitReady() in DoC_Command */
868
869 dummy = ReadDOC(docptr, CDSNSlowIO);
870 DoC_Delay(this, 2);
871
872 if (ReadDOC_(docptr, this->ioreg) & 1) {
873 printk(KERN_ERR "Error programming flash\n");
874 /* Error in programming */
875 *retlen = 0;
876 up(&this->lock);
877 return -EIO;
878 }
879
880 DoC_Command(this, NAND_CMD_SEQIN, 0);
881 DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
882 CDSN_CTRL_ECC_IO);
883 }
884
885 DoC_WriteBuf(this, &buf[len256], len - len256);
886
887 if (eccbuf) {
888 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
889 CDSNControl);
890
891 if (DoC_is_Millennium(this)) {
892 WriteDOC(0, docptr, NOP);
893 WriteDOC(0, docptr, NOP);
894 WriteDOC(0, docptr, NOP);
895 } else {
896 WriteDOC_(0, docptr, this->ioreg);
897 WriteDOC_(0, docptr, this->ioreg);
898 WriteDOC_(0, docptr, this->ioreg);
899 }
900
901 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
902 CDSNControl);
903
904 /* Read the ECC data through the DiskOnChip ECC logic */
905 for (di = 0; di < 6; di++) {
906 eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
907 }
908
909 /* Reset the ECC engine */
910 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
911
912#ifdef PSYCHO_DEBUG
913 printk
914 ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
915 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
916 eccbuf[4], eccbuf[5]);
917#endif
918 }
919
920 DoC_Command(this, NAND_CMD_PAGEPROG, 0);
921
922 DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
923 /* There's an implicit DoC_WaitReady() in DoC_Command */
924
925 if (DoC_is_Millennium(this)) {
926 ReadDOC(docptr, ReadPipeInit);
927 status = ReadDOC(docptr, LastDataRead);
928 } else {
929 dummy = ReadDOC(docptr, CDSNSlowIO);
930 DoC_Delay(this, 2);
931 status = ReadDOC_(docptr, this->ioreg);
932 }
933
934 if (status & 1) {
935 printk(KERN_ERR "Error programming flash\n");
936 /* Error in programming */
937 *retlen = 0;
938 up(&this->lock);
939 return -EIO;
940 }
941
942 /* Let the caller know we completed it */
943 *retlen += len;
944
945 if (eccbuf) {
946 unsigned char x[8];
947 size_t dummy;
948 int ret;
949
950 /* Write the ECC data to flash */
951 for (di=0; di<6; di++)
952 x[di] = eccbuf[di];
953
954 x[6]=0x55;
955 x[7]=0x55;
956
957 ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
958 if (ret) {
959 up(&this->lock);
960 return ret;
961 }
962 }
963
964 to += len;
965 left -= len;
966 buf += len;
967 }
968
969 up(&this->lock);
970 return 0;
971}
972
973static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
974 unsigned long count, loff_t to, size_t *retlen,
975 u_char *eccbuf, struct nand_oobinfo *oobsel)
976{
977 static char static_buf[512];
978 static DECLARE_MUTEX(writev_buf_sem);
979
980 size_t totretlen = 0;
981 size_t thisvecofs = 0;
982 int ret= 0;
983
984 down(&writev_buf_sem);
985
986 while(count) {
987 size_t thislen, thisretlen;
988 unsigned char *buf;
989
990 buf = vecs->iov_base + thisvecofs;
991 thislen = vecs->iov_len - thisvecofs;
992
993
994 if (thislen >= 512) {
995 thislen = thislen & ~(512-1);
996 thisvecofs += thislen;
997 } else {
998 /* Not enough to fill a page. Copy into buf */
999 memcpy(static_buf, buf, thislen);
1000 buf = &static_buf[thislen];
1001
1002 while(count && thislen < 512) {
1003 vecs++;
1004 count--;
1005 thisvecofs = min((512-thislen), vecs->iov_len);
1006 memcpy(buf, vecs->iov_base, thisvecofs);
1007 thislen += thisvecofs;
1008 buf += thisvecofs;
1009 }
1010 buf = static_buf;
1011 }
1012 if (count && thisvecofs == vecs->iov_len) {
1013 thisvecofs = 0;
1014 vecs++;
1015 count--;
1016 }
1017 ret = doc_write_ecc(mtd, to, thislen, &thisretlen, buf, eccbuf, oobsel);
1018
1019 totretlen += thisretlen;
1020
1021 if (ret || thisretlen != thislen)
1022 break;
1023
1024 to += thislen;
1025 }
1026
1027 up(&writev_buf_sem);
1028 *retlen = totretlen;
1029 return ret;
1030}
1031
1032
1033static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1034 size_t * retlen, u_char * buf)
1035{
1036 struct DiskOnChip *this = mtd->priv;
1037 int len256 = 0, ret;
1038 struct Nand *mychip;
1039
1040 down(&this->lock);
1041
1042 mychip = &this->chips[ofs >> this->chipshift];
1043
1044 if (this->curfloor != mychip->floor) {
1045 DoC_SelectFloor(this, mychip->floor);
1046 DoC_SelectChip(this, mychip->chip);
1047 } else if (this->curchip != mychip->chip) {
1048 DoC_SelectChip(this, mychip->chip);
1049 }
1050 this->curfloor = mychip->floor;
1051 this->curchip = mychip->chip;
1052
1053 /* update address for 2M x 8bit devices. OOB starts on the second */
1054 /* page to maintain compatibility with doc_read_ecc. */
1055 if (this->page256) {
1056 if (!(ofs & 0x8))
1057 ofs += 0x100;
1058 else
1059 ofs -= 0x8;
1060 }
1061
1062 DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
1063 DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0);
1064
1065 /* treat crossing 8-byte OOB data for 2M x 8bit devices */
1066 /* Note: datasheet says it should automatically wrap to the */
1067 /* next OOB block, but it didn't work here. mf. */
1068 if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
1069 len256 = (ofs | 0x7) + 1 - ofs;
1070 DoC_ReadBuf(this, buf, len256);
1071
1072 DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
1073 DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff),
1074 CDSN_CTRL_WP, 0);
1075 }
1076
1077 DoC_ReadBuf(this, &buf[len256], len - len256);
1078
1079 *retlen = len;
1080 /* Reading the full OOB data drops us off the end of the page,
1081 * causing the flash device to go into busy mode, so we need
1082 * to wait until ready; see 11.4.1 and the Toshiba TC58256FT docs */
1083
1084 ret = DoC_WaitReady(this);
1085
1086 up(&this->lock);
1087 return ret;
1088
1089}
1090
1091static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
1092 size_t * retlen, const u_char * buf)
1093{
1094 struct DiskOnChip *this = mtd->priv;
1095 int len256 = 0;
1096 void __iomem *docptr = this->virtadr;
1097 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
1098 volatile int dummy;
1099 int status;
1100
1101 // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
1102 // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
1103
1104 /* Find the chip which is to be used and select it */
1105 if (this->curfloor != mychip->floor) {
1106 DoC_SelectFloor(this, mychip->floor);
1107 DoC_SelectChip(this, mychip->chip);
1108 } else if (this->curchip != mychip->chip) {
1109 DoC_SelectChip(this, mychip->chip);
1110 }
1111 this->curfloor = mychip->floor;
1112 this->curchip = mychip->chip;
1113
1114 /* disable the ECC engine */
1115 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
1116 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
1117
1118 /* Reset the chip, see Software Requirement 11.4 item 1. */
1119 DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
1120
1121 /* issue the Read2 command to set the pointer to the Spare Data Area. */
1122 DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
1123
1124 /* update address for 2M x 8bit devices. OOB starts on the second */
1125 /* page to maintain compatibility with doc_read_ecc. */
1126 if (this->page256) {
1127 if (!(ofs & 0x8))
1128 ofs += 0x100;
1129 else
1130 ofs -= 0x8;
1131 }
1132
 1133	/* issue the Serial Data In command to initiate the Page Program process */
1134 DoC_Command(this, NAND_CMD_SEQIN, 0);
1135 DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0);
1136
 1137	/* handle crossing of the 8-byte OOB data for 2M x 8bit devices */
 1138	/* Note: the datasheet says it should automatically wrap to the */
 1139	/* next OOB block, but it didn't work here. mf. */
1140 if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
1141 len256 = (ofs | 0x7) + 1 - ofs;
1142 DoC_WriteBuf(this, buf, len256);
1143
1144 DoC_Command(this, NAND_CMD_PAGEPROG, 0);
1145 DoC_Command(this, NAND_CMD_STATUS, 0);
1146 /* DoC_WaitReady() is implicit in DoC_Command */
1147
1148 if (DoC_is_Millennium(this)) {
1149 ReadDOC(docptr, ReadPipeInit);
1150 status = ReadDOC(docptr, LastDataRead);
1151 } else {
1152 dummy = ReadDOC(docptr, CDSNSlowIO);
1153 DoC_Delay(this, 2);
1154 status = ReadDOC_(docptr, this->ioreg);
1155 }
1156
1157 if (status & 1) {
1158 printk(KERN_ERR "Error programming oob data\n");
1159 /* There was an error */
1160 *retlen = 0;
1161 return -EIO;
1162 }
1163 DoC_Command(this, NAND_CMD_SEQIN, 0);
1164 DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0);
1165 }
1166
1167 DoC_WriteBuf(this, &buf[len256], len - len256);
1168
1169 DoC_Command(this, NAND_CMD_PAGEPROG, 0);
1170 DoC_Command(this, NAND_CMD_STATUS, 0);
1171 /* DoC_WaitReady() is implicit in DoC_Command */
1172
1173 if (DoC_is_Millennium(this)) {
1174 ReadDOC(docptr, ReadPipeInit);
1175 status = ReadDOC(docptr, LastDataRead);
1176 } else {
1177 dummy = ReadDOC(docptr, CDSNSlowIO);
1178 DoC_Delay(this, 2);
1179 status = ReadDOC_(docptr, this->ioreg);
1180 }
1181
1182 if (status & 1) {
1183 printk(KERN_ERR "Error programming oob data\n");
1184 /* There was an error */
1185 *retlen = 0;
1186 return -EIO;
1187 }
1188
1189 *retlen = len;
1190 return 0;
1191
1192}
1193
1194static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1195 size_t * retlen, const u_char * buf)
1196{
1197 struct DiskOnChip *this = mtd->priv;
1198 int ret;
1199
1200 down(&this->lock);
1201 ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
1202
1203 up(&this->lock);
1204 return ret;
1205}
1206
1207static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1208{
1209 struct DiskOnChip *this = mtd->priv;
1210 __u32 ofs = instr->addr;
1211 __u32 len = instr->len;
1212 volatile int dummy;
1213 void __iomem *docptr = this->virtadr;
1214 struct Nand *mychip;
1215 int status;
1216
1217 down(&this->lock);
1218
1219 if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
1220 up(&this->lock);
1221 return -EINVAL;
1222 }
1223
1224 instr->state = MTD_ERASING;
1225
1226 /* FIXME: Do this in the background. Use timers or schedule_task() */
1227 while(len) {
1228 mychip = &this->chips[ofs >> this->chipshift];
1229
1230 if (this->curfloor != mychip->floor) {
1231 DoC_SelectFloor(this, mychip->floor);
1232 DoC_SelectChip(this, mychip->chip);
1233 } else if (this->curchip != mychip->chip) {
1234 DoC_SelectChip(this, mychip->chip);
1235 }
1236 this->curfloor = mychip->floor;
1237 this->curchip = mychip->chip;
1238
1239 DoC_Command(this, NAND_CMD_ERASE1, 0);
1240 DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
1241 DoC_Command(this, NAND_CMD_ERASE2, 0);
1242
1243 DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
1244
1245 if (DoC_is_Millennium(this)) {
1246 ReadDOC(docptr, ReadPipeInit);
1247 status = ReadDOC(docptr, LastDataRead);
1248 } else {
1249 dummy = ReadDOC(docptr, CDSNSlowIO);
1250 DoC_Delay(this, 2);
1251 status = ReadDOC_(docptr, this->ioreg);
1252 }
1253
1254 if (status & 1) {
1255 printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
1256 /* There was an error */
1257 instr->state = MTD_ERASE_FAILED;
1258 goto callback;
1259 }
1260 ofs += mtd->erasesize;
1261 len -= mtd->erasesize;
1262 }
1263 instr->state = MTD_ERASE_DONE;
1264
1265 callback:
1266 mtd_erase_callback(instr);
1267
1268 up(&this->lock);
1269 return 0;
1270}
1271
1272
1273/****************************************************************************
1274 *
1275 * Module stuff
1276 *
1277 ****************************************************************************/
1278
1279static int __init init_doc2000(void)
1280{
1281 inter_module_register(im_name, THIS_MODULE, &DoC2k_init);
1282 return 0;
1283}
1284
1285static void __exit cleanup_doc2000(void)
1286{
1287 struct mtd_info *mtd;
1288 struct DiskOnChip *this;
1289
1290 while ((mtd = doc2klist)) {
1291 this = mtd->priv;
1292 doc2klist = this->nextdoc;
1293
1294 del_mtd_device(mtd);
1295
1296 iounmap(this->virtadr);
1297 kfree(this->chips);
1298 kfree(mtd);
1299 }
1300 inter_module_unregister(im_name);
1301}
1302
1303module_exit(cleanup_doc2000);
1304module_init(init_doc2000);
1305
1306MODULE_LICENSE("GPL");
1307MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1308MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
1309
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
new file mode 100644
index 000000000000..1e704915ef08
--- /dev/null
+++ b/drivers/mtd/devices/doc2001.c
@@ -0,0 +1,888 @@
1
2/*
3 * Linux driver for Disk-On-Chip Millennium
4 * (c) 1999 Machine Vision Holdings, Inc.
5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
6 *
7 * $Id: doc2001.c,v 1.48 2005/01/05 18:05:12 dwmw2 Exp $
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/errno.h>
13#include <asm/io.h>
14#include <asm/uaccess.h>
15#include <linux/miscdevice.h>
16#include <linux/pci.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <linux/types.h>
22#include <linux/bitops.h>
23
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/doc2000.h>
27
28/* #define ECC_DEBUG */
29
 30/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
 31 * This may be due to the different revisions of the built-in ASIC controller or
 32 * simply a QA/bug issue. Who knows? If you have trouble, leave this
 33 * undefined:*/
34#undef USE_MEMCPY
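/* With USE_MEMCPY left undefined, the transfer loops below fall back to
 * byte-at-a-time ReadDOC()/WriteDOC() accesses instead of
 * memcpy_fromio()/memcpy_toio(). */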
35
36static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
37 size_t *retlen, u_char *buf);
38static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39 size_t *retlen, const u_char *buf);
40static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
41 size_t *retlen, u_char *buf, u_char *eccbuf,
42 struct nand_oobinfo *oobsel);
43static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
44 size_t *retlen, const u_char *buf, u_char *eccbuf,
45 struct nand_oobinfo *oobsel);
46static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
47 size_t *retlen, u_char *buf);
48static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
49 size_t *retlen, const u_char *buf);
50static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
51
52static struct mtd_info *docmillist = NULL;
53
54/* Perform the required delay cycles by reading from the NOP register */
55static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
56{
57 volatile char dummy;
58 int i;
59
60 for (i = 0; i < cycles; i++)
61 dummy = ReadDOC(docptr, NOP);
62}
63
64/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
65static int _DoC_WaitReady(void __iomem * docptr)
66{
67 unsigned short c = 0xffff;
68
69 DEBUG(MTD_DEBUG_LEVEL3,
70 "_DoC_WaitReady called for out-of-line wait\n");
71
72 /* Out-of-line routine to wait for chip response */
73 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
74 ;
75
76 if (c == 0)
77 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
78
79 return (c == 0);
80}
81
82static inline int DoC_WaitReady(void __iomem * docptr)
83{
84 /* This is inline, to optimise the common case, where it's ready instantly */
85 int ret = 0;
86
 87	/* 4 reads from the NOP register should be issued prior to the read from CDSNControl;
 88	   see Software Requirement 11.4 item 2. */
89 DoC_Delay(docptr, 4);
90
91 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
92 /* Call the out-of-line routine to wait */
93 ret = _DoC_WaitReady(docptr);
94
 95	/* issue 2 reads from the NOP register after reading from the CDSNControl register;
 96	   see Software Requirement 11.4 item 2. */
97 DoC_Delay(docptr, 2);
98
99 return ret;
100}
101
102/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register
103 with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
104 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
105
106static inline void DoC_Command(void __iomem * docptr, unsigned char command,
107 unsigned char xtraflags)
108{
109 /* Assert the CLE (Command Latch Enable) line to the flash chip */
110 WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
111 DoC_Delay(docptr, 4);
112
113 /* Send the command */
114 WriteDOC(command, docptr, Mil_CDSN_IO);
115 WriteDOC(0x00, docptr, WritePipeTerm);
116
117 /* Lower the CLE line */
118 WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
119 DoC_Delay(docptr, 4);
120}
121
122/* DoC_Address: Set the current address for the flash chip through the CDSN IO register
123 with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
124 required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
125
126static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
127 unsigned char xtraflags1, unsigned char xtraflags2)
128{
129 /* Assert the ALE (Address Latch Enable) line to the flash chip */
130 WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
131 DoC_Delay(docptr, 4);
132
133 /* Send the address */
134 switch (numbytes)
135 {
136 case 1:
137 /* Send single byte, bits 0-7. */
138 WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
139 WriteDOC(0x00, docptr, WritePipeTerm);
140 break;
141 case 2:
142 /* Send bits 9-16 followed by 17-23 */
143 WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
144 WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
145 WriteDOC(0x00, docptr, WritePipeTerm);
146 break;
147 case 3:
148 /* Send 0-7, 9-16, then 17-23 */
149 WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
150 WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
151 WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
152 WriteDOC(0x00, docptr, WritePipeTerm);
153 break;
154 default:
155 return;
156 }
157
158 /* Lower the ALE line */
159 WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
160 DoC_Delay(docptr, 4);
161}
162
163/* DoC_SelectChip: Select a given flash chip within the current floor */
164static int DoC_SelectChip(void __iomem * docptr, int chip)
165{
166 /* Select the individual flash chip requested */
167 WriteDOC(chip, docptr, CDSNDeviceSelect);
168 DoC_Delay(docptr, 4);
169
170 /* Wait for it to be ready */
171 return DoC_WaitReady(docptr);
172}
173
174/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
175static int DoC_SelectFloor(void __iomem * docptr, int floor)
176{
177 /* Select the floor (bank) of chips required */
178 WriteDOC(floor, docptr, FloorSelect);
179
180 /* Wait for the chip to be ready */
181 return DoC_WaitReady(docptr);
182}
183
184/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
185static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
186{
187 int mfr, id, i, j;
188 volatile char dummy;
189
190 /* Page in the required floor/chip
191 FIXME: is this supported by Millennium ?? */
192 DoC_SelectFloor(doc->virtadr, floor);
193 DoC_SelectChip(doc->virtadr, chip);
194
195 /* Reset the chip, see Software Requirement 11.4 item 1. */
196 DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
197 DoC_WaitReady(doc->virtadr);
198
199 /* Read the NAND chip ID: 1. Send ReadID command */
200 DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
201
202 /* Read the NAND chip ID: 2. Send address byte zero */
203 DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
204
205 /* Read the manufacturer and device id codes of the flash device through
206 CDSN IO register see Software Requirement 11.4 item 5.*/
207 dummy = ReadDOC(doc->virtadr, ReadPipeInit);
208 DoC_Delay(doc->virtadr, 2);
209 mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO);
210
211 DoC_Delay(doc->virtadr, 2);
212 id = ReadDOC(doc->virtadr, Mil_CDSN_IO);
213 dummy = ReadDOC(doc->virtadr, LastDataRead);
214
215 /* No response - return failure */
216 if (mfr == 0xff || mfr == 0)
217 return 0;
218
219 /* FIXME: to deal with multi-flash on multi-Millennium case more carefully */
220 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
221 if ( id == nand_flash_ids[i].id) {
222 /* Try to identify manufacturer */
223 for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
224 if (nand_manuf_ids[j].id == mfr)
225 break;
226 }
227 printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
228 "Chip ID: %2.2X (%s:%s)\n",
229 mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
230 doc->mfr = mfr;
231 doc->id = id;
232 doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
233 break;
234 }
235 }
236
237 if (nand_flash_ids[i].name == NULL)
238 return 0;
239 else
240 return 1;
241}
242
243/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
244static void DoC_ScanChips(struct DiskOnChip *this)
245{
246 int floor, chip;
247 int numchips[MAX_FLOORS_MIL];
248 int ret;
249
250 this->numchips = 0;
251 this->mfr = 0;
252 this->id = 0;
253
254 /* For each floor, find the number of valid chips it contains */
255 for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) {
256 numchips[floor] = 0;
257 for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) {
258 ret = DoC_IdentChip(this, floor, chip);
259 if (ret) {
260 numchips[floor]++;
261 this->numchips++;
262 }
263 }
264 }
265 /* If there are none at all that we recognise, bail */
266 if (!this->numchips) {
267 printk("No flash chips recognised.\n");
268 return;
269 }
270
271 /* Allocate an array to hold the information for each chip */
272 this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
273 if (!this->chips){
274 printk("No memory for allocating chip info structures\n");
275 return;
276 }
277
278 /* Fill out the chip array with {floor, chipno} for each
279 * detected chip in the device. */
280 for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
281 for (chip = 0 ; chip < numchips[floor] ; chip++) {
282 this->chips[ret].floor = floor;
283 this->chips[ret].chip = chip;
284 this->chips[ret].curadr = 0;
285 this->chips[ret].curmode = 0x50;
286 ret++;
287 }
288 }
289
290 /* Calculate and print the total size of the device */
291 this->totlen = this->numchips * (1 << this->chipshift);
292 printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
293 this->numchips ,this->totlen >> 20);
294}
295
296static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
297{
298 int tmp1, tmp2, retval;
299
300 if (doc1->physadr == doc2->physadr)
301 return 1;
302
 303	/* Use the alias resolution register which was set aside for this
 304	 * purpose. If its value is the same on both chips, they might
 305	 * be the same chip, so we write to one and check for a change in
 306	 * the other. It's unclear if this register is usable in the
 307	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
308 tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
309 tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
310 if (tmp1 != tmp2)
311 return 0;
312
313 WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
314 tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
315 if (tmp2 == (tmp1+1) % 0xff)
316 retval = 1;
317 else
318 retval = 0;
319
320 /* Restore register contents. May not be necessary, but do it just to
321 * be safe. */
322 WriteDOC(tmp1, doc1->virtadr, AliasResolution);
323
324 return retval;
325}
326
327static const char im_name[] = "DoCMil_init";
328
329/* This routine is made available to other mtd code via
330 * inter_module_register. It must only be accessed through
331 * inter_module_get which will bump the use count of this module. The
332 * addresses passed back in mtd are valid as long as the use count of
333 * this module is non-zero, i.e. between inter_module_get and
334 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
335 */
336static void DoCMil_init(struct mtd_info *mtd)
337{
338 struct DiskOnChip *this = mtd->priv;
339 struct DiskOnChip *old = NULL;
340
341 /* We must avoid being called twice for the same device. */
342 if (docmillist)
343 old = docmillist->priv;
344
345 while (old) {
346 if (DoCMil_is_alias(this, old)) {
347 printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
348 "0x%lX - already configured\n", this->physadr);
349 iounmap(this->virtadr);
350 kfree(mtd);
351 return;
352 }
353 if (old->nextdoc)
354 old = old->nextdoc->priv;
355 else
356 old = NULL;
357 }
358
359 mtd->name = "DiskOnChip Millennium";
360 printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n",
361 this->physadr);
362
363 mtd->type = MTD_NANDFLASH;
364 mtd->flags = MTD_CAP_NANDFLASH;
365 mtd->ecctype = MTD_ECC_RS_DiskOnChip;
366 mtd->size = 0;
367
368 /* FIXME: erase size is not always 8KiB */
369 mtd->erasesize = 0x2000;
370
371 mtd->oobblock = 512;
372 mtd->oobsize = 16;
373 mtd->owner = THIS_MODULE;
374 mtd->erase = doc_erase;
375 mtd->point = NULL;
376 mtd->unpoint = NULL;
377 mtd->read = doc_read;
378 mtd->write = doc_write;
379 mtd->read_ecc = doc_read_ecc;
380 mtd->write_ecc = doc_write_ecc;
381 mtd->read_oob = doc_read_oob;
382 mtd->write_oob = doc_write_oob;
383 mtd->sync = NULL;
384
385 this->totlen = 0;
386 this->numchips = 0;
387 this->curfloor = -1;
388 this->curchip = -1;
389
390 /* Ident all the chips present. */
391 DoC_ScanChips(this);
392
393 if (!this->totlen) {
394 kfree(mtd);
395 iounmap(this->virtadr);
396 } else {
397 this->nextdoc = docmillist;
398 docmillist = mtd;
399 mtd->size = this->totlen;
400 add_mtd_device(mtd);
401 return;
402 }
403}
404
405static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
406 size_t *retlen, u_char *buf)
407{
408 /* Just a special case of doc_read_ecc */
409 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
410}
411
412static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
413 size_t *retlen, u_char *buf, u_char *eccbuf,
414 struct nand_oobinfo *oobsel)
415{
416 int i, ret;
417 volatile char dummy;
418 unsigned char syndrome[6];
419 struct DiskOnChip *this = mtd->priv;
420 void __iomem *docptr = this->virtadr;
421 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
422
423 /* Don't allow read past end of device */
424 if (from >= this->totlen)
425 return -EINVAL;
426
427 /* Don't allow a single read to cross a 512-byte block boundary */
428 if (from + len > ((from | 0x1ff) + 1))
429 len = ((from | 0x1ff) + 1) - from;
430
431 /* Find the chip which is to be used and select it */
432 if (this->curfloor != mychip->floor) {
433 DoC_SelectFloor(docptr, mychip->floor);
434 DoC_SelectChip(docptr, mychip->chip);
435 } else if (this->curchip != mychip->chip) {
436 DoC_SelectChip(docptr, mychip->chip);
437 }
438 this->curfloor = mychip->floor;
439 this->curchip = mychip->chip;
440
 441	/* issue the Read0 or Read1 command depending on which half of the page
 442	   we are accessing. Poll the Flash Ready bit after issuing the 3-byte
 443	   address in Sequence Read Mode, see Software Requirement 11.4 item 1.*/
444 DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP);
445 DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
446 DoC_WaitReady(docptr);
447
448 if (eccbuf) {
449 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
450 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
451 WriteDOC (DOC_ECC_EN, docptr, ECCConf);
452 } else {
453 /* disable the ECC engine */
454 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
455 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
456 }
457
458 /* Read the data via the internal pipeline through CDSN IO register,
459 see Pipelined Read Operations 11.3 */
460 dummy = ReadDOC(docptr, ReadPipeInit);
461#ifndef USE_MEMCPY
462 for (i = 0; i < len-1; i++) {
463 /* N.B. you have to increase the source address in this way or the
464 ECC logic will not work properly */
465 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
466 }
467#else
468 memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
469#endif
470 buf[len - 1] = ReadDOC(docptr, LastDataRead);
471
472 /* Let the caller know we completed it */
473 *retlen = len;
474 ret = 0;
475
476 if (eccbuf) {
477 /* Read the ECC data from Spare Data Area,
478 see Reed-Solomon EDC/ECC 11.1 */
479 dummy = ReadDOC(docptr, ReadPipeInit);
480#ifndef USE_MEMCPY
481 for (i = 0; i < 5; i++) {
482 /* N.B. you have to increase the source address in this way or the
483 ECC logic will not work properly */
484 eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
485 }
486#else
487 memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
488#endif
489 eccbuf[5] = ReadDOC(docptr, LastDataRead);
490
491 /* Flush the pipeline */
492 dummy = ReadDOC(docptr, ECCConf);
493 dummy = ReadDOC(docptr, ECCConf);
494
495 /* Check the ECC Status */
496 if (ReadDOC(docptr, ECCConf) & 0x80) {
497 int nb_errors;
498 /* There was an ECC error */
499#ifdef ECC_DEBUG
500 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
501#endif
 502			/* Read the ECC syndrome through the DiskOnChip ECC logic.
 503			   These syndromes will be all ZERO when there is no error */
504 for (i = 0; i < 6; i++) {
505 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
506 }
507 nb_errors = doc_decode_ecc(buf, syndrome);
508#ifdef ECC_DEBUG
509 printk("ECC Errors corrected: %x\n", nb_errors);
510#endif
511 if (nb_errors < 0) {
512 /* We return error, but have actually done the read. Not that
513 this can be told to user-space, via sys_read(), but at least
514 MTD-aware stuff can know about it by checking *retlen */
515 ret = -EIO;
516 }
517 }
518
519#ifdef PSYCHO_DEBUG
520 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
521 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
522 eccbuf[4], eccbuf[5]);
523#endif
524
525 /* disable the ECC engine */
526 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
527 }
528
529 return ret;
530}
531
532static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
533 size_t *retlen, const u_char *buf)
534{
535 char eccbuf[6];
536 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
537}
538
539static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
540 size_t *retlen, const u_char *buf, u_char *eccbuf,
541 struct nand_oobinfo *oobsel)
542{
543 int i,ret = 0;
544 volatile char dummy;
545 struct DiskOnChip *this = mtd->priv;
546 void __iomem *docptr = this->virtadr;
547 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
548
549 /* Don't allow write past end of device */
550 if (to >= this->totlen)
551 return -EINVAL;
552
553#if 0
554 /* Don't allow a single write to cross a 512-byte block boundary */
555 if (to + len > ( (to | 0x1ff) + 1))
556 len = ((to | 0x1ff) + 1) - to;
557#else
558 /* Don't allow writes which aren't exactly one block */
559 if (to & 0x1ff || len != 0x200)
560 return -EINVAL;
561#endif
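/* Note: the stricter check above is the one compiled in, so callers must
 * write exactly one 512-byte page at a time; presumably this keeps the
 * per-page Reed-Solomon ECC handling below simple. */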
562
563 /* Find the chip which is to be used and select it */
564 if (this->curfloor != mychip->floor) {
565 DoC_SelectFloor(docptr, mychip->floor);
566 DoC_SelectChip(docptr, mychip->chip);
567 } else if (this->curchip != mychip->chip) {
568 DoC_SelectChip(docptr, mychip->chip);
569 }
570 this->curfloor = mychip->floor;
571 this->curchip = mychip->chip;
572
573 /* Reset the chip, see Software Requirement 11.4 item 1. */
574 DoC_Command(docptr, NAND_CMD_RESET, 0x00);
575 DoC_WaitReady(docptr);
576 /* Set device to main plane of flash */
577 DoC_Command(docptr, NAND_CMD_READ0, 0x00);
578
 579	/* issue the Serial Data In command to initiate the Page Program process */
580 DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
581 DoC_Address(docptr, 3, to, 0x00, 0x00);
582 DoC_WaitReady(docptr);
583
584 if (eccbuf) {
585 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
586 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
587 WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
588 } else {
589 /* disable the ECC engine */
590 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
591 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
592 }
593
594 /* Write the data via the internal pipeline through CDSN IO register,
595 see Pipelined Write Operations 11.2 */
596#ifndef USE_MEMCPY
597 for (i = 0; i < len; i++) {
598 /* N.B. you have to increase the source address in this way or the
599 ECC logic will not work properly */
600 WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
601 }
602#else
603 memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
604#endif
605 WriteDOC(0x00, docptr, WritePipeTerm);
606
607 if (eccbuf) {
 608		/* Write the ECC data to flash; the ECC info is generated by the DiskOnChip ECC logic,
 609		   see Reed-Solomon EDC/ECC 11.1 */
610 WriteDOC(0, docptr, NOP);
611 WriteDOC(0, docptr, NOP);
612 WriteDOC(0, docptr, NOP);
613
614 /* Read the ECC data through the DiskOnChip ECC logic */
615 for (i = 0; i < 6; i++) {
616 eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
617 }
618
 619		/* disable the ECC engine */
620 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
621
622#ifndef USE_MEMCPY
623 /* Write the ECC data to flash */
624 for (i = 0; i < 6; i++) {
625 /* N.B. you have to increase the source address in this way or the
626 ECC logic will not work properly */
627 WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
628 }
629#else
630 memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
631#endif
632
 633		/* write the block status BLOCK_USED (0x5555) at the end of the ECC data.
 634		   FIXME: this is only a hack for programming the IPL area for LinuxBIOS
 635		   and should be replaced with proper code in user-space utilities */
636 WriteDOC(0x55, docptr, Mil_CDSN_IO);
637 WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
638
639 WriteDOC(0x00, docptr, WritePipeTerm);
640
641#ifdef PSYCHO_DEBUG
642 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
643 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
644 eccbuf[4], eccbuf[5]);
645#endif
646 }
647
648 /* Commit the Page Program command and wait for ready
649 see Software Requirement 11.4 item 1.*/
650 DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
651 DoC_WaitReady(docptr);
652
653 /* Read the status of the flash device through CDSN IO register
654 see Software Requirement 11.4 item 5.*/
655 DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
656 dummy = ReadDOC(docptr, ReadPipeInit);
657 DoC_Delay(docptr, 2);
658 if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
659 printk("Error programming flash\n");
660 /* Error in programming
661 FIXME: implement Bad Block Replacement (in nftl.c ??) */
662 *retlen = 0;
663 ret = -EIO;
664 }
665 dummy = ReadDOC(docptr, LastDataRead);
666
667 /* Let the caller know we completed it */
668 *retlen = len;
669
670 return ret;
671}
672
673static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
674 size_t *retlen, u_char *buf)
675{
676#ifndef USE_MEMCPY
677 int i;
678#endif
679 volatile char dummy;
680 struct DiskOnChip *this = mtd->priv;
681 void __iomem *docptr = this->virtadr;
682 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
683
684 /* Find the chip which is to be used and select it */
685 if (this->curfloor != mychip->floor) {
686 DoC_SelectFloor(docptr, mychip->floor);
687 DoC_SelectChip(docptr, mychip->chip);
688 } else if (this->curchip != mychip->chip) {
689 DoC_SelectChip(docptr, mychip->chip);
690 }
691 this->curfloor = mychip->floor;
692 this->curchip = mychip->chip;
693
694 /* disable the ECC engine */
695 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
696 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
697
 698	/* issue the Read2 command to set the pointer to the Spare Data Area.
 699	   Poll the Flash Ready bit after issuing the 3-byte address in
 700	   Sequence Read Mode, see Software Requirement 11.4 item 1.*/
701 DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
702 DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00);
703 DoC_WaitReady(docptr);
704
705 /* Read the data out via the internal pipeline through CDSN IO register,
706 see Pipelined Read Operations 11.3 */
707 dummy = ReadDOC(docptr, ReadPipeInit);
708#ifndef USE_MEMCPY
709 for (i = 0; i < len-1; i++) {
710 /* N.B. you have to increase the source address in this way or the
711 ECC logic will not work properly */
712 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
713 }
714#else
715 memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
716#endif
717 buf[len - 1] = ReadDOC(docptr, LastDataRead);
718
719 *retlen = len;
720
721 return 0;
722}
723
724static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
725 size_t *retlen, const u_char *buf)
726{
727#ifndef USE_MEMCPY
728 int i;
729#endif
730 volatile char dummy;
731 int ret = 0;
732 struct DiskOnChip *this = mtd->priv;
733 void __iomem *docptr = this->virtadr;
734 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
735
736 /* Find the chip which is to be used and select it */
737 if (this->curfloor != mychip->floor) {
738 DoC_SelectFloor(docptr, mychip->floor);
739 DoC_SelectChip(docptr, mychip->chip);
740 } else if (this->curchip != mychip->chip) {
741 DoC_SelectChip(docptr, mychip->chip);
742 }
743 this->curfloor = mychip->floor;
744 this->curchip = mychip->chip;
745
746 /* disable the ECC engine */
747 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
748 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
749
750 /* Reset the chip, see Software Requirement 11.4 item 1. */
751 DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP);
752 DoC_WaitReady(docptr);
753 /* issue the Read2 command to set the pointer to the Spare Data Area. */
754 DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
755
 756	/* issue the Serial Data In command to initiate the Page Program process */
757 DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
758 DoC_Address(docptr, 3, ofs, 0x00, 0x00);
759
760 /* Write the data via the internal pipeline through CDSN IO register,
761 see Pipelined Write Operations 11.2 */
762#ifndef USE_MEMCPY
763 for (i = 0; i < len; i++) {
764 /* N.B. you have to increase the source address in this way or the
765 ECC logic will not work properly */
766 WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
767 }
768#else
769 memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
770#endif
771 WriteDOC(0x00, docptr, WritePipeTerm);
772
773 /* Commit the Page Program command and wait for ready
774 see Software Requirement 11.4 item 1.*/
775 DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
776 DoC_WaitReady(docptr);
777
778 /* Read the status of the flash device through CDSN IO register
779 see Software Requirement 11.4 item 5.*/
780 DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
781 dummy = ReadDOC(docptr, ReadPipeInit);
782 DoC_Delay(docptr, 2);
783 if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
784 printk("Error programming oob data\n");
785 /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
786 *retlen = 0;
787 ret = -EIO;
788 }
789 dummy = ReadDOC(docptr, LastDataRead);
790
791 *retlen = len;
792
793 return ret;
794}
795
 796static int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
797{
798 volatile char dummy;
799 struct DiskOnChip *this = mtd->priv;
800 __u32 ofs = instr->addr;
801 __u32 len = instr->len;
802 void __iomem *docptr = this->virtadr;
803 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
804
805 if (len != mtd->erasesize)
 806		printk(KERN_WARNING "Erase not right size (%x != %x)\n",
807 len, mtd->erasesize);
808
809 /* Find the chip which is to be used and select it */
810 if (this->curfloor != mychip->floor) {
811 DoC_SelectFloor(docptr, mychip->floor);
812 DoC_SelectChip(docptr, mychip->chip);
813 } else if (this->curchip != mychip->chip) {
814 DoC_SelectChip(docptr, mychip->chip);
815 }
816 this->curfloor = mychip->floor;
817 this->curchip = mychip->chip;
818
819 instr->state = MTD_ERASE_PENDING;
820
821 /* issue the Erase Setup command */
822 DoC_Command(docptr, NAND_CMD_ERASE1, 0x00);
823 DoC_Address(docptr, 2, ofs, 0x00, 0x00);
824
825 /* Commit the Erase Start command and wait for ready
826 see Software Requirement 11.4 item 1.*/
827 DoC_Command(docptr, NAND_CMD_ERASE2, 0x00);
828 DoC_WaitReady(docptr);
829
830 instr->state = MTD_ERASING;
831
832 /* Read the status of the flash device through CDSN IO register
833 see Software Requirement 11.4 item 5.
 834	   FIXME: it seems that we are not waiting long enough; some blocks are not
 835	   fully erased */
836 DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
837 dummy = ReadDOC(docptr, ReadPipeInit);
838 DoC_Delay(docptr, 2);
839 if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
840 printk("Error Erasing at 0x%x\n", ofs);
841 /* There was an error
842 FIXME: implement Bad Block Replacement (in nftl.c ??) */
843 instr->state = MTD_ERASE_FAILED;
844 } else
845 instr->state = MTD_ERASE_DONE;
846 dummy = ReadDOC(docptr, LastDataRead);
847
848 mtd_erase_callback(instr);
849
850 return 0;
851}
852
853/****************************************************************************
854 *
855 * Module stuff
856 *
857 ****************************************************************************/
858
859static int __init init_doc2001(void)
860{
861 inter_module_register(im_name, THIS_MODULE, &DoCMil_init);
862 return 0;
863}
864
865static void __exit cleanup_doc2001(void)
866{
867 struct mtd_info *mtd;
868 struct DiskOnChip *this;
869
870 while ((mtd=docmillist)) {
871 this = mtd->priv;
872 docmillist = this->nextdoc;
873
874 del_mtd_device(mtd);
875
876 iounmap(this->virtadr);
877 kfree(this->chips);
878 kfree(mtd);
879 }
880 inter_module_unregister(im_name);
881}
882
883module_exit(cleanup_doc2001);
884module_init(init_doc2001);
885
886MODULE_LICENSE("GPL");
887MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
888MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
new file mode 100644
index 000000000000..ed47bafb2ce2
--- /dev/null
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -0,0 +1,1154 @@
1/*
2 * Linux driver for Disk-On-Chip Millennium Plus
3 *
4 * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
5 * (c) 2002-2003 SnapGear Inc
6 * (c) 1999 Machine Vision Holdings, Inc.
7 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
8 *
9 * $Id: doc2001plus.c,v 1.13 2005/01/05 18:05:12 dwmw2 Exp $
10 *
11 * Released under GPL
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <asm/errno.h>
17#include <asm/io.h>
18#include <asm/uaccess.h>
19#include <linux/miscdevice.h>
20#include <linux/pci.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/bitops.h>
27
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/doc2000.h>
31
32/* #define ECC_DEBUG */
33
 34/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
 35 * This may be due to the different revisions of the built-in ASIC controller or
 36 * simply a QA/bug issue. Who knows? If you have trouble, leave this
 37 * undefined:*/
38#undef USE_MEMCPY
39
40static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
41 size_t *retlen, u_char *buf);
42static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
43 size_t *retlen, const u_char *buf);
44static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
45 size_t *retlen, u_char *buf, u_char *eccbuf,
46 struct nand_oobinfo *oobsel);
47static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
48 size_t *retlen, const u_char *buf, u_char *eccbuf,
49 struct nand_oobinfo *oobsel);
50static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
51 size_t *retlen, u_char *buf);
52static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
53 size_t *retlen, const u_char *buf);
54static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
55
56static struct mtd_info *docmilpluslist = NULL;
57
58
59/* Perform the required delay cycles by writing to the NOP register */
60static void DoC_Delay(void __iomem * docptr, int cycles)
61{
62 int i;
63
64 for (i = 0; (i < cycles); i++)
65 WriteDOC(0, docptr, Mplus_NOP);
66}
67
68#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
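/* The Millennium Plus reports two flash-ready bits (FR_B0 and FR_B1); the
 * wait routines below require the full mask to be set before treating the
 * device as ready. */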
69
70/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
71static int _DoC_WaitReady(void __iomem * docptr)
72{
73 unsigned int c = 0xffff;
74
75 DEBUG(MTD_DEBUG_LEVEL3,
76 "_DoC_WaitReady called for out-of-line wait\n");
77
78 /* Out-of-line routine to wait for chip response */
79 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
80 ;
81
82 if (c == 0)
83 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
84
85 return (c == 0);
86}
87
88static inline int DoC_WaitReady(void __iomem * docptr)
89{
90 /* This is inline, to optimise the common case, where it's ready instantly */
91 int ret = 0;
92
 93	/* reads from the NOP register should be issued prior to the read from CDSNControl;
 94	   see Software Requirement 11.4 item 2. */
95 DoC_Delay(docptr, 4);
96
97 if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
98 /* Call the out-of-line routine to wait */
99 ret = _DoC_WaitReady(docptr);
100
101 return ret;
102}
103
 104/* For some reason the Millennium Plus seems to occasionally put itself
 105 * into reset mode. For me this happens randomly, with no pattern that I
 106 * can detect. M-Systems suggests always checking this on any block-level
 107 * operation and setting it back to normal mode if it is in reset mode.
108 */
109static inline void DoC_CheckASIC(void __iomem * docptr)
110{
111 /* Make sure the DoC is in normal mode */
112 if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
113 WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
114 WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
115 }
116}
117
118/* DoC_Command: Send a flash command to the flash chip through the Flash
119 * command register. Need 2 Write Pipeline Terminates to complete send.
120 */
121static inline void DoC_Command(void __iomem * docptr, unsigned char command,
122 unsigned char xtraflags)
123{
124 WriteDOC(command, docptr, Mplus_FlashCmd);
125 WriteDOC(command, docptr, Mplus_WritePipeTerm);
126 WriteDOC(command, docptr, Mplus_WritePipeTerm);
127}
128
129/* DoC_Address: Set the current address for the flash chip through the Flash
130 * Address register. Need 2 Write Pipeline Terminates to complete send.
131 */
132static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
133 unsigned long ofs, unsigned char xtraflags1,
134 unsigned char xtraflags2)
135{
136 void __iomem * docptr = doc->virtadr;
137
138 /* Allow for possible Mill Plus internal flash interleaving */
139 ofs >>= doc->interleave;
140
141 switch (numbytes) {
142 case 1:
143 /* Send single byte, bits 0-7. */
144 WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
145 break;
146 case 2:
147 /* Send bits 9-16 followed by 17-23 */
148 WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
149 WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
150 break;
151 case 3:
152 /* Send 0-7, 9-16, then 17-23 */
153 WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
154 WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
155 WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
156 break;
157 default:
158 return;
159 }
160
161 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
162 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
163}
164
165/* DoC_SelectChip: Select a given flash chip within the current floor */
166static int DoC_SelectChip(void __iomem * docptr, int chip)
167{
168 /* No choice for flash chip on Millennium Plus */
169 return 0;
170}
171
172/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
173static int DoC_SelectFloor(void __iomem * docptr, int floor)
174{
175 WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
176 return 0;
177}
178
179/*
180 * Translate the given offset into the appropriate command and offset.
181 * This does the mapping using the 16bit interleave layout defined by
182 * M-Systems, and looks like this for a sector pair:
183 * +-----------+-------+-------+-------+--------------+---------+-----------+
184 * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
185 * +-----------+-------+-------+-------+--------------+---------+-----------+
186 * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
187 * +-----------+-------+-------+-------+--------------+---------+-----------+
188 */
189/* FIXME: This lives in INFTL not here. Other users of flash devices
190 may not want it */
191static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
192{
193 struct DiskOnChip *this = mtd->priv;
194
195 if (this->interleave) {
196 unsigned int ofs = *from & 0x3ff;
197 unsigned int cmd;
198
199 if (ofs < 512) {
200 cmd = NAND_CMD_READ0;
201 ofs &= 0x1ff;
202 } else if (ofs < 1014) {
203 cmd = NAND_CMD_READ1;
204 ofs = (ofs & 0x1ff) + 10;
205 } else {
206 cmd = NAND_CMD_READOOB;
207 ofs = ofs - 1014;
208 }
209
210 *from = (*from & ~0x3ff) | ofs;
211 return cmd;
212 } else {
213 /* No interleave */
214 if ((*from) & 0x100)
215 return NAND_CMD_READ1;
216 return NAND_CMD_READ0;
217 }
218}
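/* Worked example of the interleaved mapping above (hypothetical offset):
 * for *from == 0x727, ofs = 0x727 & 0x3ff = 807, which falls in "Data 1",
 * so the command is NAND_CMD_READ1 and ofs becomes (807 & 0x1ff) + 10 = 305,
 * giving *from = 0x400 | 305 = 0x531. */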
219
220static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
221{
222 unsigned int ofs, cmd;
223
224 if (*from & 0x200) {
225 cmd = NAND_CMD_READOOB;
226 ofs = 10 + (*from & 0xf);
227 } else {
228 cmd = NAND_CMD_READ1;
229 ofs = (*from & 0xf);
230 }
231
232 *from = (*from & ~0x3ff) | ofs;
233 return cmd;
234}
235
236static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
237{
238 unsigned int ofs, cmd;
239
240 cmd = NAND_CMD_READ1;
241 ofs = (*from & 0x200) ? 8 : 6;
242 *from = (*from & ~0x3ff) | ofs;
243 return cmd;
244}
245
246static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
247{
248 unsigned int ofs, cmd;
249
250 cmd = NAND_CMD_READOOB;
251 ofs = (*from & 0x200) ? 24 : 16;
252 *from = (*from & ~0x3ff) | ofs;
253 return cmd;
254}
255
256static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
257{
258#ifndef USE_MEMCPY
259 int i;
260 for (i = 0; i < len; i++)
261 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
262#else
263 memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
264#endif
265}
266
267static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
268{
269#ifndef USE_MEMCPY
270 int i;
271 for (i = 0; i < len; i++)
272 WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
273#else
274 memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
275#endif
276}
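/* MemReadDOC()/MemWriteDOC() above centralise the USE_MEMCPY choice for the
 * Millennium Plus; the bulk data transfers in the read/write paths below all
 * go through these two helpers. */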
277
278/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
279static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
280{
281 int mfr, id, i, j;
282 volatile char dummy;
283 void __iomem * docptr = doc->virtadr;
284
285 /* Page in the required floor/chip */
286 DoC_SelectFloor(docptr, floor);
287 DoC_SelectChip(docptr, chip);
288
289 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
290 WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
291
292 /* Reset the chip, see Software Requirement 11.4 item 1. */
293 DoC_Command(docptr, NAND_CMD_RESET, 0);
294 DoC_WaitReady(docptr);
295
296 /* Read the NAND chip ID: 1. Send ReadID command */
297 DoC_Command(docptr, NAND_CMD_READID, 0);
298
299 /* Read the NAND chip ID: 2. Send address byte zero */
300 DoC_Address(doc, 1, 0x00, 0, 0x00);
301
302 WriteDOC(0, docptr, Mplus_FlashControl);
303 DoC_WaitReady(docptr);
304
305 /* Read the manufacturer and device id codes of the flash device through
306 CDSN IO register see Software Requirement 11.4 item 5.*/
307 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
308 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
309
310 mfr = ReadDOC(docptr, Mil_CDSN_IO);
311 if (doc->interleave)
312 dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
313
314 id = ReadDOC(docptr, Mil_CDSN_IO);
315 if (doc->interleave)
316 dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
317
318 dummy = ReadDOC(docptr, Mplus_LastDataRead);
319 dummy = ReadDOC(docptr, Mplus_LastDataRead);
320
321 /* Disable flash internally */
322 WriteDOC(0, docptr, Mplus_FlashSelect);
323
324 /* No response - return failure */
325 if (mfr == 0xff || mfr == 0)
326 return 0;
327
328 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
329 if (id == nand_flash_ids[i].id) {
330 /* Try to identify manufacturer */
331 for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
332 if (nand_manuf_ids[j].id == mfr)
333 break;
334 }
335 printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
336 "Chip ID: %2.2X (%s:%s)\n", mfr, id,
337 nand_manuf_ids[j].name, nand_flash_ids[i].name);
338 doc->mfr = mfr;
339 doc->id = id;
340 doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
341 doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
342 break;
343 }
344 }
345
346 if (nand_flash_ids[i].name == NULL)
347 return 0;
348 return 1;
349}
350
351/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
352static void DoC_ScanChips(struct DiskOnChip *this)
353{
354 int floor, chip;
355 int numchips[MAX_FLOORS_MPLUS];
356 int ret;
357
358 this->numchips = 0;
359 this->mfr = 0;
360 this->id = 0;
361
362 /* Work out the intended interleave setting */
363 this->interleave = 0;
364 if (this->ChipID == DOC_ChipID_DocMilPlus32)
365 this->interleave = 1;
366
367 /* Check the ASIC agrees */
368 if ( (this->interleave << 2) !=
369 (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
370 u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
371 printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
372 this->interleave?"on (16-bit)":"off (8-bit)");
373 conf ^= 4;
374 WriteDOC(conf, this->virtadr, Mplus_Configuration);
375 }
376
377 /* For each floor, find the number of valid chips it contains */
378 for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
379 numchips[floor] = 0;
380 for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
381 ret = DoC_IdentChip(this, floor, chip);
382 if (ret) {
383 numchips[floor]++;
384 this->numchips++;
385 }
386 }
387 }
388 /* If there are none at all that we recognise, bail */
389 if (!this->numchips) {
390 printk("No flash chips recognised.\n");
391 return;
392 }
393
394 /* Allocate an array to hold the information for each chip */
395 this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
396 if (!this->chips){
397 printk("MTD: No memory for allocating chip info structures\n");
398 return;
399 }
400
401 /* Fill out the chip array with {floor, chipno} for each
402 * detected chip in the device. */
403 for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
404 for (chip = 0 ; chip < numchips[floor] ; chip++) {
405 this->chips[ret].floor = floor;
406 this->chips[ret].chip = chip;
407 this->chips[ret].curadr = 0;
408 this->chips[ret].curmode = 0x50;
409 ret++;
410 }
411 }
412
413 /* Calculate and print the total size of the device */
414 this->totlen = this->numchips * (1 << this->chipshift);
415 printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
416 this->numchips ,this->totlen >> 20);
417}
418
419static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
420{
421 int tmp1, tmp2, retval;
422
423 if (doc1->physadr == doc2->physadr)
424 return 1;
425
 426	/* Use the alias resolution register which was set aside for this
 427	 * purpose. If its value is the same on both chips, they might
 428	 * be the same chip, so we write to one and check for a change in
 429	 * the other. It's unclear if this register is usable in the
 430	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
431 tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
432 tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
433 if (tmp1 != tmp2)
434 return 0;
435
436 WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
437 tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
438 if (tmp2 == (tmp1+1) % 0xff)
439 retval = 1;
440 else
441 retval = 0;
442
443 /* Restore register contents. May not be necessary, but do it just to
444 * be safe. */
445 WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
446
447 return retval;
448}
449
450static const char im_name[] = "DoCMilPlus_init";
451
452/* This routine is made available to other mtd code via
453 * inter_module_register. It must only be accessed through
454 * inter_module_get which will bump the use count of this module. The
455 * addresses passed back in mtd are valid as long as the use count of
456 * this module is non-zero, i.e. between inter_module_get and
457 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
458 */
459static void DoCMilPlus_init(struct mtd_info *mtd)
460{
461 struct DiskOnChip *this = mtd->priv;
462 struct DiskOnChip *old = NULL;
463
464 /* We must avoid being called twice for the same device. */
465 if (docmilpluslist)
466 old = docmilpluslist->priv;
467
468 while (old) {
469 if (DoCMilPlus_is_alias(this, old)) {
470 printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
471 "Plus at 0x%lX - already configured\n",
472 this->physadr);
473 iounmap(this->virtadr);
474 kfree(mtd);
475 return;
476 }
477 if (old->nextdoc)
478 old = old->nextdoc->priv;
479 else
480 old = NULL;
481 }
482
483 mtd->name = "DiskOnChip Millennium Plus";
484 printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
485 "address 0x%lX\n", this->physadr);
486
487 mtd->type = MTD_NANDFLASH;
488 mtd->flags = MTD_CAP_NANDFLASH;
489 mtd->ecctype = MTD_ECC_RS_DiskOnChip;
490 mtd->size = 0;
491
492 mtd->erasesize = 0;
493 mtd->oobblock = 512;
494 mtd->oobsize = 16;
495 mtd->owner = THIS_MODULE;
496 mtd->erase = doc_erase;
497 mtd->point = NULL;
498 mtd->unpoint = NULL;
499 mtd->read = doc_read;
500 mtd->write = doc_write;
501 mtd->read_ecc = doc_read_ecc;
502 mtd->write_ecc = doc_write_ecc;
503 mtd->read_oob = doc_read_oob;
504 mtd->write_oob = doc_write_oob;
505 mtd->sync = NULL;
506
507 this->totlen = 0;
508 this->numchips = 0;
509 this->curfloor = -1;
510 this->curchip = -1;
511
512 /* Ident all the chips present. */
513 DoC_ScanChips(this);
514
515 if (!this->totlen) {
516 kfree(mtd);
517 iounmap(this->virtadr);
518 } else {
519 this->nextdoc = docmilpluslist;
520 docmilpluslist = mtd;
521 mtd->size = this->totlen;
522 mtd->erasesize = this->erasesize;
523 add_mtd_device(mtd);
524 return;
525 }
526}
527
528#if 0
529static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
530{
531 int i;
532 loff_t fofs;
533 struct DiskOnChip *this = mtd->priv;
534 void __iomem * docptr = this->virtadr;
535 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
536 unsigned char *bp, buf[1056];
537 char c[32];
538
539 from &= ~0x3ff;
540
541 /* Don't allow read past end of device */
542 if (from >= this->totlen)
543 return -EINVAL;
544
545 DoC_CheckASIC(docptr);
546
547 /* Find the chip which is to be used and select it */
548 if (this->curfloor != mychip->floor) {
549 DoC_SelectFloor(docptr, mychip->floor);
550 DoC_SelectChip(docptr, mychip->chip);
551 } else if (this->curchip != mychip->chip) {
552 DoC_SelectChip(docptr, mychip->chip);
553 }
554 this->curfloor = mychip->floor;
555 this->curchip = mychip->chip;
556
557 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
558 WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
559
560 /* Reset the chip, see Software Requirement 11.4 item 1. */
561 DoC_Command(docptr, NAND_CMD_RESET, 0);
562 DoC_WaitReady(docptr);
563
564 fofs = from;
565 DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
566 DoC_Address(this, 3, fofs, 0, 0x00);
567 WriteDOC(0, docptr, Mplus_FlashControl);
568 DoC_WaitReady(docptr);
569
570 /* disable the ECC engine */
571 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
572
573 ReadDOC(docptr, Mplus_ReadPipeInit);
574 ReadDOC(docptr, Mplus_ReadPipeInit);
575
576 /* Read the data via the internal pipeline through CDSN IO
577 register, see Pipelined Read Operations 11.3 */
578 MemReadDOC(docptr, buf, 1054);
579 buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
580 buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
581
582 memset(&c[0], 0, sizeof(c));
583 printk("DUMP OFFSET=%x:\n", (int)from);
584
585 for (i = 0, bp = &buf[0]; (i < 1056); i++) {
586 if ((i % 16) == 0)
587 printk("%08x: ", i);
588 printk(" %02x", *bp);
589 c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
590 bp++;
591 if (((i + 1) % 16) == 0)
592 printk(" %s\n", c);
593 }
594 printk("\n");
595
596 /* Disable flash internally */
597 WriteDOC(0, docptr, Mplus_FlashSelect);
598
599 return 0;
600}
601#endif
602
603static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
604 size_t *retlen, u_char *buf)
605{
606 /* Just a special case of doc_read_ecc */
607 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
608}
609
610static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
611 size_t *retlen, u_char *buf, u_char *eccbuf,
612 struct nand_oobinfo *oobsel)
613{
614 int ret, i;
615 volatile char dummy;
616 loff_t fofs;
617 unsigned char syndrome[6];
618 struct DiskOnChip *this = mtd->priv;
619 void __iomem * docptr = this->virtadr;
620 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
621
622 /* Don't allow read past end of device */
623 if (from >= this->totlen)
624 return -EINVAL;
625
626 /* Don't allow a single read to cross a 512-byte block boundary */
627 if (from + len > ((from | 0x1ff) + 1))
628 len = ((from | 0x1ff) + 1) - from;
629
630 DoC_CheckASIC(docptr);
631
632 /* Find the chip which is to be used and select it */
633 if (this->curfloor != mychip->floor) {
634 DoC_SelectFloor(docptr, mychip->floor);
635 DoC_SelectChip(docptr, mychip->chip);
636 } else if (this->curchip != mychip->chip) {
637 DoC_SelectChip(docptr, mychip->chip);
638 }
639 this->curfloor = mychip->floor;
640 this->curchip = mychip->chip;
641
642 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
643 WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
644
645 /* Reset the chip, see Software Requirement 11.4 item 1. */
646 DoC_Command(docptr, NAND_CMD_RESET, 0);
647 DoC_WaitReady(docptr);
648
649 fofs = from;
650 DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
651 DoC_Address(this, 3, fofs, 0, 0x00);
652 WriteDOC(0, docptr, Mplus_FlashControl);
653 DoC_WaitReady(docptr);
654
655 if (eccbuf) {
656 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
657 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
658 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
659 } else {
660 /* disable the ECC engine */
661 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
662 }
663
664 /* Let the caller know we completed it */
665 *retlen = len;
666 ret = 0;
667
668 ReadDOC(docptr, Mplus_ReadPipeInit);
669 ReadDOC(docptr, Mplus_ReadPipeInit);
670
671 if (eccbuf) {
672 /* Read the data via the internal pipeline through CDSN IO
673 register, see Pipelined Read Operations 11.3 */
674 MemReadDOC(docptr, buf, len);
675
676 /* Read the ECC data following raw data */
677 MemReadDOC(docptr, eccbuf, 4);
678 eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
679 eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
680
681 /* Flush the pipeline */
682 dummy = ReadDOC(docptr, Mplus_ECCConf);
683 dummy = ReadDOC(docptr, Mplus_ECCConf);
684
685 /* Check the ECC Status */
686 if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
687 int nb_errors;
688 /* There was an ECC error */
689#ifdef ECC_DEBUG
690 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
691#endif
692			/* Read the ECC syndrome through the DiskOnChip ECC logic.
693			   These syndromes will all be ZERO when there is no error */
694 for (i = 0; i < 6; i++)
695 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
696
697 nb_errors = doc_decode_ecc(buf, syndrome);
698#ifdef ECC_DEBUG
699 printk("ECC Errors corrected: %x\n", nb_errors);
700#endif
701 if (nb_errors < 0) {
702				/* We return an error, but have actually done the read. This
703				   cannot be reported to user-space via sys_read(), but at
704				   least MTD-aware code can learn about it by checking *retlen */
705#ifdef ECC_DEBUG
706 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
707 __FILE__, __LINE__, (int)from);
708 printk(" syndrome= %02x:%02x:%02x:%02x:%02x:"
709 "%02x\n",
710 syndrome[0], syndrome[1], syndrome[2],
711 syndrome[3], syndrome[4], syndrome[5]);
712 printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
713 "%02x\n",
714 eccbuf[0], eccbuf[1], eccbuf[2],
715 eccbuf[3], eccbuf[4], eccbuf[5]);
716#endif
717 ret = -EIO;
718 }
719 }
720
721#ifdef PSYCHO_DEBUG
722 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
723 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
724 eccbuf[4], eccbuf[5]);
725#endif
726
727 /* disable the ECC engine */
728 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
729 } else {
730 /* Read the data via the internal pipeline through CDSN IO
731 register, see Pipelined Read Operations 11.3 */
732 MemReadDOC(docptr, buf, len-2);
733 buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
734 buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
735 }
736
737 /* Disable flash internally */
738 WriteDOC(0, docptr, Mplus_FlashSelect);
739
740 return ret;
741}
742
743static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
744 size_t *retlen, const u_char *buf)
745{
746 char eccbuf[6];
747 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
748}
749
750static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
751 size_t *retlen, const u_char *buf, u_char *eccbuf,
752 struct nand_oobinfo *oobsel)
753{
754 int i, before, ret = 0;
755 loff_t fto;
756 volatile char dummy;
757 struct DiskOnChip *this = mtd->priv;
758 void __iomem * docptr = this->virtadr;
759 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
760
761 /* Don't allow write past end of device */
762 if (to >= this->totlen)
763 return -EINVAL;
764
765 /* Don't allow writes which aren't exactly one block (512 bytes) */
766 if ((to & 0x1ff) || (len != 0x200))
767 return -EINVAL;
768
769 /* Determine position of OOB flags, before or after data */
770 before = (this->interleave && (to & 0x200));
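	/* When 'before' is set (second 512-byte half of an interleaved 1KB
	   page) the BLOCK_USED flag bytes sit in front of the data: fto is
	   pulled back by 2 and the 0x55 bytes are written ahead of the
	   payload below; otherwise they follow the ECC bytes. */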
771
772 DoC_CheckASIC(docptr);
773
774 /* Find the chip which is to be used and select it */
775 if (this->curfloor != mychip->floor) {
776 DoC_SelectFloor(docptr, mychip->floor);
777 DoC_SelectChip(docptr, mychip->chip);
778 } else if (this->curchip != mychip->chip) {
779 DoC_SelectChip(docptr, mychip->chip);
780 }
781 this->curfloor = mychip->floor;
782 this->curchip = mychip->chip;
783
784 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
785 WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
786
787 /* Reset the chip, see Software Requirement 11.4 item 1. */
788 DoC_Command(docptr, NAND_CMD_RESET, 0);
789 DoC_WaitReady(docptr);
790
791 /* Set device to appropriate plane of flash */
792 fto = to;
793 WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
794
795 /* On interleaved devices the flags for 2nd half 512 are before data */
796 if (eccbuf && before)
797 fto -= 2;
798
799	/* issue the Serial Data In command to initiate the Page Program process */
800 DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
801 DoC_Address(this, 3, fto, 0x00, 0x00);
802
803 /* Disable the ECC engine */
804 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
805
806 if (eccbuf) {
807 if (before) {
808 /* Write the block status BLOCK_USED (0x5555) */
809 WriteDOC(0x55, docptr, Mil_CDSN_IO);
810 WriteDOC(0x55, docptr, Mil_CDSN_IO);
811 }
812
813 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
814 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
815 }
816
817 MemWriteDOC(docptr, (unsigned char *) buf, len);
818
819 if (eccbuf) {
820 /* Write ECC data to flash, the ECC info is generated by
821 the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
822 DoC_Delay(docptr, 3);
823
824 /* Read the ECC data through the DiskOnChip ECC logic */
825 for (i = 0; i < 6; i++)
826 eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
827
828 /* disable the ECC engine */
829 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
830
831 /* Write the ECC data to flash */
832 MemWriteDOC(docptr, eccbuf, 6);
833
834 if (!before) {
835 /* Write the block status BLOCK_USED (0x5555) */
836 WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
837 WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
838 }
839
840#ifdef PSYCHO_DEBUG
841 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
842 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
843 eccbuf[4], eccbuf[5]);
844#endif
845 }
846
847 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
848 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
849
850 /* Commit the Page Program command and wait for ready
851 see Software Requirement 11.4 item 1.*/
852 DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
853 DoC_WaitReady(docptr);
854
855 /* Read the status of the flash device through CDSN IO register
856 see Software Requirement 11.4 item 5.*/
857 DoC_Command(docptr, NAND_CMD_STATUS, 0);
858 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
859 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
860 DoC_Delay(docptr, 2);
861 if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
862 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
863 /* Error in programming
864 FIXME: implement Bad Block Replacement (in nftl.c ??) */
865 *retlen = 0;
866 ret = -EIO;
867 }
868 dummy = ReadDOC(docptr, Mplus_LastDataRead);
869
870 /* Disable flash internally */
871 WriteDOC(0, docptr, Mplus_FlashSelect);
872
873 /* Let the caller know we completed it */
874 *retlen = len;
875
876 return ret;
877}
878
879static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
880 size_t *retlen, u_char *buf)
881{
882 loff_t fofs, base;
883 struct DiskOnChip *this = mtd->priv;
884 void __iomem * docptr = this->virtadr;
885 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
886 size_t i, size, got, want;
887
888 DoC_CheckASIC(docptr);
889
890 /* Find the chip which is to be used and select it */
891 if (this->curfloor != mychip->floor) {
892 DoC_SelectFloor(docptr, mychip->floor);
893 DoC_SelectChip(docptr, mychip->chip);
894 } else if (this->curchip != mychip->chip) {
895 DoC_SelectChip(docptr, mychip->chip);
896 }
897 this->curfloor = mychip->floor;
898 this->curchip = mychip->chip;
899
900 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
901 WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
902
903 /* disable the ECC engine */
904 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
905 DoC_WaitReady(docptr);
906
907 /* Maximum of 16 bytes in the OOB region, so limit read to that */
908 if (len > 16)
909 len = 16;
910 got = 0;
911 want = len;
912
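	/* On interleaved devices the 16 OOB bytes are split across three
	   windows (ECC in bytes 0-5, flags in 6-7, unit header in 8-15),
	   each behind its own read command, hence up to three passes below.
	   Non-interleaved devices use a single READOOB command. */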
913 for (i = 0; ((i < 3) && (want > 0)); i++) {
914 /* Figure out which region we are accessing... */
915 fofs = ofs;
916 base = ofs & 0xf;
917 if (!this->interleave) {
918 DoC_Command(docptr, NAND_CMD_READOOB, 0);
919 size = 16 - base;
920 } else if (base < 6) {
921 DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
922 size = 6 - base;
923 } else if (base < 8) {
924 DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
925 size = 8 - base;
926 } else {
927 DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
928 size = 16 - base;
929 }
930 if (size > want)
931 size = want;
932
933 /* Issue read command */
934 DoC_Address(this, 3, fofs, 0, 0x00);
935 WriteDOC(0, docptr, Mplus_FlashControl);
936 DoC_WaitReady(docptr);
937
938 ReadDOC(docptr, Mplus_ReadPipeInit);
939 ReadDOC(docptr, Mplus_ReadPipeInit);
940 MemReadDOC(docptr, &buf[got], size - 2);
941 buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
942 buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
943
944 ofs += size;
945 got += size;
946 want -= size;
947 }
948
949 /* Disable flash internally */
950 WriteDOC(0, docptr, Mplus_FlashSelect);
951
952 *retlen = len;
953 return 0;
954}
955
956static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
957 size_t *retlen, const u_char *buf)
958{
959 volatile char dummy;
960 loff_t fofs, base;
961 struct DiskOnChip *this = mtd->priv;
962 void __iomem * docptr = this->virtadr;
963 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
964 size_t i, size, got, want;
965 int ret = 0;
966
967 DoC_CheckASIC(docptr);
968
969 /* Find the chip which is to be used and select it */
970 if (this->curfloor != mychip->floor) {
971 DoC_SelectFloor(docptr, mychip->floor);
972 DoC_SelectChip(docptr, mychip->chip);
973 } else if (this->curchip != mychip->chip) {
974 DoC_SelectChip(docptr, mychip->chip);
975 }
976 this->curfloor = mychip->floor;
977 this->curchip = mychip->chip;
978
979 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
980 WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
981
982
983 /* Maximum of 16 bytes in the OOB region, so limit write to that */
984 if (len > 16)
985 len = 16;
986 got = 0;
987 want = len;
988
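	/* Same region split as in doc_read_oob(): ECC (bytes 0-5), flags
	   (6-7) and unit header (8-15) on interleaved devices, one READOOB
	   window otherwise. Each pass gets its own SEQIN/PAGEPROG cycle. */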
989 for (i = 0; ((i < 3) && (want > 0)); i++) {
990 /* Reset the chip, see Software Requirement 11.4 item 1. */
991 DoC_Command(docptr, NAND_CMD_RESET, 0);
992 DoC_WaitReady(docptr);
993
994 /* Figure out which region we are accessing... */
995 fofs = ofs;
996 base = ofs & 0x0f;
997 if (!this->interleave) {
998 WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
999 size = 16 - base;
1000 } else if (base < 6) {
1001 WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
1002 size = 6 - base;
1003 } else if (base < 8) {
1004 WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
1005 size = 8 - base;
1006 } else {
1007 WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
1008 size = 16 - base;
1009 }
1010 if (size > want)
1011 size = want;
1012
1013		/* Issue the Serial Data In command to initiate the Page Program process */
1014 DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
1015 DoC_Address(this, 3, fofs, 0, 0x00);
1016
1017 /* Disable the ECC engine */
1018 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
1019
1020 /* Write the data via the internal pipeline through CDSN IO
1021 register, see Pipelined Write Operations 11.2 */
1022 MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
1023 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
1024 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
1025
1026 /* Commit the Page Program command and wait for ready
1027 see Software Requirement 11.4 item 1.*/
1028 DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
1029 DoC_WaitReady(docptr);
1030
1031 /* Read the status of the flash device through CDSN IO register
1032 see Software Requirement 11.4 item 5.*/
1033 DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
1034 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
1035 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
1036 DoC_Delay(docptr, 2);
1037 if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
1038 printk("MTD: Error 0x%x programming oob at 0x%x\n",
1039 dummy, (int)ofs);
1040 /* FIXME: implement Bad Block Replacement */
1041 *retlen = 0;
1042 ret = -EIO;
1043 }
1044 dummy = ReadDOC(docptr, Mplus_LastDataRead);
1045
1046 ofs += size;
1047 got += size;
1048 want -= size;
1049 }
1050
1051 /* Disable flash internally */
1052 WriteDOC(0, docptr, Mplus_FlashSelect);
1053
1054 *retlen = len;
1055 return ret;
1056}
1057
1058int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1059{
1060 volatile char dummy;
1061 struct DiskOnChip *this = mtd->priv;
1062 __u32 ofs = instr->addr;
1063 __u32 len = instr->len;
1064 void __iomem * docptr = this->virtadr;
1065 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
1066
1067 DoC_CheckASIC(docptr);
1068
1069 if (len != mtd->erasesize)
1070		printk(KERN_WARNING "MTD: Erase not right size (%x != %x)\n",
1071 len, mtd->erasesize);
1072
1073 /* Find the chip which is to be used and select it */
1074 if (this->curfloor != mychip->floor) {
1075 DoC_SelectFloor(docptr, mychip->floor);
1076 DoC_SelectChip(docptr, mychip->chip);
1077 } else if (this->curchip != mychip->chip) {
1078 DoC_SelectChip(docptr, mychip->chip);
1079 }
1080 this->curfloor = mychip->floor;
1081 this->curchip = mychip->chip;
1082
1083 instr->state = MTD_ERASE_PENDING;
1084
1085 /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
1086 WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
1087
1088 DoC_Command(docptr, NAND_CMD_RESET, 0x00);
1089 DoC_WaitReady(docptr);
1090
1091 DoC_Command(docptr, NAND_CMD_ERASE1, 0);
1092 DoC_Address(this, 2, ofs, 0, 0x00);
1093 DoC_Command(docptr, NAND_CMD_ERASE2, 0);
1094 DoC_WaitReady(docptr);
1095 instr->state = MTD_ERASING;
1096
1097 /* Read the status of the flash device through CDSN IO register
1098 see Software Requirement 11.4 item 5. */
1099 DoC_Command(docptr, NAND_CMD_STATUS, 0);
1100 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
1101 dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
1102 if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
1103 printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
1104 /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
1105 instr->state = MTD_ERASE_FAILED;
1106 } else {
1107 instr->state = MTD_ERASE_DONE;
1108 }
1109 dummy = ReadDOC(docptr, Mplus_LastDataRead);
1110
1111 /* Disable flash internally */
1112 WriteDOC(0, docptr, Mplus_FlashSelect);
1113
1114 mtd_erase_callback(instr);
1115
1116 return 0;
1117}
1118
1119/****************************************************************************
1120 *
1121 * Module stuff
1122 *
1123 ****************************************************************************/
1124
1125static int __init init_doc2001plus(void)
1126{
1127 inter_module_register(im_name, THIS_MODULE, &DoCMilPlus_init);
1128 return 0;
1129}
1130
1131static void __exit cleanup_doc2001plus(void)
1132{
1133 struct mtd_info *mtd;
1134 struct DiskOnChip *this;
1135
1136 while ((mtd=docmilpluslist)) {
1137 this = mtd->priv;
1138 docmilpluslist = this->nextdoc;
1139
1140 del_mtd_device(mtd);
1141
1142 iounmap(this->virtadr);
1143 kfree(this->chips);
1144 kfree(mtd);
1145 }
1146 inter_module_unregister(im_name);
1147}
1148
1149module_exit(cleanup_doc2001plus);
1150module_init(init_doc2001plus);
1151
1152MODULE_LICENSE("GPL");
1153MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
1154MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
new file mode 100644
index 000000000000..933877ff4d88
--- /dev/null
+++ b/drivers/mtd/devices/docecc.c
@@ -0,0 +1,526 @@
1/*
2 * ECC algorithm for the M-Systems DiskOnChip. We use the excellent
3 * Reed-Solomon code of Phil Karn (karn@ka9q.ampr.org), available under
4 * the GNU GPL License. The rest simply converts the DiskOnChip
5 * syndrome into a standard syndrome.
6 *
7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
8 * Copyright (C) 2000 Netgem S.A.
9 *
10 * $Id: docecc.c,v 1.5 2003/05/21 15:15:06 dwmw2 Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <asm/errno.h>
29#include <asm/io.h>
30#include <asm/uaccess.h>
31#include <linux/miscdevice.h>
32#include <linux/pci.h>
33#include <linux/delay.h>
34#include <linux/slab.h>
35#include <linux/sched.h>
36#include <linux/init.h>
37#include <linux/types.h>
38
39#include <linux/mtd/compatmac.h> /* for min() in older kernels */
40#include <linux/mtd/mtd.h>
41#include <linux/mtd/doc2000.h>
42
43/* need to undef it (from asm/termbits.h) */
44#undef B0
45
46#define MM 10 /* Symbol size in bits */
47#define KK (1023-4) /* Number of data symbols per block */
48#define B0 510 /* First root of generator polynomial, alpha form */
49#define PRIM 1 /* power of alpha used to generate roots of generator poly */
50#define NN ((1 << MM) - 1)
51
52typedef unsigned short dtype;
53
54/* 1+x^3+x^10 */
55static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };
56
57/* This defines the type used to store an element of the Galois Field
58 * used by the code. Make sure this is something larger than a char if
59 * anything larger than GF(256) is used.
60 *
61 * Note: unsigned char will work up to GF(256) but int seems to run
62 * faster on the Pentium.
63 */
64typedef int gf;
65
66/* No legal value in index form represents zero, so
67 * we need a special value for this purpose
68 */
69#define A0 (NN)
70
71/* Compute x % NN, where NN is 2**MM - 1,
72 * without a slow divide
73 */
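/* The loop folds the bits above MM back into the low MM bits, using
 * x mod (2**MM - 1) == ((x >> MM) + (x & NN)) mod NN. For example, with
 * MM=10 and NN=1023, modnn(2050) subtracts 1023 to get 1027, then folds
 * (1027 >> 10) + (1027 & 1023) = 1 + 3 = 4, which is 2050 mod 1023. */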
74static inline gf
75modnn(int x)
76{
77 while (x >= NN) {
78 x -= NN;
79 x = (x >> MM) + (x & NN);
80 }
81 return x;
82}
83
84#define CLEAR(a,n) {\
85int ci;\
86for(ci=(n)-1;ci >=0;ci--)\
87(a)[ci] = 0;\
88}
89
90#define COPY(a,b,n) {\
91int ci;\
92for(ci=(n)-1;ci >=0;ci--)\
93(a)[ci] = (b)[ci];\
94}
95
96#define COPYDOWN(a,b,n) {\
97int ci;\
98for(ci=(n)-1;ci >=0;ci--)\
99(a)[ci] = (b)[ci];\
100}
101
102#define Ldec 1
103
104/* Generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m].
105 Lookup tables: index -> polynomial form: alpha_to[] contains j=alpha**i;
106 polynomial form -> index form: index_of[j=alpha**i] = i.
107 alpha=2 is the primitive element of GF(2**m).
108 HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
109 Let @ represent the primitive element commonly called "alpha" that
110 is the root of the primitive polynomial p(x). Then in GF(2^m), for any
111 0 <= i <= 2^m-2,
112 @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
113 where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
114 of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
115 example the polynomial representation of @^5 would be given by the binary
116 representation of the integer "alpha_to[5]".
117 Similarly, index_of[] can be used as follows:
118 As above, let @ represent the primitive element of GF(2^m) that is
119 the root of the primitive polynomial p(x). In order to find the power
120 of @ (alpha) that has the polynomial representation
121 a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
122 we consider the integer "i" whose binary representation with a(0) being LSB
123 and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
124 "index_of[i]". Now, @^index_of[i] is that element whose polynomial
125 representation is (a(0),a(1),a(2),...,a(m-1)).
126 NOTE:
127 The element alpha_to[2^m-1] = 0 always signifies that the
128 representation of "@^infinity" = 0 is (0,0,0,...,0).
129 Similarly, the element index_of[0] = A0 always signifies
130 that the power of alpha which has the polynomial representation
131 (0,0,...,0) is "infinity".
132
133*/
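/* Concretely, with MM=10 and p(x) = 1 + x**3 + x**10 this yields
   alpha_to[0..9] = 1, 2, 4, ..., 512 and alpha_to[10] = 9 (i.e.
   @**10 = 1 + @**3), with index_of[] as the inverse mapping, e.g.
   index_of[9] = 10. */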
134
135static void
136generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
137{
138 register int i, mask;
139
140 mask = 1;
141 Alpha_to[MM] = 0;
142 for (i = 0; i < MM; i++) {
143 Alpha_to[i] = mask;
144 Index_of[Alpha_to[i]] = i;
145 /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
146 if (Pp[i] != 0)
147 Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */
148 mask <<= 1; /* single left-shift */
149 }
150 Index_of[Alpha_to[MM]] = MM;
151 /*
152 * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
153 * poly-repr of @^i shifted left one-bit and accounting for any @^MM
154 * term that may occur when poly-repr of @^i is shifted.
155 */
156 mask >>= 1;
157 for (i = MM + 1; i < NN; i++) {
158 if (Alpha_to[i - 1] >= mask)
159 Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
160 else
161 Alpha_to[i] = Alpha_to[i - 1] << 1;
162 Index_of[Alpha_to[i]] = i;
163 }
164 Index_of[0] = A0;
165 Alpha_to[NN] = 0;
166}
167
168/*
169 * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
170 * of the feedback shift register after having processed the data and
171 * the ECC.
172 *
173 * Return number of symbols corrected, or -1 if codeword is illegal
174 * or uncorrectable. If eras_pos is non-null, the detected error locations
175 * are written back. NOTE! This array must be at least NN-KK elements long.
176 * The corrected values are written in eras_val[]. They must be XORed with the
177 * data to retrieve the correct data: data[eras_pos[i]] ^= eras_val[i].
178 *
179 * First "no_eras" erasures are declared by the calling program. Then, the
180 * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
181 * If the number of channel errors is not greater than "t_after_eras" the
182 * transmitted codeword will be recovered. Details of algorithm can be found
183 * in R. Blahut's "Theory ... of Error-Correcting Codes".
184
185 * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
186 * will result. The decoder *could* check for this condition, but it would involve
187 * extra time on every decoding operation.
188 * */
189static int
190eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
191 gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
192 int no_eras)
193{
194 int deg_lambda, el, deg_omega;
195 int i, j, r,k;
196 gf u,q,tmp,num1,num2,den,discr_r;
197 gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly
198 * and syndrome poly */
199 gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
200 gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
201 int syn_error, count;
202
203 syn_error = 0;
204 for(i=0;i<NN-KK;i++)
205 syn_error |= bb[i];
206
207 if (!syn_error) {
208 /* if remainder is zero, data[] is a codeword and there are no
209 * errors to correct. So return data[] unmodified
210 */
211 count = 0;
212 goto finish;
213 }
214
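	/* Build the syndromes s[1..NN-KK] by evaluating the remainder
	   polynomial bb(x) at the generator roots alpha**(PRIM*(B0+i-1)),
	   i = 1..NN-KK */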
215 for(i=1;i<=NN-KK;i++){
216 s[i] = bb[0];
217 }
218 for(j=1;j<NN-KK;j++){
219 if(bb[j] == 0)
220 continue;
221 tmp = Index_of[bb[j]];
222
223 for(i=1;i<=NN-KK;i++)
224 s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
225 }
226
227 /* undo the feedback register implicit multiplication and convert
228 syndromes to index form */
229
230 for(i=1;i<=NN-KK;i++) {
231 tmp = Index_of[s[i]];
232 if (tmp != A0)
233 tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
234 s[i] = tmp;
235 }
236
237 CLEAR(&lambda[1],NN-KK);
238 lambda[0] = 1;
239
240 if (no_eras > 0) {
241 /* Init lambda to be the erasure locator polynomial */
242 lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
243 for (i = 1; i < no_eras; i++) {
244 u = modnn(PRIM*eras_pos[i]);
245 for (j = i+1; j > 0; j--) {
246 tmp = Index_of[lambda[j - 1]];
247 if(tmp != A0)
248 lambda[j] ^= Alpha_to[modnn(u + tmp)];
249 }
250 }
251#if DEBUG >= 1
252 /* Test code that verifies the erasure locator polynomial just constructed
253 Needed only for decoder debugging. */
254
255 /* find roots of the erasure location polynomial */
256 for(i=1;i<=no_eras;i++)
257 reg[i] = Index_of[lambda[i]];
258 count = 0;
259 for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
260 q = 1;
261 for (j = 1; j <= no_eras; j++)
262 if (reg[j] != A0) {
263 reg[j] = modnn(reg[j] + j);
264 q ^= Alpha_to[reg[j]];
265 }
266 if (q != 0)
267 continue;
268 /* store root and error location number indices */
269 root[count] = i;
270 loc[count] = k;
271 count++;
272 }
273 if (count != no_eras) {
274 printf("\n lambda(x) is WRONG\n");
275 count = -1;
276 goto finish;
277 }
278#if DEBUG >= 2
279 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
280 for (i = 0; i < count; i++)
281 printf("%d ", loc[i]);
282 printf("\n");
283#endif
284#endif
285 }
286 for(i=0;i<NN-KK+1;i++)
287 b[i] = Index_of[lambda[i]];
288
289 /*
290 * Begin Berlekamp-Massey algorithm to determine error+erasure
291 * locator polynomial
292 */
293 r = no_eras;
294 el = no_eras;
295 while (++r <= NN-KK) { /* r is the step number */
296 /* Compute discrepancy at the r-th step in poly-form */
297 discr_r = 0;
298 for (i = 0; i < r; i++){
299 if ((lambda[i] != 0) && (s[r - i] != A0)) {
300 discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
301 }
302 }
303 discr_r = Index_of[discr_r]; /* Index form */
304 if (discr_r == A0) {
305 /* 2 lines below: B(x) <-- x*B(x) */
306 COPYDOWN(&b[1],b,NN-KK);
307 b[0] = A0;
308 } else {
309 /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
310 t[0] = lambda[0];
311 for (i = 0 ; i < NN-KK; i++) {
312 if(b[i] != A0)
313 t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
314 else
315 t[i+1] = lambda[i+1];
316 }
317 if (2 * el <= r + no_eras - 1) {
318 el = r + no_eras - el;
319 /*
320 * 2 lines below: B(x) <-- inv(discr_r) *
321 * lambda(x)
322 */
323 for (i = 0; i <= NN-KK; i++)
324 b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
325 } else {
326 /* 2 lines below: B(x) <-- x*B(x) */
327 COPYDOWN(&b[1],b,NN-KK);
328 b[0] = A0;
329 }
330 COPY(lambda,t,NN-KK+1);
331 }
332 }
333
334 /* Convert lambda to index form and compute deg(lambda(x)) */
335 deg_lambda = 0;
336 for(i=0;i<NN-KK+1;i++){
337 lambda[i] = Index_of[lambda[i]];
338 if(lambda[i] != A0)
339 deg_lambda = i;
340 }
341 /*
342 * Find roots of the error+erasure locator polynomial by Chien
343 * Search
344 */
345 COPY(&reg[1],&lambda[1],NN-KK);
346 count = 0; /* Number of roots of lambda(x) */
347 for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
348 q = 1;
349 for (j = deg_lambda; j > 0; j--){
350 if (reg[j] != A0) {
351 reg[j] = modnn(reg[j] + j);
352 q ^= Alpha_to[reg[j]];
353 }
354 }
355 if (q != 0)
356 continue;
357 /* store root (index-form) and error location number */
358 root[count] = i;
359 loc[count] = k;
360 /* If we've already found max possible roots,
361 * abort the search to save time
362 */
363 if(++count == deg_lambda)
364 break;
365 }
366 if (deg_lambda != count) {
367 /*
368 * deg(lambda) unequal to number of roots => uncorrectable
369 * error detected
370 */
371 count = -1;
372 goto finish;
373 }
374 /*
375 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
376 * x**(NN-KK)). in index form. Also find deg(omega).
377 */
378 deg_omega = 0;
379 for (i = 0; i < NN-KK;i++){
380 tmp = 0;
381 j = (deg_lambda < i) ? deg_lambda : i;
382 for(;j >= 0; j--){
383 if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
384 tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
385 }
386 if(tmp != 0)
387 deg_omega = i;
388 omega[i] = Index_of[tmp];
389 }
390 omega[NN-KK] = A0;
391
392 /*
393 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
394 * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
395 */
396 for (j = count-1; j >=0; j--) {
397 num1 = 0;
398 for (i = deg_omega; i >= 0; i--) {
399 if (omega[i] != A0)
400 num1 ^= Alpha_to[modnn(omega[i] + i * root[j])];
401 }
402 num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
403 den = 0;
404
405 /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
406 for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
407 if(lambda[i+1] != A0)
408 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
409 }
410 if (den == 0) {
411#if DEBUG >= 1
412 printf("\n ERROR: denominator = 0\n");
413#endif
414			/* Convert to dual basis */
415 count = -1;
416 goto finish;
417 }
418 /* Apply error to data */
419 if (num1 != 0) {
420 eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
421 } else {
422 eras_val[j] = 0;
423 }
424 }
425 finish:
426 for(i=0;i<count;i++)
427 eras_pos[i] = loc[i];
428 return count;
429}
430
431/***************************************************************************/
432/* The DOC specific code begins here */
433
434#define SECTOR_SIZE 512
435/* The sector bytes are packed into NB_DATA MM-bit words */
436#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
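/* With SECTOR_SIZE = 512 and MM = 10 this is (513*8 + 6)/10 = 411 ten-bit
   symbols, i.e. the 512 sector bytes plus one extra byte, padded out to a
   whole number of symbols. */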
437
438/*
439 * Correct the errors in 'sector[]' by using 'ecc1[]' which is the
440 * content of the feedback shift register applied to the sector and
441 * the ECC. Return the number of errors corrected (and correct them in
442 * sector), or -1 on error.
443 */
444int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
445{
446 int parity, i, nb_errors;
447 gf bb[NN - KK + 1];
448 gf error_val[NN-KK];
449 int error_pos[NN-KK], pos, bitpos, index, val;
450 dtype *Alpha_to, *Index_of;
451
452 /* init log and exp tables here to save memory. However, it is slower */
453 Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
454 if (!Alpha_to)
455 return -1;
456
457 Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
458 if (!Index_of) {
459 kfree(Alpha_to);
460 return -1;
461 }
462
463 generate_gf(Alpha_to, Index_of);
464
465 parity = ecc1[1];
466
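	/* Repack the six ECC bytes from the hardware: ecc1[1] is kept as a
	   bit-parity byte (checked at the end), and the remaining 40 bits
	   are assembled into the four 10-bit feedback-register words
	   bb[0..3] that eras_dec_rs() expects. */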
467 bb[0] = (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
468 bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
469 bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
470 bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
471
472 nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
473 error_val, error_pos, 0);
474 if (nb_errors <= 0)
475 goto the_end;
476
477 /* correct the errors */
478 for(i=0;i<nb_errors;i++) {
479 pos = error_pos[i];
480 if (pos >= NB_DATA && pos < KK) {
481 nb_errors = -1;
482 goto the_end;
483 }
484 if (pos < NB_DATA) {
485 /* extract bit position (MSB first) */
486 pos = 10 * (NB_DATA - 1 - pos) - 6;
487 /* now correct the following 10 bits. At most two bytes
488 can be modified since pos is even */
489 index = (pos >> 3) ^ 1;
490 bitpos = pos & 7;
491 if ((index >= 0 && index < SECTOR_SIZE) ||
492 index == (SECTOR_SIZE + 1)) {
493 val = error_val[i] >> (2 + bitpos);
494 parity ^= val;
495 if (index < SECTOR_SIZE)
496 sector[index] ^= val;
497 }
498 index = ((pos >> 3) + 1) ^ 1;
499 bitpos = (bitpos + 10) & 7;
500 if (bitpos == 0)
501 bitpos = 8;
502 if ((index >= 0 && index < SECTOR_SIZE) ||
503 index == (SECTOR_SIZE + 1)) {
504 val = error_val[i] << (8 - bitpos);
505 parity ^= val;
506 if (index < SECTOR_SIZE)
507 sector[index] ^= val;
508 }
509 }
510 }
511
512 /* use parity to test extra errors */
513 if ((parity & 0xff) != 0)
514 nb_errors = -1;
515
516 the_end:
517 kfree(Alpha_to);
518 kfree(Index_of);
519 return nb_errors;
520}
521
522EXPORT_SYMBOL_GPL(doc_decode_ecc);
523
524MODULE_LICENSE("GPL");
525MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
526MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
new file mode 100644
index 000000000000..197d67045e1e
--- /dev/null
+++ b/drivers/mtd/devices/docprobe.c
@@ -0,0 +1,355 @@
1
2/* Linux driver for Disk-On-Chip devices */
3/* Probe routines common to all DoC devices */
4/* (C) 1999 Machine Vision Holdings, Inc. */
5/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
6
7/* $Id: docprobe.c,v 1.44 2005/01/05 12:40:36 dwmw2 Exp $ */
8
9
10
11/* DOC_PASSIVE_PROBE:
12 In order to ensure that the BIOS checksum is correct at boot time, and
13 hence that the onboard BIOS extension gets executed, the DiskOnChip
14 goes into reset mode when it is read sequentially: all registers
15 return 0xff until the chip is woken up again by writing to the
16 DOCControl register.
17
18 Unfortunately, this means that the probe for the DiskOnChip is unsafe,
19 because one of the first things it does is write to where it thinks
20 the DOCControl register should be - which may well be shared memory
21 for another device. I've had machines which lock up when this is
22 attempted. Hence the possibility to do a passive probe, which will fail
23 to detect a chip in reset mode, but is at least guaranteed not to lock
24 the machine.
25
26 If you have this problem, uncomment the following line:
27#define DOC_PASSIVE_PROBE
28*/
29
30
31/* DOC_SINGLE_DRIVER:
32 Millennium driver has been merged into DOC2000 driver.
33
34 The old Millennium-only driver has been retained just in case there
35 are problems with the new code. If the combined driver doesn't work
36 for you, you can try the old one by undefining DOC_SINGLE_DRIVER
37 below and also enabling it in your configuration. If this fixes the
38 problems, please send a report to the MTD mailing list at
39 <linux-mtd@lists.infradead.org>.
40*/
41#define DOC_SINGLE_DRIVER
42
43#include <linux/config.h>
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <asm/errno.h>
47#include <asm/io.h>
48#include <linux/delay.h>
49#include <linux/slab.h>
50#include <linux/init.h>
51#include <linux/types.h>
52
53#include <linux/mtd/mtd.h>
54#include <linux/mtd/nand.h>
55#include <linux/mtd/doc2000.h>
56#include <linux/mtd/compatmac.h>
57
58/* Where to look for the devices? */
59#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
60#define CONFIG_MTD_DOCPROBE_ADDRESS 0
61#endif
62
63
64static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
65module_param(doc_config_location, ulong, 0);
66MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
67
68static unsigned long __initdata doc_locations[] = {
69#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
70#ifdef CONFIG_MTD_DOCPROBE_HIGH
71 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
72 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
73 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
74 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
75 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
76#else /* CONFIG_MTD_DOCPROBE_HIGH */
77 0xc8000, 0xca000, 0xcc000, 0xce000,
78 0xd0000, 0xd2000, 0xd4000, 0xd6000,
79 0xd8000, 0xda000, 0xdc000, 0xde000,
80 0xe0000, 0xe2000, 0xe4000, 0xe6000,
81 0xe8000, 0xea000, 0xec000, 0xee000,
82#endif /* CONFIG_MTD_DOCPROBE_HIGH */
83#elif defined(__PPC__)
84 0xe4000000,
85#elif defined(CONFIG_MOMENCO_OCELOT)
86 0x2f000000,
87 0xff000000,
88#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
89 0xff000000,
90#else
91#warning Unknown architecture for DiskOnChip. No default probe locations defined
92#endif
93 0xffffffff };
94
95/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
96
97static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
98{
99 void __iomem *window=potential;
100 unsigned char tmp, tmpb, tmpc, ChipID;
101#ifndef DOC_PASSIVE_PROBE
102 unsigned char tmp2;
103#endif
104
105 /* Routine copied from the Linux DOC driver */
106
107#ifdef CONFIG_MTD_DOCPROBE_55AA
108 /* Check for 0x55 0xAA signature at beginning of window,
109	   this is no longer true once we remove the IPL (for Millennium) */
110 if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa)
111 return 0;
112#endif /* CONFIG_MTD_DOCPROBE_55AA */
113
114#ifndef DOC_PASSIVE_PROBE
115 /* It's not possible to cleanly detect the DiskOnChip - the
116 * bootup procedure will put the device into reset mode, and
117 * it's not possible to talk to it without actually writing
118 * to the DOCControl register. So we store the current contents
119 * of the DOCControl register's location, in case we later decide
120 * that it's not a DiskOnChip, and want to put it back how we
121 * found it.
122 */
123 tmp2 = ReadDOC(window, DOCControl);
124
125 /* Reset the DiskOnChip ASIC */
126 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
127 window, DOCControl);
128 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
129 window, DOCControl);
130
131 /* Enable the DiskOnChip ASIC */
132 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
133 window, DOCControl);
134 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
135 window, DOCControl);
136#endif /* !DOC_PASSIVE_PROBE */
137
138 /* We need to read the ChipID register four times. For some
139 newer DiskOnChip 2000 units, the first three reads will
140 return the DiskOnChip Millennium ident. Don't ask. */
141 ChipID = ReadDOC(window, ChipID);
142
143 switch (ChipID) {
144 case DOC_ChipID_Doc2k:
145 /* Check the TOGGLE bit in the ECC register */
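		/* The toggle bit flips on every read of a live ASIC, so the
		   three samples should read as a, !a, a; any other pattern
		   means nothing DiskOnChip-like is answering here. */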
146 tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
147 tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
148 tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
149 if (tmp != tmpb && tmp == tmpc)
150 return ChipID;
151 break;
152
153 case DOC_ChipID_DocMil:
154 /* Check for the new 2000 with Millennium ASIC */
155 ReadDOC(window, ChipID);
156 ReadDOC(window, ChipID);
157 if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
158 ChipID = DOC_ChipID_Doc2kTSOP;
159
160 /* Check the TOGGLE bit in the ECC register */
161 tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
162 tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
163 tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
164 if (tmp != tmpb && tmp == tmpc)
165 return ChipID;
166 break;
167
168 case DOC_ChipID_DocMilPlus16:
169 case DOC_ChipID_DocMilPlus32:
170 case 0:
171 /* Possible Millennium+, need to do more checks */
172#ifndef DOC_PASSIVE_PROBE
173 /* Possibly release from power down mode */
174 for (tmp = 0; (tmp < 4); tmp++)
175 ReadDOC(window, Mplus_Power);
176
177 /* Reset the DiskOnChip ASIC */
178 tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
179 DOC_MODE_BDECT;
180 WriteDOC(tmp, window, Mplus_DOCControl);
181 WriteDOC(~tmp, window, Mplus_CtrlConfirm);
182
183 mdelay(1);
184 /* Enable the DiskOnChip ASIC */
185 tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
186 DOC_MODE_BDECT;
187 WriteDOC(tmp, window, Mplus_DOCControl);
188 WriteDOC(~tmp, window, Mplus_CtrlConfirm);
189 mdelay(1);
190#endif /* !DOC_PASSIVE_PROBE */
191
192 ChipID = ReadDOC(window, ChipID);
193
194 switch (ChipID) {
195 case DOC_ChipID_DocMilPlus16:
196 case DOC_ChipID_DocMilPlus32:
197 /* Check the TOGGLE bit in the toggle register */
198 tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
199 tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
200 tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
201 if (tmp != tmpb && tmp == tmpc)
202 return ChipID;
203 default:
204 break;
205 }
206		/* FALL THRU */
207
208 default:
209
210#ifdef CONFIG_MTD_DOCPROBE_55AA
211 printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
212 ChipID, physadr);
213#endif
214#ifndef DOC_PASSIVE_PROBE
215 /* Put back the contents of the DOCControl register, in case it's not
216 * actually a DiskOnChip.
217 */
218 WriteDOC(tmp2, window, DOCControl);
219#endif
220 return 0;
221 }
222
223 printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n");
224
225#ifndef DOC_PASSIVE_PROBE
226 /* Put back the contents of the DOCControl register: it's not a DiskOnChip */
227 WriteDOC(tmp2, window, DOCControl);
228#endif
229 return 0;
230}
231
232static int docfound;
233
234static void __init DoC_Probe(unsigned long physadr)
235{
236 void __iomem *docptr;
237 struct DiskOnChip *this;
238 struct mtd_info *mtd;
239 int ChipID;
240 char namebuf[15];
241 char *name = namebuf;
242 char *im_funcname = NULL;
243 char *im_modname = NULL;
244 void (*initroutine)(struct mtd_info *) = NULL;
245
246 docptr = ioremap(physadr, DOC_IOREMAP_LEN);
247
248 if (!docptr)
249 return;
250
251 if ((ChipID = doccheck(docptr, physadr))) {
252 if (ChipID == DOC_ChipID_Doc2kTSOP) {
253 /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
254 printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
255 iounmap(docptr);
256 return;
257 }
258 docfound = 1;
259 mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
260
261 if (!mtd) {
262 printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
263 iounmap(docptr);
264 return;
265 }
266
267 this = (struct DiskOnChip *)(&mtd[1]);
268
269 memset((char *)mtd,0, sizeof(struct mtd_info));
270 memset((char *)this, 0, sizeof(struct DiskOnChip));
271
272 mtd->priv = this;
273 this->virtadr = docptr;
274 this->physadr = physadr;
275 this->ChipID = ChipID;
276 sprintf(namebuf, "with ChipID %2.2X", ChipID);
277
278 switch(ChipID) {
279 case DOC_ChipID_Doc2kTSOP:
280 name="2000 TSOP";
281 im_funcname = "DoC2k_init";
282 im_modname = "doc2000";
283 break;
284
285 case DOC_ChipID_Doc2k:
286 name="2000";
287 im_funcname = "DoC2k_init";
288 im_modname = "doc2000";
289 break;
290
291 case DOC_ChipID_DocMil:
292 name="Millennium";
293#ifdef DOC_SINGLE_DRIVER
294 im_funcname = "DoC2k_init";
295 im_modname = "doc2000";
296#else
297 im_funcname = "DoCMil_init";
298 im_modname = "doc2001";
299#endif /* DOC_SINGLE_DRIVER */
300 break;
301
302 case DOC_ChipID_DocMilPlus16:
303 case DOC_ChipID_DocMilPlus32:
304 name="MillenniumPlus";
305 im_funcname = "DoCMilPlus_init";
306 im_modname = "doc2001plus";
307 break;
308 }
309
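		/* The chip drivers register their init routines via
		   inter_module_register() (e.g. init_doc2001plus() above); look
		   the routine up by name, loading the module on demand. */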
310 if (im_funcname)
311 initroutine = inter_module_get_request(im_funcname, im_modname);
312
313 if (initroutine) {
314 (*initroutine)(mtd);
315 inter_module_put(im_funcname);
316 return;
317 }
318 printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
319 kfree(mtd);
320 }
321 iounmap(docptr);
322}
323
324
325/****************************************************************************
326 *
327 * Module stuff
328 *
329 ****************************************************************************/
330
331static int __init init_doc(void)
332{
333 int i;
334
335 if (doc_config_location) {
336 printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
337 DoC_Probe(doc_config_location);
338 } else {
339 for (i=0; (doc_locations[i] != 0xffffffff); i++) {
340 DoC_Probe(doc_locations[i]);
341 }
342 }
343 /* No banner message any more. Print a message if no DiskOnChip
344 found, so the user knows we at least tried. */
345 if (!docfound)
346 printk(KERN_INFO "No recognised DiskOnChip devices found\n");
347 return -EAGAIN;
348}
349
350module_init(init_doc);
351
352MODULE_LICENSE("GPL");
353MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
354MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
355
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
new file mode 100644
index 000000000000..dfd335e4a2a8
--- /dev/null
+++ b/drivers/mtd/devices/lart.c
@@ -0,0 +1,711 @@
1
2/*
3 * MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
4 *
5 * $Id: lart.c,v 1.7 2004/08/09 13:19:44 dwmw2 Exp $
6 *
7 * Author: Abraham vd Merwe <abraham@2d3d.co.za>
8 *
9 * Copyright (c) 2001, 2d3D, Inc.
10 *
11 * This code is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * References:
16 *
17 *    [1] "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
18 * - Order Number: 290644-005
19 * - January 2000
20 *
21 * [2] MTD internal API documentation
22 * - http://www.linux-mtd.infradead.org/tech/
23 *
24 * Limitations:
25 *
26 * Even though this driver is written for 3 Volt Fast Boot
27 *    Block Flash Memory, it is rather specific to LART. With
28 *    minor modifications, notably removing the data/address line
29 *    mangling and changing the bus settings, it should be
30 *    trivial to adapt to other platforms.
31 *
32 * If somebody would sponsor me a different board, I'll
33 * adapt the driver (:
34 */
35
36/* debugging */
37//#define LART_DEBUG
38
39/* partition support */
40#define HAVE_PARTITIONS
41
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/types.h>
45#include <linux/init.h>
46#include <linux/errno.h>
47#include <linux/mtd/mtd.h>
48#ifdef HAVE_PARTITIONS
49#include <linux/mtd/partitions.h>
50#endif
51
52#ifndef CONFIG_SA1100_LART
53#error This is for LART architecture only
54#endif
55
56static char module_name[] = "lart";
57
58/*
59 * These values are specific to 28Fxxxx3 flash memory.
60 * See section 2.3.1 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
61 */
62#define FLASH_BLOCKSIZE_PARAM (4096 * BUSWIDTH)
63#define FLASH_NUMBLOCKS_16m_PARAM 8
64#define FLASH_NUMBLOCKS_8m_PARAM 8
65
66/*
67 * These values are specific to 28Fxxxx3 flash memory.
68 * See section 2.3.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
69 */
70#define FLASH_BLOCKSIZE_MAIN (32768 * BUSWIDTH)
71#define FLASH_NUMBLOCKS_16m_MAIN 31
72#define FLASH_NUMBLOCKS_8m_MAIN 15
73
74/*
75 * These values are specific to LART
76 */
77
78/* general */
79#define BUSWIDTH 4 /* don't change this - a lot of the code _will_ break if you change this */
80#define FLASH_OFFSET 0xe8000000 /* see linux/arch/arm/mach-sa1100/lart.c */
81
82/* blob */
83#define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM
84#define BLOB_START 0x00000000
85#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)
86
87/* kernel */
88#define NUM_KERNEL_BLOCKS 7
89#define KERNEL_START (BLOB_START + BLOB_LEN)
90#define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)
91
92/* initial ramdisk */
93#define NUM_INITRD_BLOCKS 24
94#define INITRD_START (KERNEL_START + KERNEL_LEN)
95#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)
96
97/*
98 * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
99 */
100#define READ_ARRAY 0x00FF00FF /* Read Array/Reset */
101#define READ_ID_CODES 0x00900090 /* Read Identifier Codes */
102#define ERASE_SETUP 0x00200020 /* Block Erase */
103#define ERASE_CONFIRM 0x00D000D0 /* Block Erase and Program Resume */
104#define PGM_SETUP 0x00400040 /* Program */
105#define STATUS_READ 0x00700070 /* Read Status Register */
106#define STATUS_CLEAR 0x00500050 /* Clear Status Register */
107#define STATUS_BUSY 0x00800080 /* Write State Machine Status (WSMS) */
108#define STATUS_ERASE_ERR 0x00200020 /* Erase Status (ES) */
109#define STATUS_PGM_ERR 0x00100010 /* Program Status (PS) */
110
111/*
112 * See section 4.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
113 */
114#define FLASH_MANUFACTURER 0x00890089
115#define FLASH_DEVICE_8mbit_TOP 0x88f188f1
116#define FLASH_DEVICE_8mbit_BOTTOM 0x88f288f2
117#define FLASH_DEVICE_16mbit_TOP 0x88f388f3
118#define FLASH_DEVICE_16mbit_BOTTOM 0x88f488f4
119
120/***************************************************************************************************/
121
122/*
123 * The data line mapping on LART is as follows:
124 *
125 * U2 CPU | U3 CPU
126 * -------------------
127 * 0 20 | 0 12
128 * 1 22 | 1 14
129 * 2 19 | 2 11
130 * 3 17 | 3 9
131 * 4 24 | 4 0
132 * 5 26 | 5 2
133 * 6 31 | 6 7
134 * 7 29 | 7 5
135 * 8 21 | 8 13
136 * 9 23 | 9 15
137 * 10 18 | 10 10
138 * 11 16 | 11 8
139 * 12 25 | 12 1
140 * 13 27 | 13 3
141 * 14 30 | 14 6
142 * 15 28 | 15 4
143 */
144
145/* Mangle data (x) */
146#define DATA_TO_FLASH(x) \
147 ( \
148 (((x) & 0x08009000) >> 11) + \
149 (((x) & 0x00002000) >> 10) + \
150 (((x) & 0x04004000) >> 8) + \
151 (((x) & 0x00000010) >> 4) + \
152 (((x) & 0x91000820) >> 3) + \
153 (((x) & 0x22080080) >> 2) + \
154 ((x) & 0x40000400) + \
155 (((x) & 0x00040040) << 1) + \
156 (((x) & 0x00110000) << 4) + \
157 (((x) & 0x00220100) << 5) + \
158 (((x) & 0x00800208) << 6) + \
159 (((x) & 0x00400004) << 9) + \
160 (((x) & 0x00000001) << 12) + \
161 (((x) & 0x00000002) << 13) \
162 )
163
164/* Unmangle data (x) */
165#define FLASH_TO_DATA(x) \
166 ( \
167 (((x) & 0x00010012) << 11) + \
168 (((x) & 0x00000008) << 10) + \
169 (((x) & 0x00040040) << 8) + \
170 (((x) & 0x00000001) << 4) + \
171 (((x) & 0x12200104) << 3) + \
172 (((x) & 0x08820020) << 2) + \
173 ((x) & 0x40000400) + \
174 (((x) & 0x00080080) >> 1) + \
175 (((x) & 0x01100000) >> 4) + \
176 (((x) & 0x04402000) >> 5) + \
177 (((x) & 0x20008200) >> 6) + \
178 (((x) & 0x80000800) >> 9) + \
179 (((x) & 0x00001000) >> 12) + \
180 (((x) & 0x00004000) >> 13) \
181 )
182
183/*
184 * The address line mapping on LART is as follows:
185 *
186 * U3 CPU | U2 CPU
187 * -------------------
188 * 0 2 | 0 2
189 * 1 3 | 1 3
190 * 2 9 | 2 9
191 * 3 13 | 3 8
192 * 4 8 | 4 7
193 * 5 12 | 5 6
194 * 6 11 | 6 5
195 * 7 10 | 7 4
196 * 8 4 | 8 10
197 * 9 5 | 9 11
198 * 10 6 | 10 12
199 * 11 7 | 11 13
200 *
201 * BOOT BLOCK BOUNDARY
202 *
203 * 12 15 | 12 15
204 * 13 14 | 13 14
205 * 14 16 | 14 16
206 *
207 * MAIN BLOCK BOUNDARY
208 *
209 * 15 17 | 15 18
210 * 16 18 | 16 17
211 * 17 20 | 17 20
212 * 18 19 | 18 19
213 * 19 21 | 19 21
214 *
215 * As we can see from above, the addresses aren't mangled across
216 * block boundaries, so we don't need to worry about address
217 * translations except for sending/reading commands during
218 * initialization
219 */
220
221/* Mangle address (x) on chip U2 */
222#define ADDR_TO_FLASH_U2(x) \
223 ( \
224 (((x) & 0x00000f00) >> 4) + \
225 (((x) & 0x00042000) << 1) + \
226 (((x) & 0x0009c003) << 2) + \
227 (((x) & 0x00021080) << 3) + \
228 (((x) & 0x00000010) << 4) + \
229 (((x) & 0x00000040) << 5) + \
230 (((x) & 0x00000024) << 7) + \
231 (((x) & 0x00000008) << 10) \
232 )
233
234/* Unmangle address (x) on chip U2 */
235#define FLASH_U2_TO_ADDR(x) \
236 ( \
237 (((x) << 4) & 0x00000f00) + \
238 (((x) >> 1) & 0x00042000) + \
239 (((x) >> 2) & 0x0009c003) + \
240 (((x) >> 3) & 0x00021080) + \
241 (((x) >> 4) & 0x00000010) + \
242 (((x) >> 5) & 0x00000040) + \
243 (((x) >> 7) & 0x00000024) + \
244 (((x) >> 10) & 0x00000008) \
245 )
246
247/* Mangle address (x) on chip U3 */
248#define ADDR_TO_FLASH_U3(x) \
249 ( \
250 (((x) & 0x00000080) >> 3) + \
251 (((x) & 0x00000040) >> 1) + \
252 (((x) & 0x00052020) << 1) + \
253 (((x) & 0x00084f03) << 2) + \
254 (((x) & 0x00029010) << 3) + \
255 (((x) & 0x00000008) << 5) + \
256 (((x) & 0x00000004) << 7) \
257 )
258
259/* Unmangle address (x) on chip U3 */
260#define FLASH_U3_TO_ADDR(x) \
261 ( \
262 (((x) << 3) & 0x00000080) + \
263 (((x) << 1) & 0x00000040) + \
264 (((x) >> 1) & 0x00052020) + \
265 (((x) >> 2) & 0x00084f03) + \
266 (((x) >> 3) & 0x00029010) + \
267 (((x) >> 5) & 0x00000008) + \
268 (((x) >> 7) & 0x00000004) \
269 )
270
271/***************************************************************************************************/
272
273static __u8 read8 (__u32 offset)
274{
275 volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset);
276#ifdef LART_DEBUG
277 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n",__FUNCTION__,offset,*data);
278#endif
279 return (*data);
280}
281
282static __u32 read32 (__u32 offset)
283{
284 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
285#ifdef LART_DEBUG
286 printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n",__FUNCTION__,offset,*data);
287#endif
288 return (*data);
289}
290
291static void write32 (__u32 x,__u32 offset)
292{
293 volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
294 *data = x;
295#ifdef LART_DEBUG
296 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,*data);
297#endif
298}
299
300/***************************************************************************************************/
301
302/*
303 * Probe for 16mbit flash memory on a LART board without doing
304 * too much damage. Since we need to write 1 dword to memory,
305 * we're f**cked if this happens to be DRAM since we can't
306 * restore the memory (otherwise we might exit Read Array mode).
307 *
308 * Returns 1 if we found 16mbit flash memory on LART, 0 otherwise.
309 */
310static int flash_probe (void)
311{
312 __u32 manufacturer,devtype;
313
314 /* setup "Read Identifier Codes" mode */
315 write32 (DATA_TO_FLASH (READ_ID_CODES),0x00000000);
316
317 /* probe U2. U2/U3 returns the same data since the first 3
318    * address lines are mangled in the same way */
319 manufacturer = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000000)));
320 devtype = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000001)));
321
322 /* put the flash back into command mode */
323 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
324
325   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
326}
327
328/*
329 * Erase one block of flash memory at offset ``offset'' which is any
330 * address within the block which should be erased.
331 *
332 * Returns 1 if successful, 0 otherwise.
333 */
334static inline int erase_block (__u32 offset)
335{
336 __u32 status;
337
338#ifdef LART_DEBUG
339 printk (KERN_DEBUG "%s(): 0x%.8x\n",__FUNCTION__,offset);
340#endif
341
342 /* erase and confirm */
343 write32 (DATA_TO_FLASH (ERASE_SETUP),offset);
344 write32 (DATA_TO_FLASH (ERASE_CONFIRM),offset);
345
346 /* wait for block erase to finish */
347 do
348 {
349 write32 (DATA_TO_FLASH (STATUS_READ),offset);
350 status = FLASH_TO_DATA (read32 (offset));
351 }
352 while ((~status & STATUS_BUSY) != 0);
353
354 /* put the flash back into command mode */
355 write32 (DATA_TO_FLASH (READ_ARRAY),offset);
356
357   /* was the erase successful? */
358 if ((status & STATUS_ERASE_ERR))
359 {
360 printk (KERN_WARNING "%s: erase error at address 0x%.8x.\n",module_name,offset);
361 return (0);
362 }
363
364 return (1);
365}
366
367static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
368{
369 __u32 addr,len;
370 int i,first;
371
372#ifdef LART_DEBUG
373 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n",__FUNCTION__,instr->addr,instr->len);
374#endif
375
376 /* sanity checks */
377 if (instr->addr + instr->len > mtd->size) return (-EINVAL);
378
379 /*
380 * check that both start and end of the requested erase are
381 * aligned with the erasesize at the appropriate addresses.
382 *
383 * skip all erase regions which are ended before the start of
384 * the requested erase. Actually, to save on the calculations,
385 * we skip to the first erase region which starts after the
386 * start of the requested erase, and then go back one.
387 */
388 for (i = 0; i < mtd->numeraseregions && instr->addr >= mtd->eraseregions[i].offset; i++) ;
389 i--;
390
391 /*
392 * ok, now i is pointing at the erase region in which this
393 * erase request starts. Check the start of the requested
394 * erase range is aligned with the erase size which is in
395 * effect here.
396 */
397 if (instr->addr & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
398
399 /* Remember the erase region we start on */
400 first = i;
401
402 /*
403 * next, check that the end of the requested erase is aligned
404 * with the erase region at that address.
405 *
406 * as before, drop back one to point at the region in which
407 * the address actually falls
408 */
409 for (; i < mtd->numeraseregions && instr->addr + instr->len >= mtd->eraseregions[i].offset; i++) ;
410 i--;
411
412 /* is the end aligned on a block boundary? */
413 if ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
414
415 addr = instr->addr;
416 len = instr->len;
417
418 i = first;
419
420 /* now erase those blocks */
421 while (len)
422 {
423 if (!erase_block (addr))
424 {
425 instr->state = MTD_ERASE_FAILED;
426 return (-EIO);
427 }
428
429 addr += mtd->eraseregions[i].erasesize;
430 len -= mtd->eraseregions[i].erasesize;
431
432 if (addr == mtd->eraseregions[i].offset + (mtd->eraseregions[i].erasesize * mtd->eraseregions[i].numblocks)) i++;
433 }
434
435 instr->state = MTD_ERASE_DONE;
436 mtd_erase_callback(instr);
437
438 return (0);
439}
440
441static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf)
442{
443#ifdef LART_DEBUG
444 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) from,len);
445#endif
446
447 /* sanity checks */
448 if (!len) return (0);
449 if (from + len > mtd->size) return (-EINVAL);
450
451 /* we always read len bytes */
452 *retlen = len;
453
454 /* first, we read bytes until we reach a dword boundary */
455 if (from & (BUSWIDTH - 1))
456 {
457 int gap = BUSWIDTH - (from & (BUSWIDTH - 1));
458
459 while (len && gap--) *buf++ = read8 (from++), len--;
460 }
461
462 /* now we read dwords until we reach a non-dword boundary */
463 while (len >= BUSWIDTH)
464 {
465 *((__u32 *) buf) = read32 (from);
466
467 buf += BUSWIDTH;
468 from += BUSWIDTH;
469 len -= BUSWIDTH;
470 }
471
472 /* top up the last unaligned bytes */
473 if (len & (BUSWIDTH - 1))
474 while (len--) *buf++ = read8 (from++);
475
476 return (0);
477}
478
479/*
480 * Write one dword ``x'' to flash memory at offset ``offset''. ``offset''
481 * must be 32-bit aligned, i.e. it must be on a dword boundary.
482 *
483 * Returns 1 if successful, 0 otherwise.
484 */
485static inline int write_dword (__u32 offset,__u32 x)
486{
487 __u32 status;
488
489#ifdef LART_DEBUG
490 printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,x);
491#endif
492
493 /* setup writing */
494 write32 (DATA_TO_FLASH (PGM_SETUP),offset);
495
496 /* write the data */
497 write32 (x,offset);
498
499 /* wait for the write to finish */
500 do
501 {
502 write32 (DATA_TO_FLASH (STATUS_READ),offset);
503 status = FLASH_TO_DATA (read32 (offset));
504 }
505 while ((~status & STATUS_BUSY) != 0);
506
507 /* put the flash back into command mode */
508 write32 (DATA_TO_FLASH (READ_ARRAY),offset);
509
510	/* was the write successful? */
511 if ((status & STATUS_PGM_ERR) || read32 (offset) != x)
512 {
513 printk (KERN_WARNING "%s: write error at address 0x%.8x.\n",module_name,offset);
514 return (0);
515 }
516
517 return (1);
518}
519
520static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen,const u_char *buf)
521{
522 __u8 tmp[4];
523 int i,n;
524
525#ifdef LART_DEBUG
526 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) to,len);
527#endif
528
529 *retlen = 0;
530
531 /* sanity checks */
532 if (!len) return (0);
533 if (to + len > mtd->size) return (-EINVAL);
534
535	/* first, we write a 0xFF-padded dword to get up to a dword boundary */
536 if (to & (BUSWIDTH - 1))
537 {
538 __u32 aligned = to & ~(BUSWIDTH - 1);
539 int gap = to - aligned;
540
541 i = n = 0;
542
543 while (gap--) tmp[i++] = 0xFF;
544 while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--;
545 while (i < BUSWIDTH) tmp[i++] = 0xFF;
546
547 if (!write_dword (aligned,*((__u32 *) tmp))) return (-EIO);
548
549 to += n;
550 buf += n;
551 *retlen += n;
552 }
553
554 /* now we write dwords until we reach a non-dword boundary */
555 while (len >= BUSWIDTH)
556 {
557 if (!write_dword (to,*((__u32 *) buf))) return (-EIO);
558
559 to += BUSWIDTH;
560 buf += BUSWIDTH;
561 *retlen += BUSWIDTH;
562 len -= BUSWIDTH;
563 }
564
565 /* top up the last unaligned bytes, padded with 0xFF.... */
566 if (len & (BUSWIDTH - 1))
567 {
568 i = n = 0;
569
570 while (len--) tmp[i++] = buf[n++];
571 while (i < BUSWIDTH) tmp[i++] = 0xFF;
572
573 if (!write_dword (to,*((__u32 *) tmp))) return (-EIO);
574
575 *retlen += n;
576 }
577
578 return (0);
579}
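/*
 * Illustrative example of the padding above (assuming BUSWIDTH is the
 * 4-byte bus width used throughout this driver): a 3-byte write to
 * offset 0x1002 becomes
 *
 *	write_dword (0x1000, tmp = {0xFF, 0xFF, buf[0], buf[1]})
 *	write_dword (0x1004, tmp = {buf[2], 0xFF, 0xFF, 0xFF})
 *
 * The 0xFF padding is harmless on this flash, since programming can only
 * clear bits, so the neighbouring bytes keep their previous contents.
 */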
580
581/***************************************************************************************************/
582
583#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
584
585static struct mtd_info mtd;
586
587static struct mtd_erase_region_info erase_regions[] = {
588 /* parameter blocks */
589 {
590 .offset = 0x00000000,
591 .erasesize = FLASH_BLOCKSIZE_PARAM,
592 .numblocks = FLASH_NUMBLOCKS_16m_PARAM,
593 },
594 /* main blocks */
595 {
596 .offset = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM,
597 .erasesize = FLASH_BLOCKSIZE_MAIN,
598 .numblocks = FLASH_NUMBLOCKS_16m_MAIN,
599 }
600};
601
602#ifdef HAVE_PARTITIONS
603static struct mtd_partition lart_partitions[] = {
604 /* blob */
605 {
606 .name = "blob",
607 .offset = BLOB_START,
608 .size = BLOB_LEN,
609 },
610 /* kernel */
611 {
612 .name = "kernel",
613 .offset = KERNEL_START, /* MTDPART_OFS_APPEND */
614 .size = KERNEL_LEN,
615 },
616 /* initial ramdisk / file system */
617 {
618 .name = "file system",
619 .offset = INITRD_START, /* MTDPART_OFS_APPEND */
620 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */
621 }
622};
623#endif
624
625int __init lart_flash_init (void)
626{
627 int result;
628 memset (&mtd,0,sizeof (mtd));
629 printk ("MTD driver for LART. Written by Abraham vd Merwe <abraham@2d3d.co.za>\n");
630 printk ("%s: Probing for 28F160x3 flash on LART...\n",module_name);
631 if (!flash_probe ())
632 {
633 printk (KERN_WARNING "%s: Found no LART compatible flash device\n",module_name);
634 return (-ENXIO);
635 }
636 printk ("%s: This looks like a LART board to me.\n",module_name);
637 mtd.name = module_name;
638 mtd.type = MTD_NORFLASH;
639 mtd.flags = MTD_CAP_NORFLASH;
640 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
641 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
642 mtd.numeraseregions = NB_OF (erase_regions);
643 mtd.eraseregions = erase_regions;
644 mtd.erase = flash_erase;
645 mtd.read = flash_read;
646 mtd.write = flash_write;
647 mtd.owner = THIS_MODULE;
648
649#ifdef LART_DEBUG
650 printk (KERN_DEBUG
651 "mtd.name = %s\n"
652 "mtd.size = 0x%.8x (%uM)\n"
653 "mtd.erasesize = 0x%.8x (%uK)\n"
654 "mtd.numeraseregions = %d\n",
655 mtd.name,
656 mtd.size,mtd.size / (1024*1024),
657 mtd.erasesize,mtd.erasesize / 1024,
658 mtd.numeraseregions);
659
660 if (mtd.numeraseregions)
661 for (result = 0; result < mtd.numeraseregions; result++)
662 printk (KERN_DEBUG
663 "\n\n"
664 "mtd.eraseregions[%d].offset = 0x%.8x\n"
665 "mtd.eraseregions[%d].erasesize = 0x%.8x (%uK)\n"
666 "mtd.eraseregions[%d].numblocks = %d\n",
667 result,mtd.eraseregions[result].offset,
668 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
669 result,mtd.eraseregions[result].numblocks);
670
671#ifdef HAVE_PARTITIONS
672 printk ("\npartitions = %d\n",NB_OF (lart_partitions));
673
674 for (result = 0; result < NB_OF (lart_partitions); result++)
675 printk (KERN_DEBUG
676 "\n\n"
677 "lart_partitions[%d].name = %s\n"
678 "lart_partitions[%d].offset = 0x%.8x\n"
679 "lart_partitions[%d].size = 0x%.8x (%uK)\n",
680 result,lart_partitions[result].name,
681 result,lart_partitions[result].offset,
682 result,lart_partitions[result].size,lart_partitions[result].size / 1024);
683#endif
684#endif
685
686#ifndef HAVE_PARTITIONS
687 result = add_mtd_device (&mtd);
688#else
689 result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions));
690#endif
691
692 return (result);
693}
694
695void __exit lart_flash_exit (void)
696{
697#ifndef HAVE_PARTITIONS
698 del_mtd_device (&mtd);
699#else
700 del_mtd_partitions (&mtd);
701#endif
702}
703
704module_init (lart_flash_init);
705module_exit (lart_flash_exit);
706
707MODULE_LICENSE("GPL");
708MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
709MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
710
711
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
new file mode 100644
index 000000000000..380ff08d29e4
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -0,0 +1,326 @@
1/*
2 * Copyright (c) 2001 Maciej W. Rozycki
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * $Id: ms02-nv.c,v 1.8 2005/01/05 18:05:12 dwmw2 Exp $
10 */
11
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/mtd/mtd.h>
17#include <linux/slab.h>
18#include <linux/types.h>
19
20#include <asm/addrspace.h>
21#include <asm/bootinfo.h>
22#include <asm/dec/ioasic_addrs.h>
23#include <asm/dec/kn02.h>
24#include <asm/dec/kn03.h>
25#include <asm/io.h>
26#include <asm/paccess.h>
27
28#include "ms02-nv.h"
29
30
31static char version[] __initdata =
32 "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
33
34MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
35MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
36MODULE_LICENSE("GPL");
37
38
39/*
40 * Addresses at which we probe for an MS02-NV. Modules may be located
41 * at any 8MiB boundary within the 0MiB to 112MiB range, or at any 32MiB
42 * boundary within the 0MiB to 448MiB range. We don't support a module
43 * at 0MiB, though.
44 */
45static ulong ms02nv_addrs[] __initdata = {
46 0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
47 0x04800000, 0x04000000, 0x03800000, 0x03000000, 0x02800000,
48 0x02000000, 0x01800000, 0x01000000, 0x00800000
49};
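/*
 * Illustrative note: ms02nv_init() below shifts these base addresses left
 * by a stride of 2 on systems with 32MiB memory banks, so for example
 * 0x00800000 (the 8MiB slot) is probed at 0x02000000 (the 32MiB slot) and
 * the topmost address 0x07000000 is probed at 0x1c000000 (448MiB).
 */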
50
51static const char ms02nv_name[] = "DEC MS02-NV NVRAM";
52static const char ms02nv_res_diag_ram[] = "Diagnostic RAM";
53static const char ms02nv_res_user_ram[] = "General-purpose RAM";
54static const char ms02nv_res_csr[] = "Control and status register";
55
56static struct mtd_info *root_ms02nv_mtd;
57
58
59static int ms02nv_read(struct mtd_info *mtd, loff_t from,
60 size_t len, size_t *retlen, u_char *buf)
61{
62 struct ms02nv_private *mp = mtd->priv;
63
64 if (from + len > mtd->size)
65 return -EINVAL;
66
67 memcpy(buf, mp->uaddr + from, len);
68 *retlen = len;
69
70 return 0;
71}
72
73static int ms02nv_write(struct mtd_info *mtd, loff_t to,
74 size_t len, size_t *retlen, const u_char *buf)
75{
76 struct ms02nv_private *mp = mtd->priv;
77
78 if (to + len > mtd->size)
79 return -EINVAL;
80
81 memcpy(mp->uaddr + to, buf, len);
82 *retlen = len;
83
84 return 0;
85}
86
87
88static inline uint ms02nv_probe_one(ulong addr)
89{
90 ms02nv_uint *ms02nv_diagp;
91 ms02nv_uint *ms02nv_magicp;
92 uint ms02nv_diag;
93 uint ms02nv_magic;
94 size_t size;
95
96 int err;
97
98 /*
99 * The firmware writes MS02NV_ID at MS02NV_MAGIC and also
100 * a diagnostic status at MS02NV_DIAG.
101 */
102 ms02nv_diagp = (ms02nv_uint *)(KSEG1ADDR(addr + MS02NV_DIAG));
103 ms02nv_magicp = (ms02nv_uint *)(KSEG1ADDR(addr + MS02NV_MAGIC));
104 err = get_dbe(ms02nv_magic, ms02nv_magicp);
105 if (err)
106 return 0;
107 if (ms02nv_magic != MS02NV_ID)
108 return 0;
109
110 ms02nv_diag = *ms02nv_diagp;
111 size = (ms02nv_diag & MS02NV_DIAG_SIZE_MASK) << MS02NV_DIAG_SIZE_SHIFT;
112 if (size > MS02NV_CSR)
113 size = MS02NV_CSR;
114
115 return size;
116}
117
118static int __init ms02nv_init_one(ulong addr)
119{
120 struct mtd_info *mtd;
121 struct ms02nv_private *mp;
122 struct resource *mod_res;
123 struct resource *diag_res;
124 struct resource *user_res;
125 struct resource *csr_res;
126 ulong fixaddr;
127 size_t size, fixsize;
128
129 static int version_printed;
130
131 int ret = -ENODEV;
132
133 /* The module decodes 8MiB of address space. */
134 mod_res = kmalloc(sizeof(*mod_res), GFP_KERNEL);
135 if (!mod_res)
136 return -ENOMEM;
137
138 memset(mod_res, 0, sizeof(*mod_res));
139 mod_res->name = ms02nv_name;
140 mod_res->start = addr;
141 mod_res->end = addr + MS02NV_SLOT_SIZE - 1;
142 mod_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
143 if (request_resource(&iomem_resource, mod_res) < 0)
144 goto err_out_mod_res;
145
146 size = ms02nv_probe_one(addr);
147 if (!size)
148 goto err_out_mod_res_rel;
149
150 if (!version_printed) {
151 printk(KERN_INFO "%s", version);
152 version_printed = 1;
153 }
154
155 ret = -ENOMEM;
156 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
157 if (!mtd)
158 goto err_out_mod_res_rel;
159 memset(mtd, 0, sizeof(*mtd));
160 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
161 if (!mp)
162 goto err_out_mtd;
163 memset(mp, 0, sizeof(*mp));
164
165 mtd->priv = mp;
166 mp->resource.module = mod_res;
167
168 /* Firmware's diagnostic NVRAM area. */
169 diag_res = kmalloc(sizeof(*diag_res), GFP_KERNEL);
170 if (!diag_res)
171 goto err_out_mp;
172
173 memset(diag_res, 0, sizeof(*diag_res));
174 diag_res->name = ms02nv_res_diag_ram;
175 diag_res->start = addr;
176 diag_res->end = addr + MS02NV_RAM - 1;
177 diag_res->flags = IORESOURCE_BUSY;
178 request_resource(mod_res, diag_res);
179
180 mp->resource.diag_ram = diag_res;
181
182 /* User-available general-purpose NVRAM area. */
183 user_res = kmalloc(sizeof(*user_res), GFP_KERNEL);
184 if (!user_res)
185 goto err_out_diag_res;
186
187 memset(user_res, 0, sizeof(*user_res));
188 user_res->name = ms02nv_res_user_ram;
189 user_res->start = addr + MS02NV_RAM;
190 user_res->end = addr + size - 1;
191 user_res->flags = IORESOURCE_BUSY;
192 request_resource(mod_res, user_res);
193
194 mp->resource.user_ram = user_res;
195
196 /* Control and status register. */
197 csr_res = kmalloc(sizeof(*csr_res), GFP_KERNEL);
198 if (!csr_res)
199 goto err_out_user_res;
200
201 memset(csr_res, 0, sizeof(*csr_res));
202 csr_res->name = ms02nv_res_csr;
203 csr_res->start = addr + MS02NV_CSR;
204 csr_res->end = addr + MS02NV_CSR + 3;
205 csr_res->flags = IORESOURCE_BUSY;
206 request_resource(mod_res, csr_res);
207
208 mp->resource.csr = csr_res;
209
210 mp->addr = phys_to_virt(addr);
211 mp->size = size;
212
213 /*
214 * Hide the firmware's diagnostic area. It may get destroyed
215 * upon a reboot. Take paging into account for mapping support.
216 */
217 fixaddr = (addr + MS02NV_RAM + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
218 fixsize = (size - (fixaddr - addr)) & ~(PAGE_SIZE - 1);
219 mp->uaddr = phys_to_virt(fixaddr);
220
221 mtd->type = MTD_RAM;
222 mtd->flags = MTD_CAP_RAM | MTD_XIP;
223 mtd->size = fixsize;
224 mtd->name = (char *)ms02nv_name;
225 mtd->owner = THIS_MODULE;
226 mtd->read = ms02nv_read;
227 mtd->write = ms02nv_write;
228
229 ret = -EIO;
230 if (add_mtd_device(mtd)) {
231 printk(KERN_ERR
232 "ms02-nv: Unable to register MTD device, aborting!\n");
233 goto err_out_csr_res;
234 }
235
236 printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %uMiB.\n",
237 mtd->index, ms02nv_name, addr, size >> 20);
238
239 mp->next = root_ms02nv_mtd;
240 root_ms02nv_mtd = mtd;
241
242 return 0;
243
244
245err_out_csr_res:
246 release_resource(csr_res);
247 kfree(csr_res);
248err_out_user_res:
249 release_resource(user_res);
250 kfree(user_res);
251err_out_diag_res:
252 release_resource(diag_res);
253 kfree(diag_res);
254err_out_mp:
255 kfree(mp);
256err_out_mtd:
257 kfree(mtd);
258err_out_mod_res_rel:
259 release_resource(mod_res);
260err_out_mod_res:
261 kfree(mod_res);
262 return ret;
263}
264
265static void __exit ms02nv_remove_one(void)
266{
267 struct mtd_info *mtd = root_ms02nv_mtd;
268 struct ms02nv_private *mp = mtd->priv;
269
270 root_ms02nv_mtd = mp->next;
271
272 del_mtd_device(mtd);
273
274 release_resource(mp->resource.csr);
275 kfree(mp->resource.csr);
276 release_resource(mp->resource.user_ram);
277 kfree(mp->resource.user_ram);
278 release_resource(mp->resource.diag_ram);
279 kfree(mp->resource.diag_ram);
280 release_resource(mp->resource.module);
281 kfree(mp->resource.module);
282 kfree(mp);
283 kfree(mtd);
284}
285
286
287static int __init ms02nv_init(void)
288{
289 volatile u32 *csr;
290 uint stride = 0;
291 int count = 0;
292 int i;
293
294 switch (mips_machtype) {
295 case MACH_DS5000_200:
296 csr = (volatile u32 *)KN02_CSR_BASE;
297 if (*csr & KN02_CSR_BNK32M)
298 stride = 2;
299 break;
300 case MACH_DS5000_2X0:
301 case MACH_DS5900:
302 csr = (volatile u32 *)KN03_MCR_BASE;
303 if (*csr & KN03_MCR_BNK32M)
304 stride = 2;
305 break;
306 default:
307 return -ENODEV;
308 break;
309 }
310
311 for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++)
312 if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
313 count++;
314
315 return (count > 0) ? 0 : -ENODEV;
316}
317
318static void __exit ms02nv_cleanup(void)
319{
320 while (root_ms02nv_mtd)
321 ms02nv_remove_one();
322}
323
324
325module_init(ms02nv_init);
326module_exit(ms02nv_cleanup);
diff --git a/drivers/mtd/devices/ms02-nv.h b/drivers/mtd/devices/ms02-nv.h
new file mode 100644
index 000000000000..8a6eef7cfee3
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (c) 2001, 2003 Maciej W. Rozycki
3 *
4 * DEC MS02-NV (54-20948-01) battery backed-up NVRAM module for
5 * DECstation/DECsystem 5000/2x0 and DECsystem 5900 and 5900/260
6 * systems.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * $Id: ms02-nv.h,v 1.3 2003/08/19 09:25:36 dwmw2 Exp $
14 */
15
16#include <linux/ioport.h>
17#include <linux/mtd/mtd.h>
18
19/*
20 * Addresses are decoded as follows:
21 *
22 * 0x000000 - 0x3fffff SRAM
23 * 0x400000 - 0x7fffff CSR
24 *
25 * Within the SRAM area the following ranges are forced by the system
26 * firmware:
27 *
28 * 0x000000 - 0x0003ff diagnostic area, destroyed upon a reboot
29 * 0x000400 - ENDofRAM storage area, available to operating systems
30 *
31 * but we can't really use the available area right from 0x000400 as
32 * the first word is used by the firmware as a status flag passed
33 * from an operating system. If anything but the valid data magic
34 * ID value is found, the firmware considers the SRAM clean, i.e.
35 * containing no valid data, and disables the battery resulting in
36 * data being erased as soon as power is switched off. So the choice
37 * for the start address of the user-available area is 0x001000, which is
38 * nicely page aligned. The area between 0x000404 and 0x000fff may
39 * be used by the driver for its own needs.
40 *
41 * The diagnostic area defines two status words to be read by an
42 * operating system: a magic ID to distinguish an MS02-NV board from
43 * anything else, and status information providing the results of tests
44 * as well as the size of SRAM available, which can be 1MiB or 2MiB
45 * (that's what the firmware handles; no idea if 2MiB modules ever
46 * existed).
47 *
48 * The firmware only handles the MS02-NV board if installed in the
49 * last (15th) slot, so for any other location the status information
50 * stored in the SRAM cannot be relied upon. But from the hardware
51 * point of view there is no problem using up to 14 such boards in a
52 * system -- only the 1st slot needs to be filled with a DRAM module.
53 * The MS02-NV board is ECC-protected, like other MS02 memory boards.
54 *
55 * The state of the battery as provided by the CSR is reflected on
56 * the two onboard LEDs. When facing the battery side of the board,
57 * with the LEDs at the top left and the battery at the bottom right
58 * (i.e. looking from the back side of the system box), their meaning
59 * is as follows (the system has to be powered on):
60 *
61 * left LED battery disable status: lit = enabled
62 * right LED battery condition status: lit = OK
63 */
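/*
 * For illustration: an operating system that wants to trust the
 * general-purpose RAM would typically check the valid-data magic ID
 * first, using the constants defined below, along the lines of
 *
 *	volatile u32 *validp = (volatile u32 *)(sram_base + MS02NV_VALID);
 *	int sram_has_valid_data = (*validp == MS02NV_VALID_ID);
 *
 * where sram_base is a hypothetical virtual mapping of the module's SRAM.
 */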
64
65/* MS02-NV iomem register offsets. */
66#define MS02NV_CSR 0x400000 /* control & status register */
67
68/* MS02-NV CSR status bits. */
69#define MS02NV_CSR_BATT_OK 0x01 /* battery OK */
70#define MS02NV_CSR_BATT_OFF 0x02 /* battery disabled */
71
72
73/* MS02-NV memory offsets. */
74#define MS02NV_DIAG 0x0003f8 /* diagnostic status */
75#define MS02NV_MAGIC 0x0003fc /* MS02-NV magic ID */
76#define MS02NV_VALID 0x000400 /* valid data magic ID */
77#define MS02NV_RAM 0x001000 /* user-exposed RAM start */
78
79/* MS02-NV diagnostic status bits. */
80#define MS02NV_DIAG_TEST 0x01 /* SRAM test done (?) */
81#define MS02NV_DIAG_RO 0x02 /* SRAM r/o test done */
82#define MS02NV_DIAG_RW 0x04 /* SRAM r/w test done */
83#define MS02NV_DIAG_FAIL 0x08 /* SRAM test failed */
84#define MS02NV_DIAG_SIZE_MASK 0xf0 /* SRAM size mask */
85#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* SRAM size shift (left) */
86
87/* MS02-NV general constants. */
88#define MS02NV_ID 0x03021966 /* MS02-NV magic ID value */
89#define MS02NV_VALID_ID 0xbd100248 /* valid data magic ID value */
90#define MS02NV_SLOT_SIZE 0x800000 /* size of the address space
91 decoded by the module */
92
93
94typedef volatile u32 ms02nv_uint;
95
96struct ms02nv_private {
97 struct mtd_info *next;
98 struct {
99 struct resource *module;
100 struct resource *diag_ram;
101 struct resource *user_ram;
102 struct resource *csr;
103 } resource;
104 u_char *addr;
105 size_t size;
106 u_char *uaddr;
107};
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
new file mode 100644
index 000000000000..edac4156d69c
--- /dev/null
+++ b/drivers/mtd/devices/mtdram.c
@@ -0,0 +1,235 @@
1/*
2 * mtdram - a test mtd device
3 * $Id: mtdram.c,v 1.35 2005/01/05 18:05:12 dwmw2 Exp $
4 * Author: Alexander Larsson <alex@cendio.se>
5 *
6 * Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
7 *
8 * This code is GPL
9 *
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/ioport.h>
16#include <linux/vmalloc.h>
17#include <linux/init.h>
18#include <linux/mtd/compatmac.h>
19#include <linux/mtd/mtd.h>
20
21#ifndef CONFIG_MTDRAM_ABS_POS
22 #define CONFIG_MTDRAM_ABS_POS 0
23#endif
24
25#if CONFIG_MTDRAM_ABS_POS > 0
26 #include <asm/io.h>
27#endif
28
29#ifdef MODULE
30static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
31static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
32module_param(total_size,ulong,0);
33MODULE_PARM_DESC(total_size, "Total device size in KiB");
34module_param(erase_size,ulong,0);
35MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
36#define MTDRAM_TOTAL_SIZE (total_size * 1024)
37#define MTDRAM_ERASE_SIZE (erase_size * 1024)
38#else
39#define MTDRAM_TOTAL_SIZE (CONFIG_MTDRAM_TOTAL_SIZE * 1024)
40#define MTDRAM_ERASE_SIZE (CONFIG_MTDRAM_ERASE_SIZE * 1024)
41#endif
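/*
 * Usage sketch for the module parameters above (both sizes are in KiB;
 * the exact command line is illustrative):
 *
 *	modprobe mtdram total_size=2048 erase_size=128
 *
 * creates a 2MiB RAM-backed MTD device with 128KiB erase blocks.
 */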
42
43
44// We could store these in the mtd structure, but we only support 1 device..
45static struct mtd_info *mtd_info;
46
47
48static int
49ram_erase(struct mtd_info *mtd, struct erase_info *instr)
50{
51 DEBUG(MTD_DEBUG_LEVEL2, "ram_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len);
52 if (instr->addr + instr->len > mtd->size) {
53 DEBUG(MTD_DEBUG_LEVEL1, "ram_erase() out of bounds (%ld > %ld)\n", (long)(instr->addr + instr->len), (long)mtd->size);
54 return -EINVAL;
55 }
56
57 memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
58
59 instr->state = MTD_ERASE_DONE;
60 mtd_erase_callback(instr);
61
62 return 0;
63}
64
65static int ram_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
66{
67 if (from + len > mtd->size)
68 return -EINVAL;
69
70 *mtdbuf = mtd->priv + from;
71 *retlen = len;
72 return 0;
73}
74
75static void ram_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
76 size_t len)
77{
78 DEBUG(MTD_DEBUG_LEVEL2, "ram_unpoint\n");
79}
80
81static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
82 size_t *retlen, u_char *buf)
83{
84 DEBUG(MTD_DEBUG_LEVEL2, "ram_read(pos:%ld, len:%ld)\n", (long)from, (long)len);
85 if (from + len > mtd->size) {
86 DEBUG(MTD_DEBUG_LEVEL1, "ram_read() out of bounds (%ld > %ld)\n", (long)(from + len), (long)mtd->size);
87 return -EINVAL;
88 }
89
90 memcpy(buf, mtd->priv + from, len);
91
92 *retlen=len;
93 return 0;
94}
95
96static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
97 size_t *retlen, const u_char *buf)
98{
99 DEBUG(MTD_DEBUG_LEVEL2, "ram_write(pos:%ld, len:%ld)\n", (long)to, (long)len);
100 if (to + len > mtd->size) {
101 DEBUG(MTD_DEBUG_LEVEL1, "ram_write() out of bounds (%ld > %ld)\n", (long)(to + len), (long)mtd->size);
102 return -EINVAL;
103 }
104
105 memcpy ((char *)mtd->priv + to, buf, len);
106
107 *retlen=len;
108 return 0;
109}
110
111static void __exit cleanup_mtdram(void)
112{
113 if (mtd_info) {
114 del_mtd_device(mtd_info);
115#if CONFIG_MTDRAM_TOTAL_SIZE > 0
116 if (mtd_info->priv)
117#if CONFIG_MTDRAM_ABS_POS > 0
118 iounmap(mtd_info->priv);
119#else
120 vfree(mtd_info->priv);
121#endif
122#endif
123 kfree(mtd_info);
124 }
125}
126
127int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
128 unsigned long size, char *name)
129{
130 memset(mtd, 0, sizeof(*mtd));
131
132 /* Setup the MTD structure */
133 mtd->name = name;
134 mtd->type = MTD_RAM;
135 mtd->flags = MTD_CAP_RAM;
136 mtd->size = size;
137 mtd->erasesize = MTDRAM_ERASE_SIZE;
138 mtd->priv = mapped_address;
139
140 mtd->owner = THIS_MODULE;
141 mtd->erase = ram_erase;
142 mtd->point = ram_point;
143 mtd->unpoint = ram_unpoint;
144 mtd->read = ram_read;
145 mtd->write = ram_write;
146
147 if (add_mtd_device(mtd)) {
148 return -EIO;
149 }
150
151 return 0;
152}
153
154#if CONFIG_MTDRAM_TOTAL_SIZE > 0
155#if CONFIG_MTDRAM_ABS_POS > 0
156static int __init init_mtdram(void)
157{
158 void *addr;
159 int err;
160 /* Allocate some memory */
161 mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
162 if (!mtd_info)
163 return -ENOMEM;
164
165 addr = ioremap(CONFIG_MTDRAM_ABS_POS, MTDRAM_TOTAL_SIZE);
166 if (!addr) {
167 DEBUG(MTD_DEBUG_LEVEL1,
168			"Failed to ioremap() memory region of size %ld at ABS_POS:%ld\n",
169 (long)MTDRAM_TOTAL_SIZE, (long)CONFIG_MTDRAM_ABS_POS);
170 kfree(mtd_info);
171 mtd_info = NULL;
172 return -ENOMEM;
173 }
174 err = mtdram_init_device(mtd_info, addr,
175 MTDRAM_TOTAL_SIZE, "mtdram test device");
176 if (err)
177 {
178 iounmap(addr);
179 kfree(mtd_info);
180 mtd_info = NULL;
181 return err;
182 }
183 memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
184 return err;
185}
186
187#else /* CONFIG_MTDRAM_ABS_POS > 0 */
188
189static int __init init_mtdram(void)
190{
191 void *addr;
192 int err;
193 /* Allocate some memory */
194 mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
195 if (!mtd_info)
196 return -ENOMEM;
197
198 addr = vmalloc(MTDRAM_TOTAL_SIZE);
199 if (!addr) {
200 DEBUG(MTD_DEBUG_LEVEL1,
201 "Failed to vmalloc memory region of size %ld\n",
202 (long)MTDRAM_TOTAL_SIZE);
203 kfree(mtd_info);
204 mtd_info = NULL;
205 return -ENOMEM;
206 }
207 err = mtdram_init_device(mtd_info, addr,
208 MTDRAM_TOTAL_SIZE, "mtdram test device");
209 if (err)
210 {
211 vfree(addr);
212 kfree(mtd_info);
213 mtd_info = NULL;
214 return err;
215 }
216 memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
217 return err;
218}
219#endif /* !(CONFIG_MTDRAM_ABS_POS > 0) */
220
221#else /* CONFIG_MTDRAM_TOTAL_SIZE > 0 */
222
223static int __init init_mtdram(void)
224{
225 return 0;
226}
227#endif /* !(CONFIG_MTDRAM_TOTAL_SIZE > 0) */
228
229module_init(init_mtdram);
230module_exit(cleanup_mtdram);
231
232MODULE_LICENSE("GPL");
233MODULE_AUTHOR("Alexander Larsson <alexl@redhat.com>");
234MODULE_DESCRIPTION("Simulated MTD driver for testing");
235
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
new file mode 100644
index 000000000000..5f8e164ddb71
--- /dev/null
+++ b/drivers/mtd/devices/phram.c
@@ -0,0 +1,285 @@
1/**
2 * $Id: phram.c,v 1.11 2005/01/05 18:05:13 dwmw2 Exp $
3 *
4 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
5 * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de>
6 *
7 * Usage:
8 *
9 * one command line parameter per device, each in the form:
10 * phram=<name>,<start>,<len>
11 * <name> may be up to 63 characters.
12 * <start> and <len> can be octal, decimal or hexadecimal. If followed
13 * by "ki", "Mi" or "Gi", the numbers will be interpreted as kibibytes,
14 * mebibytes or gibibytes (i.e. successive factors of 1024).
15 *
16 * Example:
17 * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
18 *
19 */
20
21#include <asm/io.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/list.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/mtd/mtd.h>
28
29#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args)
30
31struct phram_mtd_list {
32 struct mtd_info mtd;
33 struct list_head list;
34};
35
36static LIST_HEAD(phram_list);
37
38
39
40static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
41{
42 u_char *start = mtd->priv;
43
44 if (instr->addr + instr->len > mtd->size)
45 return -EINVAL;
46
47 memset(start + instr->addr, 0xff, instr->len);
48
49 /* This'll catch a few races. Free the thing before returning :)
50 * I don't feel at all ashamed. This kind of thing is possible anyway
51 * with flash, but unlikely.
52 */
53
54 instr->state = MTD_ERASE_DONE;
55
56 mtd_erase_callback(instr);
57
58 return 0;
59}
60
61static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
62 size_t *retlen, u_char **mtdbuf)
63{
64 u_char *start = mtd->priv;
65
66 if (from + len > mtd->size)
67 return -EINVAL;
68
69 *mtdbuf = start + from;
70 *retlen = len;
71 return 0;
72}
73
74static void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
75{
76}
77
78static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
79 size_t *retlen, u_char *buf)
80{
81 u_char *start = mtd->priv;
82
83 if (from + len > mtd->size)
84 return -EINVAL;
85
86 memcpy(buf, start + from, len);
87
88 *retlen = len;
89 return 0;
90}
91
92static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
93 size_t *retlen, const u_char *buf)
94{
95 u_char *start = mtd->priv;
96
97 if (to + len > mtd->size)
98 return -EINVAL;
99
100 memcpy(start + to, buf, len);
101
102 *retlen = len;
103 return 0;
104}
105
106
107
108static void unregister_devices(void)
109{
110 struct phram_mtd_list *this;
111
112 list_for_each_entry(this, &phram_list, list) {
113 del_mtd_device(&this->mtd);
114 iounmap(this->mtd.priv);
115 kfree(this);
116 }
117}
118
119static int register_device(char *name, unsigned long start, unsigned long len)
120{
121 struct phram_mtd_list *new;
122 int ret = -ENOMEM;
123
124 new = kmalloc(sizeof(*new), GFP_KERNEL);
125 if (!new)
126 goto out0;
127
128 memset(new, 0, sizeof(*new));
129
130 ret = -EIO;
131 new->mtd.priv = ioremap(start, len);
132 if (!new->mtd.priv) {
133 ERROR("ioremap failed\n");
134 goto out1;
135 }
136
137
138 new->mtd.name = name;
139 new->mtd.size = len;
140 new->mtd.flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE;
141 new->mtd.erase = phram_erase;
142 new->mtd.point = phram_point;
143 new->mtd.unpoint = phram_unpoint;
144 new->mtd.read = phram_read;
145 new->mtd.write = phram_write;
146 new->mtd.owner = THIS_MODULE;
147 new->mtd.type = MTD_RAM;
148 new->mtd.erasesize = 0;
149
150 ret = -EAGAIN;
151 if (add_mtd_device(&new->mtd)) {
152 ERROR("Failed to register new device\n");
153 goto out2;
154 }
155
156 list_add_tail(&new->list, &phram_list);
157 return 0;
158
159out2:
160 iounmap(new->mtd.priv);
161out1:
162 kfree(new);
163out0:
164 return ret;
165}
166
167static int ustrtoul(const char *cp, char **endp, unsigned int base)
168{
169 unsigned long result = simple_strtoul(cp, endp, base);
170
171 switch (**endp) {
172 case 'G':
173 result *= 1024;
174 case 'M':
175 result *= 1024;
176 case 'k':
177 result *= 1024;
178 /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
179 if ((*endp)[1] == 'i')
180 (*endp) += 2;
181 }
182 return result;
183}
184
185static int parse_num32(uint32_t *num32, const char *token)
186{
187 char *endp;
188 unsigned long n;
189
190 n = ustrtoul(token, &endp, 0);
191 if (*endp)
192 return -EINVAL;
193
194 *num32 = n;
195 return 0;
196}
197
198static int parse_name(char **pname, const char *token)
199{
200 size_t len;
201 char *name;
202
203 len = strlen(token) + 1;
204 if (len > 64)
205 return -ENOSPC;
206
207 name = kmalloc(len, GFP_KERNEL);
208 if (!name)
209 return -ENOMEM;
210
211 strcpy(name, token);
212
213 *pname = name;
214 return 0;
215}
216
217#define parse_err(fmt, args...) do { \
218 ERROR(fmt , ## args); \
219 return 0; \
220} while (0)
221
222static int phram_setup(const char *val, struct kernel_param *kp)
223{
224 char buf[64+12+12], *str = buf;
225 char *token[3];
226 char *name;
227 uint32_t start;
228 uint32_t len;
229 int i, ret;
230
231 if (strnlen(val, sizeof(buf)) >= sizeof(buf))
232 parse_err("parameter too long\n");
233
234 strcpy(str, val);
235
236 for (i=0; i<3; i++)
237 token[i] = strsep(&str, ",");
238
239 if (str)
240 parse_err("too many arguments\n");
241
242 if (!token[2])
243 parse_err("not enough arguments\n");
244
245 ret = parse_name(&name, token[0]);
246 if (ret == -ENOMEM)
247 parse_err("out of memory\n");
248 if (ret == -ENOSPC)
249 parse_err("name too long\n");
250 if (ret)
251 return 0;
252
253 ret = parse_num32(&start, token[1]);
254 if (ret)
255 parse_err("illegal start address\n");
256
257 ret = parse_num32(&len, token[2]);
258 if (ret)
259 parse_err("illegal device length\n");
260
261 register_device(name, start, len);
262
263 return 0;
264}
265
266module_param_call(phram, phram_setup, NULL, NULL, 000);
267MODULE_PARM_DESC(phram,"Memory region to map. \"phram=<name>,<start>,<length>\"");
268
269
270static int __init init_phram(void)
271{
272 return 0;
273}
274
275static void __exit cleanup_phram(void)
276{
277 unregister_devices();
278}
279
280module_init(init_phram);
281module_exit(cleanup_phram);
282
283MODULE_LICENSE("GPL");
284MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
285MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
new file mode 100644
index 000000000000..5b3defadf884
--- /dev/null
+++ b/drivers/mtd/devices/pmc551.c
@@ -0,0 +1,843 @@
1/*
2 * $Id: pmc551.c,v 1.30 2005/01/05 18:05:13 dwmw2 Exp $
3 *
4 * PMC551 PCI Mezzanine Ram Device
5 *
6 * Author:
7 * Mark Ferrell <mferrell@mvista.com>
8 * Copyright 1999,2000 Nortel Networks
9 *
10 * License:
11 * As part of this driver was derived from the slram.c driver it
12 * falls under the same license, which is GNU General Public
13 * License v2
14 *
15 * Description:
16 * This driver is intended to support the PMC551 PCI Ram device
17 * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
18 * cPCI embedded systems. The device contains a single SROM
19 * that initially programs the V370PDC chipset onboard the
20 * device, and various banks of DRAM/SDRAM onboard. This driver
21 * implements this PCI Ram device as an MTD (Memory Technology
22 * Device) so that it can be used to hold a file system, or for
23 * added swap space in embedded systems. Since the memory on
24 * this board isn't as fast as main memory we do not try to hook
25 * it into main memory as that would simply reduce performance
26 * on the system. Using it as a block device allows us to use
27 * it as high speed swap or for a high speed disk device of some
28 * sort. Which becomes very useful on diskless systems in the
29 * embedded market I might add.
30 *
31 * Notes:
32 *       Due to what I assume is a buggy SROM, the 64M PMC551 I
33 *       have available claims that all 4 of its DRAM banks have 64M
34 * of ram configured (making a grand total of 256M onboard).
35 * This is slightly annoying since the BAR0 size reflects the
36 * aperture size, not the dram size, and the V370PDC supplies no
37 * other method for memory size discovery. This problem is
38 * mostly only relevant when compiled as a module, as the
39 *       unloading of the module with an aperture size smaller than
40 * the ram will cause the driver to detect the onboard memory
41 * size to be equal to the aperture size when the module is
42 * reloaded. Soooo, to help, the module supports an msize
43 * option to allow the specification of the onboard memory, and
44 * an asize option, to allow the specification of the aperture
45 *       size.  The aperture must be equal to or less than the memory
46 *       size; the driver will correct this if you screw it up.  This
47 *       problem is not relevant for compiled-in drivers, as compiled-in
48 *       drivers only init once.
49 *
50 * Credits:
51 * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
52 * initial example code of how to initialize this device and for
53 * help with questions I had concerning operation of the device.
54 *
55 * Most of the MTD code for this driver was originally written
56 * for the slram.o module in the MTD drivers package which
57 * allows the mapping of system memory into an MTD device.
58 * Since the PMC551 memory module is accessed in the same
59 * fashion as system memory, the slram.c code became a very nice
60 *       fit to the needs of this driver.  All we added was PCI
61 *       detection/initialization to the driver and automatic discovery
62 *       of the memory size via PCI probing.  Later changes by Corey
63 *       Minyard set up the card to utilize a 1M sliding aperture.
64 *
65 * Corey Minyard <minyard@nortelnetworks.com>
66 * * Modified driver to utilize a sliding aperture instead of
67 * mapping all memory into kernel space which turned out to
68 * be very wasteful.
69 * * Located a bug in the SROM's initialization sequence that
70 * made the memory unusable, added a fix to code to touch up
71 * the DRAM some.
72 *
73 * Bugs/FIXME's:
74 * * MUST fix the init function to not spin on a register
75 * waiting for it to set .. this does not safely handle busted
76 *       devices that never reset the register correctly, which will
77 *       cause the system to hang w/ a reboot being the only chance at
78 *       recovery. [sort of fixed, could be better]
79 * * Add I2C handling of the SROM so we can read the SROM's information
80 * about the aperture size. This should always accurately reflect the
81 * onboard memory size.
82 *       * Comb the init routine.  It's still a bit kludgy on a few things.
83 */
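/*
 * Usage sketch for the msize/asize options described in the notes above
 * (both are given in megabytes; the command line itself is illustrative):
 *
 *	modprobe pmc551 msize=64 asize=1
 *
 * claims 64M of onboard memory while accessing it through a 1M sliding
 * aperture.
 */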
84
85#include <linux/version.h>
86#include <linux/config.h>
87#include <linux/kernel.h>
88#include <linux/module.h>
89#include <asm/uaccess.h>
90#include <linux/types.h>
91#include <linux/sched.h>
92#include <linux/init.h>
93#include <linux/ptrace.h>
94#include <linux/slab.h>
95#include <linux/string.h>
96#include <linux/timer.h>
97#include <linux/major.h>
98#include <linux/fs.h>
99#include <linux/ioctl.h>
100#include <asm/io.h>
101#include <asm/system.h>
102#include <linux/pci.h>
103
104#ifndef CONFIG_PCI
105#error Enable PCI in your kernel config
106#endif
107
108#include <linux/mtd/mtd.h>
109#include <linux/mtd/pmc551.h>
110#include <linux/mtd/compatmac.h>
111
112static struct mtd_info *pmc551list;
113
114static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
115{
116 struct mypriv *priv = mtd->priv;
117 u32 soff_hi, soff_lo; /* start address offset hi/lo */
118 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
119 unsigned long end;
120 u_char *ptr;
121 size_t retlen;
122
123#ifdef CONFIG_MTD_PMC551_DEBUG
124 printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len);
125#endif
126
127 end = instr->addr + instr->len - 1;
128
129 /* Is it past the end? */
130 if ( end > mtd->size ) {
131#ifdef CONFIG_MTD_PMC551_DEBUG
132 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size);
133#endif
134 return -EINVAL;
135 }
136
137 eoff_hi = end & ~(priv->asize - 1);
138 soff_hi = instr->addr & ~(priv->asize - 1);
139 eoff_lo = end & (priv->asize - 1);
140 soff_lo = instr->addr & (priv->asize - 1);
141
142 pmc551_point (mtd, instr->addr, instr->len, &retlen, &ptr);
143
144 if ( soff_hi == eoff_hi || mtd->size == priv->asize) {
145 /* The whole thing fits within one access, so just one shot
146 will do it. */
147 memset(ptr, 0xff, instr->len);
148 } else {
149 /* We have to do multiple writes to get all the data
150 written. */
151 while (soff_hi != eoff_hi) {
152#ifdef CONFIG_MTD_PMC551_DEBUG
153 printk( KERN_DEBUG "pmc551_erase() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
154#endif
155 memset(ptr, 0xff, priv->asize);
156 if (soff_hi + priv->asize >= mtd->size) {
157 goto out;
158 }
159 soff_hi += priv->asize;
160 pmc551_point (mtd,(priv->base_map0|soff_hi),
161 priv->asize, &retlen, &ptr);
162 }
163 memset (ptr, 0xff, eoff_lo);
164 }
165
166out:
167 instr->state = MTD_ERASE_DONE;
168#ifdef CONFIG_MTD_PMC551_DEBUG
169 printk(KERN_DEBUG "pmc551_erase() done\n");
170#endif
171
172 mtd_erase_callback(instr);
173 return 0;
174}
175
176
177static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
178{
179 struct mypriv *priv = mtd->priv;
180 u32 soff_hi;
181 u32 soff_lo;
182
183#ifdef CONFIG_MTD_PMC551_DEBUG
184 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
185#endif
186
187 if (from + len > mtd->size) {
188#ifdef CONFIG_MTD_PMC551_DEBUG
189 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from+len, (long)mtd->size);
190#endif
191 return -EINVAL;
192 }
193
194 soff_hi = from & ~(priv->asize - 1);
195 soff_lo = from & (priv->asize - 1);
196
197 /* Cheap hack optimization */
198 if( priv->curr_map0 != from ) {
199 pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
200 (priv->base_map0 | soff_hi) );
201 priv->curr_map0 = soff_hi;
202 }
203
204 *mtdbuf = priv->start + soff_lo;
205 *retlen = len;
206 return 0;
207}
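/*
 * Descriptive note on the sliding aperture handled above: only priv->asize
 * bytes of the card are visible at any time.  soff_hi selects which
 * aperture-sized window of the onboard memory is exposed through BAR0 (by
 * programming the PMC551_PCI_MEM_MAP0 register), and soff_lo is the offset
 * of the requested data within that window.
 */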
208
209
210static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
211{
212#ifdef CONFIG_MTD_PMC551_DEBUG
213 printk(KERN_DEBUG "pmc551_unpoint()\n");
214#endif
215}
216
217
218static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
219{
220 struct mypriv *priv = mtd->priv;
221 u32 soff_hi, soff_lo; /* start address offset hi/lo */
222 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
223 unsigned long end;
224 u_char *ptr;
225 u_char *copyto = buf;
226
227#ifdef CONFIG_MTD_PMC551_DEBUG
228 printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize);
229#endif
230
231 end = from + len - 1;
232
233 /* Is it past the end? */
234 if (end > mtd->size) {
235#ifdef CONFIG_MTD_PMC551_DEBUG
236 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long) end, (long)mtd->size);
237#endif
238 return -EINVAL;
239 }
240
241 soff_hi = from & ~(priv->asize - 1);
242 eoff_hi = end & ~(priv->asize - 1);
243 soff_lo = from & (priv->asize - 1);
244 eoff_lo = end & (priv->asize - 1);
245
246 pmc551_point (mtd, from, len, retlen, &ptr);
247
248 if (soff_hi == eoff_hi) {
249 /* The whole thing fits within one access, so just one shot
250 will do it. */
251 memcpy(copyto, ptr, len);
252 copyto += len;
253 } else {
254 /* We have to do multiple writes to get all the data
255 written. */
256 while (soff_hi != eoff_hi) {
257#ifdef CONFIG_MTD_PMC551_DEBUG
258 printk( KERN_DEBUG "pmc551_read() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
259#endif
260 memcpy(copyto, ptr, priv->asize);
261 copyto += priv->asize;
262 if (soff_hi + priv->asize >= mtd->size) {
263 goto out;
264 }
265 soff_hi += priv->asize;
266 pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
267 }
268 memcpy(copyto, ptr, eoff_lo);
269 copyto += eoff_lo;
270 }
271
272out:
273#ifdef CONFIG_MTD_PMC551_DEBUG
274 printk(KERN_DEBUG "pmc551_read() done\n");
275#endif
276 *retlen = copyto - buf;
277 return 0;
278}
279
280static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
281{
282 struct mypriv *priv = mtd->priv;
283 u32 soff_hi, soff_lo; /* start address offset hi/lo */
284 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
285 unsigned long end;
286 u_char *ptr;
287 const u_char *copyfrom = buf;
288
289
290#ifdef CONFIG_MTD_PMC551_DEBUG
291 printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize);
292#endif
293
294 end = to + len - 1;
295 /* Is it past the end? or did the u32 wrap? */
296 if (end > mtd->size ) {
297#ifdef CONFIG_MTD_PMC551_DEBUG
298 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, size: %ld, to: %ld)\n", (long) end, (long)mtd->size, (long)to);
299#endif
300 return -EINVAL;
301 }
302
303 soff_hi = to & ~(priv->asize - 1);
304 eoff_hi = end & ~(priv->asize - 1);
305 soff_lo = to & (priv->asize - 1);
306 eoff_lo = end & (priv->asize - 1);
307
308 pmc551_point (mtd, to, len, retlen, &ptr);
309
310 if (soff_hi == eoff_hi) {
311 /* The whole thing fits within one access, so just one shot
312 will do it. */
313 memcpy(ptr, copyfrom, len);
314 copyfrom += len;
315 } else {
316 /* We have to do multiple writes to get all the data
317 written. */
318 while (soff_hi != eoff_hi) {
319#ifdef CONFIG_MTD_PMC551_DEBUG
320 printk( KERN_DEBUG "pmc551_write() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
321#endif
322 memcpy(ptr, copyfrom, priv->asize);
323 copyfrom += priv->asize;
324 if (soff_hi >= mtd->size) {
325 goto out;
326 }
327 soff_hi += priv->asize;
328 pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
329 }
330 memcpy(ptr, copyfrom, eoff_lo);
331 copyfrom += eoff_lo;
332 }
333
334out:
335#ifdef CONFIG_MTD_PMC551_DEBUG
336 printk(KERN_DEBUG "pmc551_write() done\n");
337#endif
338 *retlen = copyfrom - buf;
339 return 0;
340}
341
342/*
343 * Fixup routines for the V370PDC
344 * PCI device ID 0x020011b0
345 *
346 * This function basically kick-starts the DRAM onboard the card and gets it
347 * ready to be used.  Before this is done the device reads VERY erratically, so
348 * much that it can crash the Linux 2.2.x series kernels when a user cats
349 * /proc/pci .. though that is mainly a kernel bug in handling the PCI DEVSEL
350 * register. FIXME: stop spinning on registers .. must implement a timeout
351 * mechanism
352 * returns the size of the memory region found.
353 */
354static u32 fixup_pmc551 (struct pci_dev *dev)
355{
356#ifdef CONFIG_MTD_PMC551_BUGFIX
357 u32 dram_data;
358#endif
359 u32 size, dcmd, cfg, dtmp;
360 u16 cmd, tmp, i;
361 u8 bcmd, counter;
362
363 /* Sanity Check */
364 if(!dev) {
365 return -ENODEV;
366 }
367
368 /*
369 * Attempt to reset the card
370 * FIXME: Stop Spinning registers
371 */
372 counter=0;
373 /* unlock registers */
374 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5 );
375 /* read in old data */
376 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd );
377 /* bang the reset line up and down for a few */
378 for(i=0;i<10;i++) {
379 counter=0;
380 bcmd &= ~0x80;
381 while(counter++ < 100) {
382 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
383 }
384 counter=0;
385 bcmd |= 0x80;
386 while(counter++ < 100) {
387 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
388 }
389 }
390 bcmd |= (0x40|0x20);
391 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
392
393 /*
394 * Take care and turn off the memory on the device while we
395 * tweak the configurations
396 */
397 pci_read_config_word(dev, PCI_COMMAND, &cmd);
398 tmp = cmd & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
399 pci_write_config_word(dev, PCI_COMMAND, tmp);
400
401 /*
402 * Disable existing aperture before probing memory size
403 */
404 pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
405 dtmp=(dcmd|PMC551_PCI_MEM_MAP_ENABLE|PMC551_PCI_MEM_MAP_REG_EN);
406 pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
407 /*
408 * Grab old BAR0 config so that we can figure out memory size
409 * This is another bit of kludge going on. The reason for the
410 * redundancy is I am hoping to retain the original configuration
411 * previously assigned to the card by the BIOS or some previous
412 * fixup routine in the kernel. So we read the old config into cfg,
413 * then write all 1's to the memory space, read back the result into
414 * "size", and then write back all the old config.
415 */
416 pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &cfg );
417#ifndef CONFIG_MTD_PMC551_BUGFIX
418 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, ~0 );
419 pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &size );
420 size = (size&PCI_BASE_ADDRESS_MEM_MASK);
421 size &= ~(size-1);
422 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
423#else
424 /*
425 * Get the size of the memory by reading all the DRAM size values
426 * and adding them up.
427 *
428 * KLUDGE ALERT: the boards we are using have invalid column and
429 * row mux values. We fix them here, but this will break other
430 * memory configurations.
431 */
432 pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
433 size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
434 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
435 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
436 pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
437
438 pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
439 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
440 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
441 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
442 pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
443
444 pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
445 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
446 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
447 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
448 pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
449
450 pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
451 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
452 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
453 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
454 pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
455
456 /*
457 * Oops .. something went wrong
458 */
459 if( (size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
460 return -ENODEV;
461 }
462#endif /* CONFIG_MTD_PMC551_BUGFIX */
463
464 if ((cfg&PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
465 return -ENODEV;
466 }
467
468 /*
469 * Precharge Dram
470 */
471 pci_write_config_word( dev, PMC551_SDRAM_MA, 0x0400 );
472 pci_write_config_word( dev, PMC551_SDRAM_CMD, 0x00bf );
473
474 /*
475 * Wait until command has gone through
476 * FIXME: register spinning issue
477 */
478 do { pci_read_config_word( dev, PMC551_SDRAM_CMD, &cmd );
479 if(counter++ > 100)break;
480 } while ( (PCI_COMMAND_IO) & cmd );
481
482 /*
483 * Turn on auto refresh
484 * The loop is taken directly from Ramix's example code. I assume that
485 * this must be held high for some duration of time, but I can find no
486         * documentation referencing the reasons why.
487 */
488 for ( i = 1; i<=8 ; i++) {
489 pci_write_config_word (dev, PMC551_SDRAM_CMD, 0x0df);
490
491 /*
492 * Make certain command has gone through
493 * FIXME: register spinning issue
494 */
495 counter=0;
496 do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
497 if(counter++ > 100)break;
498 } while ( (PCI_COMMAND_IO) & cmd );
499 }
500
501 pci_write_config_word ( dev, PMC551_SDRAM_MA, 0x0020);
502 pci_write_config_word ( dev, PMC551_SDRAM_CMD, 0x0ff);
503
504 /*
505 * Wait until command completes
506 * FIXME: register spinning issue
507 */
508 counter=0;
509 do { pci_read_config_word ( dev, PMC551_SDRAM_CMD, &cmd);
510 if(counter++ > 100)break;
511 } while ( (PCI_COMMAND_IO) & cmd );
512
513 pci_read_config_dword ( dev, PMC551_DRAM_CFG, &dcmd);
514 dcmd |= 0x02000000;
515 pci_write_config_dword ( dev, PMC551_DRAM_CFG, dcmd);
516
517 /*
518 * Check to make certain fast back-to-back, if not
519 * then set it so
520 */
521 pci_read_config_word( dev, PCI_STATUS, &cmd);
522 if((cmd&PCI_COMMAND_FAST_BACK) == 0) {
523 cmd |= PCI_COMMAND_FAST_BACK;
524 pci_write_config_word( dev, PCI_STATUS, cmd);
525 }
526
527 /*
528 * Check to make certain the DEVSEL is set correctly, this device
529         * has a tendency to assert DEVSEL and TRDY when a write is performed
530 * to the memory when memory is read-only
531 */
532 if((cmd&PCI_STATUS_DEVSEL_MASK) != 0x0) {
533 cmd &= ~PCI_STATUS_DEVSEL_MASK;
534 pci_write_config_word( dev, PCI_STATUS, cmd );
535 }
536 /*
537 * Set to be prefetchable and put everything back based on old cfg.
538 * it's possible that the reset of the V370PDC nuked the original
539 * setup
540 */
541 /*
542 cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
543 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
544 */
545
546 /*
547 * Turn PCI memory and I/O bus access back on
548 */
549 pci_write_config_word( dev, PCI_COMMAND,
550 PCI_COMMAND_MEMORY | PCI_COMMAND_IO );
551#ifdef CONFIG_MTD_PMC551_DEBUG
552 /*
553 * Some screen fun
554 */
555 printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%lx\n",
556 (size<1024)?size:(size<1048576)?size>>10:size>>20,
557 (size<1024)?'B':(size<1048576)?'K':'M',
558 size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
559 (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
560
561 /*
562 * Check to see the state of the memory
563 */
564 pci_read_config_dword( dev, PMC551_DRAM_BLK0, &dcmd );
565 printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
566 "pmc551: DRAM_BLK0 Size: %d at %d\n"
567 "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
568 (((0x1<<1)&dcmd) == 0)?"RW":"RO",
569 (((0x1<<0)&dcmd) == 0)?"Off":"On",
570 PMC551_DRAM_BLK_GET_SIZE(dcmd),
571 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
572
573 pci_read_config_dword( dev, PMC551_DRAM_BLK1, &dcmd );
574 printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
575 "pmc551: DRAM_BLK1 Size: %d at %d\n"
576 "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
577 (((0x1<<1)&dcmd) == 0)?"RW":"RO",
578 (((0x1<<0)&dcmd) == 0)?"Off":"On",
579 PMC551_DRAM_BLK_GET_SIZE(dcmd),
580 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
581
582 pci_read_config_dword( dev, PMC551_DRAM_BLK2, &dcmd );
583 printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
584 "pmc551: DRAM_BLK2 Size: %d at %d\n"
585 "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
586 (((0x1<<1)&dcmd) == 0)?"RW":"RO",
587 (((0x1<<0)&dcmd) == 0)?"Off":"On",
588 PMC551_DRAM_BLK_GET_SIZE(dcmd),
589 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
590
591 pci_read_config_dword( dev, PMC551_DRAM_BLK3, &dcmd );
592 printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
593 "pmc551: DRAM_BLK3 Size: %d at %d\n"
594 "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
595 (((0x1<<1)&dcmd) == 0)?"RW":"RO",
596 (((0x1<<0)&dcmd) == 0)?"Off":"On",
597 PMC551_DRAM_BLK_GET_SIZE(dcmd),
598 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
599
600 pci_read_config_word( dev, PCI_COMMAND, &cmd );
601 printk( KERN_DEBUG "pmc551: Memory Access %s\n",
602 (((0x1<<1)&cmd) == 0)?"off":"on" );
603 printk( KERN_DEBUG "pmc551: I/O Access %s\n",
604 (((0x1<<0)&cmd) == 0)?"off":"on" );
605
606 pci_read_config_word( dev, PCI_STATUS, &cmd );
607 printk( KERN_DEBUG "pmc551: Devsel %s\n",
608 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x000)?"Fast":
609 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x200)?"Medium":
610 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x400)?"Slow":"Invalid" );
611
612 printk( KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
613 ((PCI_COMMAND_FAST_BACK&cmd) == 0)?"Not ":"" );
614
615 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd );
616 printk( KERN_DEBUG "pmc551: EEPROM is under %s control\n"
617 "pmc551: System Control Register is %slocked to PCI access\n"
618 "pmc551: System Control Register is %slocked to EEPROM access\n",
619 (bcmd&0x1)?"software":"hardware",
620 (bcmd&0x20)?"":"un", (bcmd&0x40)?"":"un");
621#endif
622 return size;
623}
624
625/*
626 * Kernel version specific module stuffages
627 */
628
629
630MODULE_LICENSE("GPL");
631MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
632MODULE_DESCRIPTION(PMC551_VERSION);
633
634/*
635 * Stuff these outside the ifdef so as to not bust compiled in driver support
636 */
637static int msize=0;
638#if defined(CONFIG_MTD_PMC551_APERTURE_SIZE)
639static int asize=CONFIG_MTD_PMC551_APERTURE_SIZE;
640#else
641static int asize=0;
642#endif
643
644module_param(msize, int, 0);
645MODULE_PARM_DESC(msize, "memory size in Megabytes [1 - 1024]");
646module_param(asize, int, 0);
647MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
648
649/*
650 * PMC551 Card Initialization
651 */
652static int __init init_pmc551(void)
653{
654 struct pci_dev *PCI_Device = NULL;
655 struct mypriv *priv;
656 int count, found=0;
657 struct mtd_info *mtd;
658 u32 length = 0;
659
660 if(msize) {
661 msize = (1 << (ffs(msize) - 1))<<20;
662 if (msize > (1<<30)) {
663 printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize);
664 return -EINVAL;
665 }
666 }
667
668 if(asize) {
669 asize = (1 << (ffs(asize) - 1))<<20;
670 if (asize > (1<<30) ) {
671 printk(KERN_NOTICE "pmc551: Invalid aperture size [%d]\n", asize);
672 return -EINVAL;
673 }
674 }
675
676 printk(KERN_INFO PMC551_VERSION);
677
678 /*
679         * PCI-bus chipset probe.
680 */
681 for( count = 0; count < MAX_MTD_DEVICES; count++ ) {
682
683 if ((PCI_Device = pci_find_device(PCI_VENDOR_ID_V3_SEMI,
684 PCI_DEVICE_ID_V3_SEMI_V370PDC,
685 PCI_Device ) ) == NULL) {
686 break;
687 }
688
689 printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
690 PCI_Device->resource[0].start);
691
692 /*
693 * The PMC551 device acts VERY weird if you don't init it
694                 * first, i.e. it will not correctly report DEVSEL.  If for
695                 * some reason the SDRAM is in a write-protected state, the
696                 * device will assert DEVSEL when it is written to, causing problems
697 * with the oldproc.c driver in
698 * some kernels (2.2.*)
699 */
700 if((length = fixup_pmc551(PCI_Device)) <= 0) {
701 printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
702 break;
703 }
704
705 /*
706 * This is needed until the driver is capable of reading the
707 * onboard I2C SROM to discover the "real" memory size.
708 */
709 if(msize) {
710 length = msize;
711 printk(KERN_NOTICE "pmc551: Using specified memory size 0x%x\n", length);
712 } else {
713 msize = length;
714 }
715
716 mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
717 if (!mtd) {
718 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n");
719 break;
720 }
721
722 memset(mtd, 0, sizeof(struct mtd_info));
723
724 priv = kmalloc (sizeof(struct mypriv), GFP_KERNEL);
725 if (!priv) {
726 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n");
727 kfree(mtd);
728 break;
729 }
730 memset(priv, 0, sizeof(*priv));
731 mtd->priv = priv;
732 priv->dev = PCI_Device;
733
734 if(asize > length) {
735 printk(KERN_NOTICE "pmc551: reducing aperture size to fit %dM\n",length>>20);
736 priv->asize = asize = length;
737 } else if (asize == 0 || asize == length) {
738 printk(KERN_NOTICE "pmc551: Using existing aperture size %dM\n", length>>20);
739 priv->asize = asize = length;
740 } else {
741 printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
742 priv->asize = asize;
743 }
744 priv->start = ioremap(((PCI_Device->resource[0].start)
745 & PCI_BASE_ADDRESS_MEM_MASK),
746 priv->asize);
747
748 if (!priv->start) {
749 printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
750 kfree(mtd->priv);
751 kfree(mtd);
752 break;
753 }
754
755#ifdef CONFIG_MTD_PMC551_DEBUG
756 printk( KERN_DEBUG "pmc551: setting aperture to %d\n",
757 ffs(priv->asize>>20)-1);
758#endif
759
760 priv->base_map0 = ( PMC551_PCI_MEM_MAP_REG_EN
761 | PMC551_PCI_MEM_MAP_ENABLE
762 | (ffs(priv->asize>>20)-1)<<4 );
763 priv->curr_map0 = priv->base_map0;
764 pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
765 priv->curr_map0 );
766
767#ifdef CONFIG_MTD_PMC551_DEBUG
768 printk( KERN_DEBUG "pmc551: aperture set to %d\n",
769 (priv->base_map0 & 0xF0)>>4 );
770#endif
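		/*
		 * Worked example (editorial note): for a 16MiB aperture,
		 * asize >> 20 == 16 and ffs(16) - 1 == 4, so 4 is written
		 * into bits 7:4 of PMC551_PCI_MEM_MAP0 and the debug
		 * message above would report "aperture set to 4".
		 */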
771
772 mtd->size = msize;
773 mtd->flags = MTD_CAP_RAM;
774 mtd->erase = pmc551_erase;
775 mtd->read = pmc551_read;
776 mtd->write = pmc551_write;
777 mtd->point = pmc551_point;
778 mtd->unpoint = pmc551_unpoint;
779 mtd->type = MTD_RAM;
780 mtd->name = "PMC551 RAM board";
781 mtd->erasesize = 0x10000;
782 mtd->owner = THIS_MODULE;
783
784 if (add_mtd_device(mtd)) {
785 printk(KERN_NOTICE "pmc551: Failed to register new device\n");
786 iounmap(priv->start);
787 kfree(mtd->priv);
788 kfree(mtd);
789 break;
790 }
791 printk(KERN_NOTICE "Registered pmc551 memory device.\n");
792 printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n",
793 priv->asize>>20,
794 priv->start,
795 priv->start + priv->asize);
796 printk(KERN_NOTICE "Total memory is %d%c\n",
797 (length<1024)?length:
798 (length<1048576)?length>>10:length>>20,
799 (length<1024)?'B':(length<1048576)?'K':'M');
800 priv->nextpmc551 = pmc551list;
801 pmc551list = mtd;
802 found++;
803 }
804
805 if( !pmc551list ) {
806 printk(KERN_NOTICE "pmc551: not detected\n");
807 return -ENODEV;
808 } else {
809 printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
810 return 0;
811 }
812}
813
814/*
815 * PMC551 Card Cleanup
816 */
817static void __exit cleanup_pmc551(void)
818{
819 int found=0;
820 struct mtd_info *mtd;
821 struct mypriv *priv;
822
823 while((mtd=pmc551list)) {
824 priv = mtd->priv;
825 pmc551list = priv->nextpmc551;
826
827 if(priv->start) {
828 printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n",
829 priv->asize>>20, priv->start);
830 iounmap (priv->start);
831 }
832
833 kfree (mtd->priv);
834 del_mtd_device (mtd);
835 kfree (mtd);
836 found++;
837 }
838
839 printk(KERN_NOTICE "pmc551: %d pmc551 devices unloaded\n", found);
840}
841
842module_init(init_pmc551);
843module_exit(cleanup_pmc551);
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
new file mode 100644
index 000000000000..5ab15e643be7
--- /dev/null
+++ b/drivers/mtd/devices/slram.c
@@ -0,0 +1,357 @@
1/*======================================================================
2
3 $Id: slram.c,v 1.33 2005/01/05 18:05:13 dwmw2 Exp $
4
5 This driver provides a method to access memory not used by the kernel
6 itself (i.e. if the kernel commandline mem=xxx is used). To actually
7 use slram at least mtdblock or mtdchar is required (for block or
8 character device access).
9
10 Usage:
11
12 if compiled as loadable module:
13 modprobe slram map=<name>,<start>,<end/offset>
14 if statically linked into the kernel use the following kernel cmd.line
15 slram=<name>,<start>,<end/offset>
16
17 <name>: name of the device that will be listed in /proc/mtd
18 <start>: start of the memory region, decimal or hex (0xabcdef)
19 <end/offset>: end of the memory region. It's possible to use +0x1234
20 to specify the offset instead of the absolute address
21
22 NOTE:
23 With slram it's only possible to map a contiguous memory region. Therefore,
24 if there's a device mapped somewhere in the specified region, slram will
25 fail to load (see kernel log if modprobe fails).
26
27 -
28
29 Jochen Schaeuble <psionic@psionic.de>
30
31======================================================================*/
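/*
 * Editorial note -- example invocations (the addresses are made up for
 * illustration only):
 *
 *	modprobe slram map=ram0,0x10000000,+0x100000
 *	modprobe slram map=ram0,0x10000000,0x10100000
 *
 * Both describe a 1MiB region called "ram0" starting at 0x10000000; the
 * first form gives a length relative to <start>, the second an absolute
 * end address. When the driver is built in, the same triple goes on the
 * kernel command line as slram=ram0,0x10000000,+0x100000.
 */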
32
33
34#include <linux/module.h>
35#include <asm/uaccess.h>
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/ptrace.h>
40#include <linux/slab.h>
41#include <linux/string.h>
42#include <linux/timer.h>
43#include <linux/major.h>
44#include <linux/fs.h>
45#include <linux/ioctl.h>
46#include <linux/init.h>
47#include <asm/io.h>
48#include <asm/system.h>
49
50#include <linux/mtd/mtd.h>
51
52#define SLRAM_MAX_DEVICES_PARAMS 6 /* 3 parameters / device */
53
54#define T(fmt, args...) printk(KERN_DEBUG fmt, ## args)
55#define E(fmt, args...) printk(KERN_NOTICE fmt, ## args)
56
57typedef struct slram_priv {
58 u_char *start;
59 u_char *end;
60} slram_priv_t;
61
62typedef struct slram_mtd_list {
63 struct mtd_info *mtdinfo;
64 struct slram_mtd_list *next;
65} slram_mtd_list_t;
66
67#ifdef MODULE
68static char *map[SLRAM_MAX_DEVICES_PARAMS];
69
70module_param_array(map, charp, NULL, 0);
71MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
72#else
73static char *map;
74#endif
75
76static slram_mtd_list_t *slram_mtdlist = NULL;
77
78static int slram_erase(struct mtd_info *, struct erase_info *);
79static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, u_char **);
80static void slram_unpoint(struct mtd_info *, u_char *, loff_t, size_t);
81static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
82static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
83
84static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
85{
86 slram_priv_t *priv = mtd->priv;
87
88 if (instr->addr + instr->len > mtd->size) {
89 return(-EINVAL);
90 }
91
92 memset(priv->start + instr->addr, 0xff, instr->len);
93
94 /* This'll catch a few races. Free the thing before returning :)
95 * I don't feel at all ashamed. This kind of thing is possible anyway
96 * with flash, but unlikely.
97 */
98
99 instr->state = MTD_ERASE_DONE;
100
101 mtd_erase_callback(instr);
102
103 return(0);
104}
105
106static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
107 size_t *retlen, u_char **mtdbuf)
108{
109 slram_priv_t *priv = mtd->priv;
110
111 *mtdbuf = priv->start + from;
112 *retlen = len;
113 return(0);
114}
115
116static void slram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
117{
118}
119
120static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
121 size_t *retlen, u_char *buf)
122{
123 slram_priv_t *priv = mtd->priv;
124
125 memcpy(buf, priv->start + from, len);
126
127 *retlen = len;
128 return(0);
129}
130
131static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
132 size_t *retlen, const u_char *buf)
133{
134 slram_priv_t *priv = mtd->priv;
135
136 memcpy(priv->start + to, buf, len);
137
138 *retlen = len;
139 return(0);
140}
141
142/*====================================================================*/
143
144static int register_device(char *name, unsigned long start, unsigned long length)
145{
146 slram_mtd_list_t **curmtd;
147
148 curmtd = &slram_mtdlist;
149 while (*curmtd) {
150 curmtd = &(*curmtd)->next;
151 }
152
153 *curmtd = kmalloc(sizeof(slram_mtd_list_t), GFP_KERNEL);
154 if (!(*curmtd)) {
155 E("slram: Cannot allocate new MTD device.\n");
156 return(-ENOMEM);
157 }
158 (*curmtd)->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
159 (*curmtd)->next = NULL;
160
161 if ((*curmtd)->mtdinfo) {
162 memset((char *)(*curmtd)->mtdinfo, 0, sizeof(struct mtd_info));
163 (*curmtd)->mtdinfo->priv =
164 kmalloc(sizeof(slram_priv_t), GFP_KERNEL);
165
166 if (!(*curmtd)->mtdinfo->priv) {
167 kfree((*curmtd)->mtdinfo);
168 (*curmtd)->mtdinfo = NULL;
169 } else {
170 memset((*curmtd)->mtdinfo->priv,0,sizeof(slram_priv_t));
171 }
172 }
173
174 if (!(*curmtd)->mtdinfo) {
175 E("slram: Cannot allocate new MTD device.\n");
176 return(-ENOMEM);
177 }
178
179 if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
180 ioremap(start, length))) {
181 E("slram: ioremap failed\n");
182 return -EIO;
183 }
184 ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
185 ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start + length;
186
187
188 (*curmtd)->mtdinfo->name = name;
189 (*curmtd)->mtdinfo->size = length;
190 (*curmtd)->mtdinfo->flags = MTD_CLEAR_BITS | MTD_SET_BITS |
191 MTD_WRITEB_WRITEABLE | MTD_VOLATILE;
192 (*curmtd)->mtdinfo->erase = slram_erase;
193 (*curmtd)->mtdinfo->point = slram_point;
194 (*curmtd)->mtdinfo->unpoint = slram_unpoint;
195 (*curmtd)->mtdinfo->read = slram_read;
196 (*curmtd)->mtdinfo->write = slram_write;
197 (*curmtd)->mtdinfo->owner = THIS_MODULE;
198 (*curmtd)->mtdinfo->type = MTD_RAM;
199 (*curmtd)->mtdinfo->erasesize = 0x0;
200
201 if (add_mtd_device((*curmtd)->mtdinfo)) {
202 E("slram: Failed to register new device\n");
203 iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
204 kfree((*curmtd)->mtdinfo->priv);
205 kfree((*curmtd)->mtdinfo);
206 return(-EAGAIN);
207 }
208 T("slram: Registered device %s from %luKiB to %luKiB\n", name,
209 (start / 1024), ((start + length) / 1024));
210 T("slram: Mapped from 0x%p to 0x%p\n",
211 ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
212 ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
213 return(0);
214}
215
216static void unregister_devices(void)
217{
218 slram_mtd_list_t *nextitem;
219
220 while (slram_mtdlist) {
221 nextitem = slram_mtdlist->next;
222 del_mtd_device(slram_mtdlist->mtdinfo);
223 iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
224 kfree(slram_mtdlist->mtdinfo->priv);
225 kfree(slram_mtdlist->mtdinfo);
226 kfree(slram_mtdlist);
227 slram_mtdlist = nextitem;
228 }
229}
230
231static unsigned long handle_unit(unsigned long value, char *unit)
232{
233 if ((*unit == 'M') || (*unit == 'm')) {
234 return(value * 1024 * 1024);
235 } else if ((*unit == 'K') || (*unit == 'k')) {
236 return(value * 1024);
237 }
238 return(value);
239}
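/*
 * Editorial note: handle_unit() is what lets the <start> and <end/offset>
 * values carry a K or M suffix, e.g. handle_unit(1, "M") == 0x100000 and
 * handle_unit(64, "k") == 0x10000, while a plain decimal or hex value is
 * returned unchanged.
 */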
240
241static int parse_cmdline(char *devname, char *szstart, char *szlength)
242{
243 char *buffer;
244 unsigned long devstart;
245 unsigned long devlength;
246
247 if ((!devname) || (!szstart) || (!szlength)) {
248 unregister_devices();
249 return(-EINVAL);
250 }
251
252 devstart = simple_strtoul(szstart, &buffer, 0);
253 devstart = handle_unit(devstart, buffer);
254
255 if (*(szlength) != '+') {
256 devlength = simple_strtoul(szlength, &buffer, 0);
257 devlength = handle_unit(devlength, buffer) - devstart;
258 } else {
259 devlength = simple_strtoul(szlength + 1, &buffer, 0);
260 devlength = handle_unit(devlength, buffer);
261 }
262 T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
263 devname, devstart, devlength);
264 if ((devstart < 0) || (devlength < 0)) {
265 E("slram: Illegal start / length parameter.\n");
266 return(-EINVAL);
267 }
268
269 if ((devstart = register_device(devname, devstart, devlength))){
270 unregister_devices();
271 return((int)devstart);
272 }
273 return(0);
274}
275
276#ifndef MODULE
277
278static int __init mtd_slram_setup(char *str)
279{
280 map = str;
281 return(1);
282}
283
284__setup("slram=", mtd_slram_setup);
285
286#endif
287
288static int init_slram(void)
289{
290 char *devname;
291 int i;
292
293#ifndef MODULE
294 char *devstart;
295 char *devlength;
296
297 i = 0;
298
299 if (!map) {
300 E("slram: not enough parameters.\n");
301 return(-EINVAL);
302 }
303 while (map) {
304 devname = devstart = devlength = NULL;
305
306 if (!(devname = strsep(&map, ","))) {
307 E("slram: No devicename specified.\n");
308 break;
309 }
310 T("slram: devname = %s\n", devname);
311 if ((!map) || (!(devstart = strsep(&map, ",")))) {
312 E("slram: No devicestart specified.\n");
313 }
314 T("slram: devstart = %s\n", devstart);
315 if ((!map) || (!(devlength = strsep(&map, ",")))) {
316 E("slram: No devicelength / -end specified.\n");
317 }
318 T("slram: devlength = %s\n", devlength);
319 if (parse_cmdline(devname, devstart, devlength) != 0) {
320 return(-EINVAL);
321 }
322 }
323#else
324 int count;
325
326 for (count = 0; (map[count]) && (count < SLRAM_MAX_DEVICES_PARAMS);
327 count++) {
328 }
329
330 if ((count % 3 != 0) || (count == 0)) {
331 E("slram: not enough parameters.\n");
332 return(-EINVAL);
333 }
334 for (i = 0; i < (count / 3); i++) {
335 devname = map[i * 3];
336
337 if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
338 return(-EINVAL);
339 }
340
341 }
342#endif /* !MODULE */
343
344 return(0);
345}
346
347static void __exit cleanup_slram(void)
348{
349 unregister_devices();
350}
351
352module_init(init_slram);
353module_exit(cleanup_slram);
354
355MODULE_LICENSE("GPL");
356MODULE_AUTHOR("Jochen Schaeuble <psionic@psionic.de>");
357MODULE_DESCRIPTION("MTD driver for uncached system RAM");
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
new file mode 100644
index 000000000000..18cc8846e733
--- /dev/null
+++ b/drivers/mtd/ftl.c
@@ -0,0 +1,1115 @@
1/* This version ported to the Linux-MTD system by dwmw2@infradead.org
2 * $Id: ftl.c,v 1.54 2004/11/16 18:33:15 dwmw2 Exp $
3 *
4 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
6 *
7 * Based on:
8 */
9/*======================================================================
10
11 A Flash Translation Layer memory card driver
12
13 This driver implements a disk-like block device driver with an
14 apparent block size of 512 bytes for flash memory cards.
15
16 ftl_cs.c 1.62 2000/02/01 00:59:04
17
18 The contents of this file are subject to the Mozilla Public
19 License Version 1.1 (the "License"); you may not use this file
20 except in compliance with the License. You may obtain a copy of
21 the License at http://www.mozilla.org/MPL/
22
23 Software distributed under the License is distributed on an "AS
24 IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
25 implied. See the License for the specific language governing
26 rights and limitations under the License.
27
28 The initial developer of the original code is David A. Hinds
29 <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
30 are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
31
32 Alternatively, the contents of this file may be used under the
33 terms of the GNU General Public License version 2 (the "GPL"), in
34 which case the provisions of the GPL are applicable instead of the
35 above. If you wish to allow the use of your version of this file
36 only under the terms of the GPL and not to allow others to use
37 your version of this file under the MPL, indicate your decision
38 by deleting the provisions above and replace them with the notice
39 and other provisions required by the GPL. If you do not delete
40 the provisions above, a recipient may use your version of this
41 file under either the MPL or the GPL.
42
43 LEGAL NOTE: The FTL format is patented by M-Systems. They have
44 granted a license for its use with PCMCIA devices:
45
46 "M-Systems grants a royalty-free, non-exclusive license under
47 any presently existing M-Systems intellectual property rights
48 necessary for the design and development of FTL-compatible
49 drivers, file systems and utilities using the data formats with
50 PCMCIA PC Cards as described in the PCMCIA Flash Translation
51 Layer (FTL) Specification."
52
53 Use of the FTL format for non-PCMCIA applications may be an
54 infringement of these patents. For additional information,
55 contact M-Systems (http://www.m-sys.com) directly.
56
57======================================================================*/
58#include <linux/mtd/blktrans.h>
59#include <linux/module.h>
60#include <linux/mtd/mtd.h>
61/*#define PSYCHO_DEBUG */
62
63#include <linux/kernel.h>
64#include <linux/sched.h>
65#include <linux/ptrace.h>
66#include <linux/slab.h>
67#include <linux/string.h>
68#include <linux/timer.h>
69#include <linux/major.h>
70#include <linux/fs.h>
71#include <linux/init.h>
72#include <linux/hdreg.h>
73#include <linux/vmalloc.h>
74#include <linux/blkpg.h>
75#include <asm/uaccess.h>
76
77#include <linux/mtd/ftl.h>
78
79/*====================================================================*/
80
81/* Parameters that can be set with 'insmod' */
82static int shuffle_freq = 50;
83module_param(shuffle_freq, int, 0);
84
85/*====================================================================*/
86
87/* Major device # for FTL device */
88#ifndef FTL_MAJOR
89#define FTL_MAJOR 44
90#endif
91
92
93/*====================================================================*/
94
95/* Maximum number of separate memory devices we'll allow */
96#define MAX_DEV 4
97
98/* Maximum number of regions per device */
99#define MAX_REGION 4
100
101/* Maximum number of partitions in an FTL region */
102#define PART_BITS 4
103
104/* Maximum number of outstanding erase requests per socket */
105#define MAX_ERASE 8
106
107/* Sector size -- shouldn't need to change */
108#define SECTOR_SIZE 512
109
110
111/* Each memory region corresponds to a minor device */
112typedef struct partition_t {
113 struct mtd_blktrans_dev mbd;
114 u_int32_t state;
115 u_int32_t *VirtualBlockMap;
116 u_int32_t *VirtualPageMap;
117 u_int32_t FreeTotal;
118 struct eun_info_t {
119 u_int32_t Offset;
120 u_int32_t EraseCount;
121 u_int32_t Free;
122 u_int32_t Deleted;
123 } *EUNInfo;
124 struct xfer_info_t {
125 u_int32_t Offset;
126 u_int32_t EraseCount;
127 u_int16_t state;
128 } *XferInfo;
129 u_int16_t bam_index;
130 u_int32_t *bam_cache;
131 u_int16_t DataUnits;
132 u_int32_t BlocksPerUnit;
133 erase_unit_header_t header;
134#if 0
135 region_info_t region;
136 memory_handle_t handle;
137#endif
138} partition_t;
139
140void ftl_freepart(partition_t *part);
141
142/* Partition state flags */
143#define FTL_FORMATTED 0x01
144
145/* Transfer unit states */
146#define XFER_UNKNOWN 0x00
147#define XFER_ERASING 0x01
148#define XFER_ERASED 0x02
149#define XFER_PREPARED 0x03
150#define XFER_FAILED 0x04
151
152/*====================================================================*/
153
154
155static void ftl_erase_callback(struct erase_info *done);
156
157
158/*======================================================================
159
160 Scan_header() checks to see if a memory region contains an FTL
161 partition. build_maps() reads all the erase unit headers, builds
162 the erase unit map, and then builds the virtual page map.
163
164======================================================================*/
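/*
 * Worked example (editorial note): the sanity checks in scan_header()
 * below imply that a valid header always describes 512-byte blocks
 * (BlockSize == 9) and an erase unit of 1 << EraseUnitSize bytes that
 * matches the MTD erase size, so a card with 128KiB erase blocks is
 * expected to report EraseUnitSize == 17.
 */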
165
166static int scan_header(partition_t *part)
167{
168 erase_unit_header_t header;
169 loff_t offset, max_offset;
170 size_t ret;
171 int err;
172 part->header.FormattedSize = 0;
173 max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
174 /* Search first megabyte for a valid FTL header */
175 for (offset = 0;
176 (offset + sizeof(header)) < max_offset;
177 offset += part->mbd.mtd->erasesize ? : 0x2000) {
178
179 err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
180 (unsigned char *)&header);
181
182 if (err)
183 return err;
184
185 if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
186 }
187
188 if (offset == max_offset) {
189 printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
190 return -ENOENT;
191 }
192 if (header.BlockSize != 9 ||
193 (header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
194 (header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
195 printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
196 return -1;
197 }
198 if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
199 printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
200 1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
201 return -1;
202 }
203 part->header = header;
204 return 0;
205}
206
207static int build_maps(partition_t *part)
208{
209 erase_unit_header_t header;
210 u_int16_t xvalid, xtrans, i;
211 u_int blocks, j;
212 int hdr_ok, ret = -1;
213 ssize_t retval;
214 loff_t offset;
215
216 /* Set up erase unit maps */
217 part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
218 part->header.NumTransferUnits;
219 part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
220 GFP_KERNEL);
221 if (!part->EUNInfo)
222 goto out;
223 for (i = 0; i < part->DataUnits; i++)
224 part->EUNInfo[i].Offset = 0xffffffff;
225 part->XferInfo =
226 kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
227 GFP_KERNEL);
228 if (!part->XferInfo)
229 goto out_EUNInfo;
230
231 xvalid = xtrans = 0;
232 for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
233 offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
234 << part->header.EraseUnitSize);
235 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
236 (unsigned char *)&header);
237
238 if (ret)
239 goto out_XferInfo;
240
241 ret = -1;
242 /* Is this a transfer partition? */
243 hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
244 if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
245 (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
246 part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
247 part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
248 le32_to_cpu(header.EraseCount);
249 xvalid++;
250 } else {
251 if (xtrans == part->header.NumTransferUnits) {
252 printk(KERN_NOTICE "ftl_cs: format error: too many "
253 "transfer units!\n");
254 goto out_XferInfo;
255 }
256 if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
257 part->XferInfo[xtrans].state = XFER_PREPARED;
258 part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
259 } else {
260 part->XferInfo[xtrans].state = XFER_UNKNOWN;
261 /* Pick anything reasonable for the erase count */
262 part->XferInfo[xtrans].EraseCount =
263 le32_to_cpu(part->header.EraseCount);
264 }
265 part->XferInfo[xtrans].Offset = offset;
266 xtrans++;
267 }
268 }
269 /* Check for format trouble */
270 header = part->header;
271 if ((xtrans != header.NumTransferUnits) ||
272 (xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
273 printk(KERN_NOTICE "ftl_cs: format error: erase units "
274 "don't add up!\n");
275 goto out_XferInfo;
276 }
277
278 /* Set up virtual page map */
279 blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
280 part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
281 if (!part->VirtualBlockMap)
282 goto out_XferInfo;
283
284 memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t));
285 part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
286
287 part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t),
288 GFP_KERNEL);
289 if (!part->bam_cache)
290 goto out_VirtualBlockMap;
291
292 part->bam_index = 0xffff;
293 part->FreeTotal = 0;
294
295 for (i = 0; i < part->DataUnits; i++) {
296 part->EUNInfo[i].Free = 0;
297 part->EUNInfo[i].Deleted = 0;
298 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
299
300 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
301 part->BlocksPerUnit * sizeof(u_int32_t), &retval,
302 (unsigned char *)part->bam_cache);
303
304 if (ret)
305 goto out_bam_cache;
306
307 for (j = 0; j < part->BlocksPerUnit; j++) {
308 if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
309 part->EUNInfo[i].Free++;
310 part->FreeTotal++;
311 } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
312 (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
313 part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
314 (i << header.EraseUnitSize) + (j << header.BlockSize);
315 else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
316 part->EUNInfo[i].Deleted++;
317 }
318 }
319
320 ret = 0;
321 goto out;
322
323out_bam_cache:
324 kfree(part->bam_cache);
325out_VirtualBlockMap:
326 vfree(part->VirtualBlockMap);
327out_XferInfo:
328 kfree(part->XferInfo);
329out_EUNInfo:
330 kfree(part->EUNInfo);
331out:
332 return ret;
333} /* build_maps */
334
335/*======================================================================
336
337 Erase_xfer() schedules an asynchronous erase operation for a
338 transfer unit.
339
340======================================================================*/
341
342static int erase_xfer(partition_t *part,
343 u_int16_t xfernum)
344{
345 int ret;
346 struct xfer_info_t *xfer;
347 struct erase_info *erase;
348
349 xfer = &part->XferInfo[xfernum];
350 DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
351 xfer->state = XFER_ERASING;
352
353 /* Is there a free erase slot? Always in MTD. */
354
355
356 erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
357 if (!erase)
358 return -ENOMEM;
359
360 erase->callback = ftl_erase_callback;
361 erase->addr = xfer->Offset;
362 erase->len = 1 << part->header.EraseUnitSize;
363 erase->priv = (u_long)part;
364
365 ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
366
367 if (!ret)
368 xfer->EraseCount++;
369 else
370 kfree(erase);
371
372 return ret;
373} /* erase_xfer */
374
375/*======================================================================
376
377 Prepare_xfer() takes a freshly erased transfer unit and gives
378 it an appropriate header.
379
380======================================================================*/
381
382static void ftl_erase_callback(struct erase_info *erase)
383{
384 partition_t *part;
385 struct xfer_info_t *xfer;
386 int i;
387
388 /* Look up the transfer unit */
389 part = (partition_t *)(erase->priv);
390
391 for (i = 0; i < part->header.NumTransferUnits; i++)
392 if (part->XferInfo[i].Offset == erase->addr) break;
393
394 if (i == part->header.NumTransferUnits) {
395 printk(KERN_NOTICE "ftl_cs: internal error: "
396 "erase lookup failed!\n");
397 return;
398 }
399
400 xfer = &part->XferInfo[i];
401 if (erase->state == MTD_ERASE_DONE)
402 xfer->state = XFER_ERASED;
403 else {
404 xfer->state = XFER_FAILED;
405 printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
406 erase->state);
407 }
408
409 kfree(erase);
410
411} /* ftl_erase_callback */
412
413static int prepare_xfer(partition_t *part, int i)
414{
415 erase_unit_header_t header;
416 struct xfer_info_t *xfer;
417 int nbam, ret;
418 u_int32_t ctl;
419 ssize_t retlen;
420 loff_t offset;
421
422 xfer = &part->XferInfo[i];
423 xfer->state = XFER_FAILED;
424
425 DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
426
427 /* Write the transfer unit header */
428 header = part->header;
429 header.LogicalEUN = cpu_to_le16(0xffff);
430 header.EraseCount = cpu_to_le32(xfer->EraseCount);
431
432 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
433 &retlen, (u_char *)&header);
434
435 if (ret) {
436 return ret;
437 }
438
439 /* Write the BAM stub */
440 nbam = (part->BlocksPerUnit * sizeof(u_int32_t) +
441 le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
442
443 offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
444 ctl = cpu_to_le32(BLOCK_CONTROL);
445
446 for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {
447
448 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
449 &retlen, (u_char *)&ctl);
450
451 if (ret)
452 return ret;
453 }
454 xfer->state = XFER_PREPARED;
455 return 0;
456
457} /* prepare_xfer */
458
459/*======================================================================
460
461 Copy_erase_unit() takes a full erase block and a transfer unit,
462 copies everything to the transfer unit, then swaps the block
463 pointers.
464
465 All data blocks are copied to the corresponding blocks in the
466 target unit, so the virtual block map does not need to be
467 updated.
468
469======================================================================*/
470
471static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
472 u_int16_t xferunit)
473{
474 u_char buf[SECTOR_SIZE];
475 struct eun_info_t *eun;
476 struct xfer_info_t *xfer;
477 u_int32_t src, dest, free, i;
478 u_int16_t unit;
479 int ret;
480 ssize_t retlen;
481 loff_t offset;
482 u_int16_t srcunitswap = cpu_to_le16(srcunit);
483
484 eun = &part->EUNInfo[srcunit];
485 xfer = &part->XferInfo[xferunit];
486 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
487 eun->Offset, xfer->Offset);
488
489
490 /* Read current BAM */
491 if (part->bam_index != srcunit) {
492
493 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
494
495 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
496 part->BlocksPerUnit * sizeof(u_int32_t),
497 &retlen, (u_char *) (part->bam_cache));
498
499 /* mark the cache bad, in case we get an error later */
500 part->bam_index = 0xffff;
501
502 if (ret) {
503 printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
504 return ret;
505 }
506 }
507
508 /* Write the LogicalEUN for the transfer unit */
509 xfer->state = XFER_UNKNOWN;
510 offset = xfer->Offset + 20; /* Bad! */
511 unit = cpu_to_le16(0x7fff);
512
513 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
514 &retlen, (u_char *) &unit);
515
516 if (ret) {
517 printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
518 return ret;
519 }
520
521 /* Copy all data blocks from source unit to transfer unit */
522 src = eun->Offset; dest = xfer->Offset;
523
524 free = 0;
525 ret = 0;
526 for (i = 0; i < part->BlocksPerUnit; i++) {
527 switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
528 case BLOCK_CONTROL:
529 /* This gets updated later */
530 break;
531 case BLOCK_DATA:
532 case BLOCK_REPLACEMENT:
533 ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
534 &retlen, (u_char *) buf);
535 if (ret) {
536 printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
537 return ret;
538 }
539
540
541 ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
542 &retlen, (u_char *) buf);
543 if (ret) {
544 printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
545 return ret;
546 }
547
548 break;
549 default:
550 /* All other blocks must be free */
551 part->bam_cache[i] = cpu_to_le32(0xffffffff);
552 free++;
553 break;
554 }
555 src += SECTOR_SIZE;
556 dest += SECTOR_SIZE;
557 }
558
559 /* Write the BAM to the transfer unit */
560 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
561 part->BlocksPerUnit * sizeof(int32_t), &retlen,
562 (u_char *)part->bam_cache);
563 if (ret) {
564 printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
565 return ret;
566 }
567
568
569 /* All clear? Then update the LogicalEUN again */
570 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
571 &retlen, (u_char *)&srcunitswap);
572
573 if (ret) {
574 printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
575 return ret;
576 }
577
578
579 /* Update the maps and usage stats*/
580 i = xfer->EraseCount;
581 xfer->EraseCount = eun->EraseCount;
582 eun->EraseCount = i;
583 i = xfer->Offset;
584 xfer->Offset = eun->Offset;
585 eun->Offset = i;
586 part->FreeTotal -= eun->Free;
587 part->FreeTotal += free;
588 eun->Free = free;
589 eun->Deleted = 0;
590
591 /* Now, the cache should be valid for the new block */
592 part->bam_index = srcunit;
593
594 return 0;
595} /* copy_erase_unit */
596
597/*======================================================================
598
599 reclaim_block() picks a full erase unit and a transfer unit and
600 then calls copy_erase_unit() to copy one to the other. Then, it
601 schedules an erase on the expired block.
602
603 What's a good way to decide which transfer unit and which erase
604 unit to use? Beats me. My way is to always pick the transfer
605 unit with the fewest erases, and usually pick the data unit with
606 the most deleted blocks. But with a small probability, pick the
607 oldest data unit instead. This means that we generally postpone
608 the next reclamation as long as possible, but shuffle static
609 stuff around a bit for wear leveling.
610
611======================================================================*/
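/*
 * Editorial note: the "small probability" mentioned above comes from the
 * shuffle_freq module parameter; with the default of 50, roughly one
 * reclaim in fifty (whenever jiffies happens to be a multiple of
 * shuffle_freq) recycles the least-erased data unit instead of the one
 * with the most deleted blocks.
 */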
612
613static int reclaim_block(partition_t *part)
614{
615 u_int16_t i, eun, xfer;
616 u_int32_t best;
617 int queued, ret;
618
619 DEBUG(0, "ftl_cs: reclaiming space...\n");
620 DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
621 /* Pick the least erased transfer unit */
622 best = 0xffffffff; xfer = 0xffff;
623 do {
624 queued = 0;
625 for (i = 0; i < part->header.NumTransferUnits; i++) {
626 int n=0;
627 if (part->XferInfo[i].state == XFER_UNKNOWN) {
628 DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i);
629 n=1;
630 erase_xfer(part, i);
631 }
632 if (part->XferInfo[i].state == XFER_ERASING) {
633 DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i);
634 n=1;
635 queued = 1;
636 }
637 else if (part->XferInfo[i].state == XFER_ERASED) {
638 DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i);
639 n=1;
640 prepare_xfer(part, i);
641 }
642 if (part->XferInfo[i].state == XFER_PREPARED) {
643 DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i);
644 n=1;
645 if (part->XferInfo[i].EraseCount <= best) {
646 best = part->XferInfo[i].EraseCount;
647 xfer = i;
648 }
649 }
650 if (!n)
651 DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
652
653 }
654 if (xfer == 0xffff) {
655 if (queued) {
656 DEBUG(1, "ftl_cs: waiting for transfer "
657 "unit to be prepared...\n");
658 if (part->mbd.mtd->sync)
659 part->mbd.mtd->sync(part->mbd.mtd);
660 } else {
661 static int ne = 0;
662 if (++ne < 5)
663 printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
664 "suitable transfer units!\n");
665 else
666 DEBUG(1, "ftl_cs: reclaim failed: no "
667 "suitable transfer units!\n");
668
669 return -EIO;
670 }
671 }
672 } while (xfer == 0xffff);
673
674 eun = 0;
675 if ((jiffies % shuffle_freq) == 0) {
676 DEBUG(1, "ftl_cs: recycling freshest block...\n");
677 best = 0xffffffff;
678 for (i = 0; i < part->DataUnits; i++)
679 if (part->EUNInfo[i].EraseCount <= best) {
680 best = part->EUNInfo[i].EraseCount;
681 eun = i;
682 }
683 } else {
684 best = 0;
685 for (i = 0; i < part->DataUnits; i++)
686 if (part->EUNInfo[i].Deleted >= best) {
687 best = part->EUNInfo[i].Deleted;
688 eun = i;
689 }
690 if (best == 0) {
691 static int ne = 0;
692 if (++ne < 5)
693 printk(KERN_NOTICE "ftl_cs: reclaim failed: "
694 "no free blocks!\n");
695 else
696 DEBUG(1,"ftl_cs: reclaim failed: "
697 "no free blocks!\n");
698
699 return -EIO;
700 }
701 }
702 ret = copy_erase_unit(part, eun, xfer);
703 if (!ret)
704 erase_xfer(part, xfer);
705 else
706 printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
707 return ret;
708} /* reclaim_block */
709
710/*======================================================================
711
712 Find_free() searches for a free block. If necessary, it updates
713 the BAM cache for the erase unit containing the free block. It
714 returns the block index -- the erase unit is just the currently
715 cached unit. If there are no free blocks, it returns 0 -- this
716 is never a valid data block because it contains the header.
717
718======================================================================*/
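/*
 * Illustrative caller pattern (editorial sketch, not part of the original
 * driver; the variables are the ones used by ftl_write() further below).
 * The returned block index is only meaningful together with the erase
 * unit cached in part->bam_index at the time of the call:
 */
#if 0
	blk = find_free(part);
	if (blk == 0)
		return -ENOSPC;	/* no free block in any data unit */
	log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
#endif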
719
720#ifdef PSYCHO_DEBUG
721static void dump_lists(partition_t *part)
722{
723 int i;
724 printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
725 for (i = 0; i < part->DataUnits; i++)
726 printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
727 "%d deleted\n", i,
728 part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
729 part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
730}
731#endif
732
733static u_int32_t find_free(partition_t *part)
734{
735 u_int16_t stop, eun;
736 u_int32_t blk;
737 size_t retlen;
738 int ret;
739
740 /* Find an erase unit with some free space */
741 stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
742 eun = stop;
743 do {
744 if (part->EUNInfo[eun].Free != 0) break;
745 /* Wrap around at end of table */
746 if (++eun == part->DataUnits) eun = 0;
747 } while (eun != stop);
748
749 if (part->EUNInfo[eun].Free == 0)
750 return 0;
751
752 /* Is this unit's BAM cached? */
753 if (eun != part->bam_index) {
754 /* Invalidate cache */
755 part->bam_index = 0xffff;
756
757 ret = part->mbd.mtd->read(part->mbd.mtd,
758 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
759 part->BlocksPerUnit * sizeof(u_int32_t),
760 &retlen, (u_char *) (part->bam_cache));
761
762 if (ret) {
763 printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
764 return 0;
765 }
766 part->bam_index = eun;
767 }
768
769 /* Find a free block */
770 for (blk = 0; blk < part->BlocksPerUnit; blk++)
771 if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
772 if (blk == part->BlocksPerUnit) {
773#ifdef PSYCHO_DEBUG
774 static int ne = 0;
775 if (++ne == 1)
776 dump_lists(part);
777#endif
778 printk(KERN_NOTICE "ftl_cs: bad free list!\n");
779 return 0;
780 }
781 DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
782 return blk;
783
784} /* find_free */
785
786
787/*======================================================================
788
789 Read a series of sectors from an FTL partition.
790
791======================================================================*/
792
793static int ftl_read(partition_t *part, caddr_t buffer,
794 u_long sector, u_long nblocks)
795{
796 u_int32_t log_addr, bsize;
797 u_long i;
798 int ret;
799 size_t offset, retlen;
800
801 DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
802 part, sector, nblocks);
803 if (!(part->state & FTL_FORMATTED)) {
804 printk(KERN_NOTICE "ftl_cs: bad partition\n");
805 return -EIO;
806 }
807 bsize = 1 << part->header.EraseUnitSize;
808
809 for (i = 0; i < nblocks; i++) {
810 if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
811 printk(KERN_NOTICE "ftl_cs: bad read offset\n");
812 return -EIO;
813 }
814 log_addr = part->VirtualBlockMap[sector+i];
815 if (log_addr == 0xffffffff)
816 memset(buffer, 0, SECTOR_SIZE);
817 else {
818 offset = (part->EUNInfo[log_addr / bsize].Offset
819 + (log_addr % bsize));
820 ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
821 &retlen, (u_char *) buffer);
822
823 if (ret) {
824 printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
825 return ret;
826 }
827 }
828 buffer += SECTOR_SIZE;
829 }
830 return 0;
831} /* ftl_read */
832
833/*======================================================================
834
835 Write a series of sectors to an FTL partition
836
837======================================================================*/
838
839static int set_bam_entry(partition_t *part, u_int32_t log_addr,
840 u_int32_t virt_addr)
841{
842 u_int32_t bsize, blk, le_virt_addr;
843#ifdef PSYCHO_DEBUG
844 u_int32_t old_addr;
845#endif
846 u_int16_t eun;
847 int ret;
848 size_t retlen, offset;
849
850 DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
851 part, log_addr, virt_addr);
852 bsize = 1 << part->header.EraseUnitSize;
853 eun = log_addr / bsize;
854 blk = (log_addr % bsize) / SECTOR_SIZE;
855 offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
856 le32_to_cpu(part->header.BAMOffset));
857
858#ifdef PSYCHO_DEBUG
859 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
860 &retlen, (u_char *)&old_addr);
861 if (ret) {
862 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
863 return ret;
864 }
865 old_addr = le32_to_cpu(old_addr);
866
867 if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
868 ((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
869 (!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
870 static int ne = 0;
871 if (++ne < 5) {
872 printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
873 printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
874 ", new = 0x%x\n", log_addr, old_addr, virt_addr);
875 }
876 return -EIO;
877 }
878#endif
879 le_virt_addr = cpu_to_le32(virt_addr);
880 if (part->bam_index == eun) {
881#ifdef PSYCHO_DEBUG
882 if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
883 static int ne = 0;
884 if (++ne < 5) {
885 printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
886 "inconsistency!\n");
887 printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
888 " = 0x%x\n",
889 le32_to_cpu(part->bam_cache[blk]), old_addr);
890 }
891 return -EIO;
892 }
893#endif
894 part->bam_cache[blk] = le_virt_addr;
895 }
896 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
897 &retlen, (u_char *)&le_virt_addr);
898
899 if (ret) {
900 printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
901 printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
902 log_addr, virt_addr);
903 }
904 return ret;
905} /* set_bam_entry */
906
907static int ftl_write(partition_t *part, caddr_t buffer,
908 u_long sector, u_long nblocks)
909{
910 u_int32_t bsize, log_addr, virt_addr, old_addr, blk;
911 u_long i;
912 int ret;
913 size_t retlen, offset;
914
915 DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
916 part, sector, nblocks);
917 if (!(part->state & FTL_FORMATTED)) {
918 printk(KERN_NOTICE "ftl_cs: bad partition\n");
919 return -EIO;
920 }
921 /* See if we need to reclaim space, before we start */
922 while (part->FreeTotal < nblocks) {
923 ret = reclaim_block(part);
924 if (ret)
925 return ret;
926 }
927
928 bsize = 1 << part->header.EraseUnitSize;
929
930 virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
931 for (i = 0; i < nblocks; i++) {
932 if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
933 printk(KERN_NOTICE "ftl_cs: bad write offset\n");
934 return -EIO;
935 }
936
937 /* Grab a free block */
938 blk = find_free(part);
939 if (blk == 0) {
940 static int ne = 0;
941 if (++ne < 5)
942 printk(KERN_NOTICE "ftl_cs: internal error: "
943 "no free blocks!\n");
944 return -ENOSPC;
945 }
946
947 /* Tag the BAM entry, and write the new block */
948 log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
949 part->EUNInfo[part->bam_index].Free--;
950 part->FreeTotal--;
951 if (set_bam_entry(part, log_addr, 0xfffffffe))
952 return -EIO;
953 part->EUNInfo[part->bam_index].Deleted++;
954 offset = (part->EUNInfo[part->bam_index].Offset +
955 blk * SECTOR_SIZE);
956 ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
957 buffer);
958
959 if (ret) {
960 printk(KERN_NOTICE "ftl_cs: block write failed!\n");
961 printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
962 " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
963 offset);
964 return -EIO;
965 }
966
967 /* Only delete the old entry when the new entry is ready */
968 old_addr = part->VirtualBlockMap[sector+i];
969 if (old_addr != 0xffffffff) {
970 part->VirtualBlockMap[sector+i] = 0xffffffff;
971 part->EUNInfo[old_addr/bsize].Deleted++;
972 if (set_bam_entry(part, old_addr, 0))
973 return -EIO;
974 }
975
976 /* Finally, set up the new pointers */
977 if (set_bam_entry(part, log_addr, virt_addr))
978 return -EIO;
979 part->VirtualBlockMap[sector+i] = log_addr;
980 part->EUNInfo[part->bam_index].Deleted--;
981
982 buffer += SECTOR_SIZE;
983 virt_addr += SECTOR_SIZE;
984 }
985 return 0;
986} /* ftl_write */
987
988static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
989{
990 partition_t *part = (void *)dev;
991 u_long sect;
992
993 /* Sort of arbitrary: round size down to 4KiB boundary */
994 sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
995
996 geo->heads = 1;
997 geo->sectors = 8;
998 geo->cylinders = sect >> 3;
999
1000 return 0;
1001}
1002
1003static int ftl_readsect(struct mtd_blktrans_dev *dev,
1004 unsigned long block, char *buf)
1005{
1006 return ftl_read((void *)dev, buf, block, 1);
1007}
1008
1009static int ftl_writesect(struct mtd_blktrans_dev *dev,
1010 unsigned long block, char *buf)
1011{
1012 return ftl_write((void *)dev, buf, block, 1);
1013}
1014
1015/*====================================================================*/
1016
1017void ftl_freepart(partition_t *part)
1018{
1019 if (part->VirtualBlockMap) {
1020 vfree(part->VirtualBlockMap);
1021 part->VirtualBlockMap = NULL;
1022 }
1023 if (part->VirtualPageMap) {
1024 kfree(part->VirtualPageMap);
1025 part->VirtualPageMap = NULL;
1026 }
1027 if (part->EUNInfo) {
1028 kfree(part->EUNInfo);
1029 part->EUNInfo = NULL;
1030 }
1031 if (part->XferInfo) {
1032 kfree(part->XferInfo);
1033 part->XferInfo = NULL;
1034 }
1035 if (part->bam_cache) {
1036 kfree(part->bam_cache);
1037 part->bam_cache = NULL;
1038 }
1039
1040} /* ftl_freepart */
1041
1042static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1043{
1044 partition_t *partition;
1045
1046 partition = kmalloc(sizeof(partition_t), GFP_KERNEL);
1047
1048 if (!partition) {
1049 printk(KERN_WARNING "No memory to scan for FTL on %s\n",
1050 mtd->name);
1051 return;
1052 }
1053
1054 memset(partition, 0, sizeof(partition_t));
1055
1056 partition->mbd.mtd = mtd;
1057
1058 if ((scan_header(partition) == 0) &&
1059 (build_maps(partition) == 0)) {
1060
1061 partition->state = FTL_FORMATTED;
1062#ifdef PCMCIA_DEBUG
1063 printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
1064 le32_to_cpu(partition->header.FormattedSize) >> 10);
1065#endif
1066 partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
1067 partition->mbd.blksize = SECTOR_SIZE;
1068 partition->mbd.tr = tr;
1069 partition->mbd.devnum = -1;
1070 if (!add_mtd_blktrans_dev((void *)partition))
1071 return;
1072 }
1073
1074 ftl_freepart(partition);
1075 kfree(partition);
1076}
1077
1078static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
1079{
1080 del_mtd_blktrans_dev(dev);
1081 ftl_freepart((partition_t *)dev);
1082 kfree(dev);
1083}
1084
1085struct mtd_blktrans_ops ftl_tr = {
1086 .name = "ftl",
1087 .major = FTL_MAJOR,
1088 .part_bits = PART_BITS,
1089 .readsect = ftl_readsect,
1090 .writesect = ftl_writesect,
1091 .getgeo = ftl_getgeo,
1092 .add_mtd = ftl_add_mtd,
1093 .remove_dev = ftl_remove_dev,
1094 .owner = THIS_MODULE,
1095};
1096
1097int init_ftl(void)
1098{
1099 DEBUG(0, "$Id: ftl.c,v 1.54 2004/11/16 18:33:15 dwmw2 Exp $\n");
1100
1101 return register_mtd_blktrans(&ftl_tr);
1102}
1103
1104static void __exit cleanup_ftl(void)
1105{
1106 deregister_mtd_blktrans(&ftl_tr);
1107}
1108
1109module_init(init_ftl);
1110module_exit(cleanup_ftl);
1111
1112
1113MODULE_LICENSE("Dual MPL/GPL");
1114MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
1115MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
new file mode 100644
index 000000000000..39eb53f6551f
--- /dev/null
+++ b/drivers/mtd/inftlcore.c
@@ -0,0 +1,912 @@
1/*
2 * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
3 *
4 * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
5 *
6 * Based heavily on the nftlcore.c code which is:
7 * (c) 1999 Machine Vision Holdings, Inc.
8 * Author: David Woodhouse <dwmw2@infradead.org>
9 *
10 * $Id: inftlcore.c,v 1.18 2004/11/16 18:28:59 dwmw2 Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/delay.h>
31#include <linux/slab.h>
32#include <linux/sched.h>
33#include <linux/init.h>
34#include <linux/kmod.h>
35#include <linux/hdreg.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nftl.h>
38#include <linux/mtd/inftl.h>
39#include <asm/uaccess.h>
40#include <asm/errno.h>
41#include <asm/io.h>
42
43/*
44 * Maximum number of loops while examining next block, to have a
45 * chance to detect consistency problems (they should never happen
46 * because of the checks done in the mounting.
47 */
48#define MAX_LOOPS 10000
49
50extern void INFTL_dumptables(struct INFTLrecord *inftl);
51extern void INFTL_dumpVUchains(struct INFTLrecord *inftl);
52
53static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
54{
55 struct INFTLrecord *inftl;
56 unsigned long temp;
57
58 if (mtd->type != MTD_NANDFLASH)
59 return;
60 /* OK, this is moderately ugly. But probably safe. Alternatives? */
61 if (memcmp(mtd->name, "DiskOnChip", 10))
62 return;
63
64 if (!mtd->block_isbad) {
65 printk(KERN_ERR
66"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
67"Please use the new diskonchip driver under the NAND subsystem.\n");
68 return;
69 }
70
71 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
72
73 inftl = kmalloc(sizeof(*inftl), GFP_KERNEL);
74
75 if (!inftl) {
76 printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
77 return;
78 }
79 memset(inftl, 0, sizeof(*inftl));
80
81 inftl->mbd.mtd = mtd;
82 inftl->mbd.devnum = -1;
83 inftl->mbd.blksize = 512;
84 inftl->mbd.tr = tr;
85 memcpy(&inftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
86 inftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
87
88 if (INFTL_mount(inftl) < 0) {
89 printk(KERN_WARNING "INFTL: could not mount device\n");
90 kfree(inftl);
91 return;
92 }
93
94 /* OK, it's a new one. Set up all the data structures. */
95
96 /* Calculate geometry */
97 inftl->cylinders = 1024;
98 inftl->heads = 16;
99
100 temp = inftl->cylinders * inftl->heads;
101 inftl->sectors = inftl->mbd.size / temp;
102 if (inftl->mbd.size % temp) {
103 inftl->sectors++;
104 temp = inftl->cylinders * inftl->sectors;
105 inftl->heads = inftl->mbd.size / temp;
106
107 if (inftl->mbd.size % temp) {
108 inftl->heads++;
109 temp = inftl->heads * inftl->sectors;
110 inftl->cylinders = inftl->mbd.size / temp;
111 }
112 }
113
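	/*
	 * Worked example (editorial note): a 32MiB device exposes
	 * inftl->mbd.size == 65536 sectors of 512 bytes, which divides
	 * evenly into C:1024 H:16 S:4, so none of the adjustments above
	 * are needed.
	 */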
114 if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
115 /*
116 Oh no we don't have
117 mbd.size == heads * cylinders * sectors
118 */
119 printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
120 "match size of 0x%lx.\n", inftl->mbd.size);
121 printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
122 "(== 0x%lx sects)\n",
123 inftl->cylinders, inftl->heads , inftl->sectors,
124 (long)inftl->cylinders * (long)inftl->heads *
125 (long)inftl->sectors );
126 }
127
128 if (add_mtd_blktrans_dev(&inftl->mbd)) {
129 if (inftl->PUtable)
130 kfree(inftl->PUtable);
131 if (inftl->VUtable)
132 kfree(inftl->VUtable);
133 kfree(inftl);
134 return;
135 }
136#ifdef PSYCHO_DEBUG
137 printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
138#endif
139 return;
140}
141
142static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
143{
144 struct INFTLrecord *inftl = (void *)dev;
145
146 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
147
148 del_mtd_blktrans_dev(dev);
149
150 if (inftl->PUtable)
151 kfree(inftl->PUtable);
152 if (inftl->VUtable)
153 kfree(inftl->VUtable);
154 kfree(inftl);
155}
156
157/*
158 * Actual INFTL access routines.
159 */
160
161/*
162 * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
163 * This function is used when the given Virtual Unit Chain needs a new Erase Unit.
164 */
165static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
166{
167 u16 pot = inftl->LastFreeEUN;
168 int silly = inftl->nb_blocks;
169
170 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
171 "desperate=%d)\n", inftl, desperate);
172
173 /*
174 * Normally, we force a fold to happen before we run out of free
175 * blocks completely.
176 */
177 if (!desperate && inftl->numfreeEUNs < 2) {
178 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
179 "EUNs (%d)\n", inftl->numfreeEUNs);
180 return 0xffff;
181 }
182
183 /* Scan for a free block */
184 do {
185 if (inftl->PUtable[pot] == BLOCK_FREE) {
186 inftl->LastFreeEUN = pot;
187 return pot;
188 }
189
190 if (++pot > inftl->lastEUN)
191 pot = 0;
192
193 if (!silly--) {
194 printk(KERN_WARNING "INFTL: no free blocks found! "
195 "EUN range = %d - %d\n", 0, inftl->LastFreeEUN);
196 return BLOCK_NIL;
197 }
198 } while (pot != inftl->LastFreeEUN);
199
200 return BLOCK_NIL;
201}
202
203static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
204{
205 u16 BlockMap[MAX_SECTORS_PER_UNIT];
206 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
207 unsigned int thisEUN, prevEUN, status;
208 int block, silly;
209 unsigned int targetEUN;
210 struct inftl_oob oob;
211 size_t retlen;
212
213 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
214 "pending=%d)\n", inftl, thisVUC, pendingblock);
215
216 memset(BlockMap, 0xff, sizeof(BlockMap));
217 memset(BlockDeleted, 0, sizeof(BlockDeleted));
218
219 thisEUN = targetEUN = inftl->VUtable[thisVUC];
220
221 if (thisEUN == BLOCK_NIL) {
222 printk(KERN_WARNING "INFTL: trying to fold non-existent "
223 "Virtual Unit Chain %d!\n", thisVUC);
224 return BLOCK_NIL;
225 }
226
227 /*
228 * Scan to find the Erase Unit which holds the actual data for each
229 * 512-byte block within the Chain.
230 */
231 silly = MAX_LOOPS;
232 while (thisEUN < inftl->nb_blocks) {
233 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
234 if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
235 continue;
236
237 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize)
238 + (block * SECTORSIZE), 16 , &retlen,
239 (char *)&oob) < 0)
240 status = SECTOR_IGNORE;
241 else
242 status = oob.b.Status | oob.b.Status1;
243
244 switch(status) {
245 case SECTOR_FREE:
246 case SECTOR_IGNORE:
247 break;
248 case SECTOR_USED:
249 BlockMap[block] = thisEUN;
250 continue;
251 case SECTOR_DELETED:
252 BlockDeleted[block] = 1;
253 continue;
254 default:
255 printk(KERN_WARNING "INFTL: unknown status "
256 "for block %d in EUN %d: %x\n",
257 block, thisEUN, status);
258 break;
259 }
260 }
261
262 if (!silly--) {
263 printk(KERN_WARNING "INFTL: infinite loop in Virtual "
264 "Unit Chain 0x%x\n", thisVUC);
265 return BLOCK_NIL;
266 }
267
268 thisEUN = inftl->PUtable[thisEUN];
269 }
270
271 /*
272 * OK. We now know the location of every block in the Virtual Unit
273 * Chain, and the Erase Unit into which we are supposed to be copying.
274 * Go for it.
275 */
276 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
277 thisVUC, targetEUN);
278
279 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
280 unsigned char movebuf[SECTORSIZE];
281 int ret;
282
283 /*
284 * If it's in the target EUN already, or if it's pending write,
285 * do nothing.
286 */
287 if (BlockMap[block] == targetEUN || (pendingblock ==
288 (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
289 continue;
290 }
291
292 /*
293 * Copy only in non free block (free blocks can only
294 * happen in case of media errors or deleted blocks).
295 */
296 if (BlockMap[block] == BLOCK_NIL)
297 continue;
298
299 ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
300 BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE,
301 &retlen, movebuf);
302 if (ret < 0) {
303 ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
304 BlockMap[block]) + (block * SECTORSIZE),
305 SECTORSIZE, &retlen, movebuf);
306 if (ret != -EIO)
307 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
308 "away on retry?\n");
309 }
310 memset(&oob, 0xff, sizeof(struct inftl_oob));
311 oob.b.Status = oob.b.Status1 = SECTOR_USED;
312 MTD_WRITEECC(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
313 (block * SECTORSIZE), SECTORSIZE, &retlen,
314 movebuf, (char *)&oob, &inftl->oobinfo);
315 }
316
317 /*
318 * Newest unit in chain now contains data from _all_ older units.
319 * So go through and erase each unit in chain, oldest first. (This
320 * is important: by doing oldest first, if we crash/reboot then
321 * it is relatively simple to clean up the mess).
322 */
323 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
324 thisVUC);
325
326 for (;;) {
327 /* Find oldest unit in chain. */
328 thisEUN = inftl->VUtable[thisVUC];
329 prevEUN = BLOCK_NIL;
330 while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
331 prevEUN = thisEUN;
332 thisEUN = inftl->PUtable[thisEUN];
333 }
334
335 /* Check if we are all done */
336 if (thisEUN == targetEUN)
337 break;
338
339 if (INFTL_formatblock(inftl, thisEUN) < 0) {
340 /*
341 * Could not erase : mark block as reserved.
342 */
343 inftl->PUtable[thisEUN] = BLOCK_RESERVED;
344 } else {
345 /* Correctly erased : mark it as free */
346 inftl->PUtable[thisEUN] = BLOCK_FREE;
347 inftl->PUtable[prevEUN] = BLOCK_NIL;
348 inftl->numfreeEUNs++;
349 }
350 }
351
352 return targetEUN;
353}
354
355static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
356{
357 /*
358 * This is the part that needs some cleverness applied.
359 * For now, I'm doing the minimum applicable to actually
360 * get the thing to work.
361 * Wear-levelling and other clever stuff needs to be implemented
362 * and we also need to do some assessment of the results when
363 * the system loses power half-way through the routine.
364 */
365 u16 LongestChain = 0;
366 u16 ChainLength = 0, thislen;
367 u16 chain, EUN;
368
369 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
370 "pending=%d)\n", inftl, pendingblock);
371
372 for (chain = 0; chain < inftl->nb_blocks; chain++) {
373 EUN = inftl->VUtable[chain];
374 thislen = 0;
375
376 while (EUN <= inftl->lastEUN) {
377 thislen++;
378 EUN = inftl->PUtable[EUN];
379 if (thislen > 0xff00) {
380 printk(KERN_WARNING "INFTL: endless loop in "
381 "Virtual Chain %d: Unit %x\n",
382 chain, EUN);
383 /*
384 * Actually, don't return failure.
385 * Just ignore this chain and get on with it.
386 */
387 thislen = 0;
388 break;
389 }
390 }
391
392 if (thislen > ChainLength) {
393 ChainLength = thislen;
394 LongestChain = chain;
395 }
396 }
397
398 if (ChainLength < 2) {
399 printk(KERN_WARNING "INFTL: no Virtual Unit Chains available "
400 "for folding. Failing request\n");
401 return BLOCK_NIL;
402 }
403
404 return INFTL_foldchain(inftl, LongestChain, pendingblock);
405}
406
407static int nrbits(unsigned int val, int bitcount)
408{
409 int i, total = 0;
410
411 for (i = 0; (i < bitcount); i++)
412 total += (((0x1 << i) & val) ? 1 : 0);
413 return total;
414}
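
#if 0	/* Added note: illustrative sketch only, not built with the driver;
	   nrbits_example() is not part of the original code. */
/*
 * nrbits() counts the bits set in the low 'bitcount' bits of 'val'.
 * INFTL_findwriteunit() below uses it to pack four one-bit parities
 * into the parityPerField byte (bit 0: VUC number, bit 1: previous
 * unit, bit 2: ANAC, bit 3: NACs).
 */
static void nrbits_example(void)
{
	unsigned char parity;

	/* 0xb == 1011b has three set bits. */
	BUG_ON(nrbits(0xb, 4) != 3);

	/* VUC 0x0003: two set bits, even parity, so bit 0 stays clear. */
	parity = (nrbits(0x0003, 16) & 0x1) ? 0x1 : 0;
	/* ANAC 0x07: three set bits, odd parity, so the 0x4 bit is set. */
	parity |= (nrbits(0x07, 8) & 0x1) ? 0x4 : 0;

	/* parity is now 0x4. */
	BUG_ON(parity != 0x4);
}
#endif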
415
416/*
417 * INFTL_findwriteunit: Return the unit number into which we can write
418 * for this block. Make it available if it isn't already.
419 */
420static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
421{
422 unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
423 unsigned int thisEUN, writeEUN, prev_block, status;
424 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
425 struct inftl_oob oob;
426 struct inftl_bci bci;
427 unsigned char anac, nacs, parity;
428 size_t retlen;
429 int silly, silly2 = 3;
430
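	/*
	 * Added note (illustrative only): blockofs is the byte offset of
	 * this 512-byte sector within its Erase Unit; the mask assumes
	 * EraseSize is a power of two.  For example, with EraseSize ==
	 * 0x4000 (16KiB) and block == 37: 37 * 512 == 0x4a00, and
	 * 0x4a00 & 0x3fff == 0xa00, so the sector sits 0xa00 bytes
	 * (sector 5) into its unit.
	 */
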
431 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
432 "block=%d)\n", inftl, block);
433
434 do {
435 /*
436 * Scan the media to find a unit in the VUC which has
437		 * free space for the block in question.
438 */
439 writeEUN = BLOCK_NIL;
440 thisEUN = inftl->VUtable[thisVUC];
441 silly = MAX_LOOPS;
442
443 while (thisEUN <= inftl->lastEUN) {
444 MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
445 blockofs, 8, &retlen, (char *)&bci);
446
447 status = bci.Status | bci.Status1;
448 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
449				"EUN %d is %x\n", block, thisEUN, status);
450
451 switch(status) {
452 case SECTOR_FREE:
453 writeEUN = thisEUN;
454 break;
455 case SECTOR_DELETED:
456 case SECTOR_USED:
457 /* Can't go any further */
458 goto hitused;
459 case SECTOR_IGNORE:
460 break;
461 default:
462 /*
463 * Invalid block. Don't use it any more.
464 * Must implement.
465 */
466 break;
467 }
468
469 if (!silly--) {
470 printk(KERN_WARNING "INFTL: infinite loop in "
471 "Virtual Unit Chain 0x%x\n", thisVUC);
472 return 0xffff;
473 }
474
475 /* Skip to next block in chain */
476 thisEUN = inftl->PUtable[thisEUN];
477 }
478
479hitused:
480 if (writeEUN != BLOCK_NIL)
481 return writeEUN;
482
483
484 /*
485 * OK. We didn't find one in the existing chain, or there
486 * is no existing chain. Allocate a new one.
487 */
488 writeEUN = INFTL_findfreeblock(inftl, 0);
489
490 if (writeEUN == BLOCK_NIL) {
491 /*
492 * That didn't work - there were no free blocks just
493 * waiting to be picked up. We're going to have to fold
494 * a chain to make room.
495 */
496 thisEUN = INFTL_makefreeblock(inftl, 0xffff);
497
498 /*
499			 * Hopefully we freed something; let's try again.
500 * This time we are desperate...
501 */
502 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
503 "to find free EUN to accommodate write to "
504 "VUC %d\n", thisVUC);
505 writeEUN = INFTL_findfreeblock(inftl, 1);
506 if (writeEUN == BLOCK_NIL) {
507 /*
508 * Ouch. This should never happen - we should
509 * always be able to make some room somehow.
510 * If we get here, we've allocated more storage
511 * space than actual media, or our makefreeblock
512 * routine is missing something.
513 */
514 printk(KERN_WARNING "INFTL: cannot make free "
515 "space.\n");
516#ifdef DEBUG
517 INFTL_dumptables(inftl);
518 INFTL_dumpVUchains(inftl);
519#endif
520 return BLOCK_NIL;
521 }
522 }
523
524 /*
525 * Insert new block into virtual chain. Firstly update the
526 * block headers in flash...
527 */
528 anac = 0;
529 nacs = 0;
530 thisEUN = inftl->VUtable[thisVUC];
531 if (thisEUN != BLOCK_NIL) {
532 MTD_READOOB(inftl->mbd.mtd, thisEUN * inftl->EraseSize
533 + 8, 8, &retlen, (char *)&oob.u);
534 anac = oob.u.a.ANAC + 1;
535 nacs = oob.u.a.NACs + 1;
536 }
537
538 prev_block = inftl->VUtable[thisVUC];
539 if (prev_block < inftl->nb_blocks)
540 prev_block -= inftl->firstEUN;
541
542 parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
543 parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
544 parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
545 parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;
546
547 oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
548 oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
549 oob.u.a.ANAC = anac;
550 oob.u.a.NACs = nacs;
551 oob.u.a.parityPerField = parity;
552 oob.u.a.discarded = 0xaa;
553
554 MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize + 8, 8,
555 &retlen, (char *)&oob.u);
556
557 /* Also back up header... */
558 oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
559 oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
560 oob.u.b.ANAC = anac;
561 oob.u.b.NACs = nacs;
562 oob.u.b.parityPerField = parity;
563 oob.u.b.discarded = 0xaa;
564
565 MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize +
566 SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
567
568 inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
569 inftl->VUtable[thisVUC] = writeEUN;
570
571 inftl->numfreeEUNs--;
572 return writeEUN;
573
574 } while (silly2--);
575
576 printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
577 "Unit Chain 0x%x\n", thisVUC);
578 return 0xffff;
579}
580
581/*
582 * Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
583 */
584static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
585{
586 unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
587 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
588 unsigned int thisEUN, status;
589 int block, silly;
590 struct inftl_bci bci;
591 size_t retlen;
592
593 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
594 "thisVUC=%d)\n", inftl, thisVUC);
595
596 memset(BlockUsed, 0, sizeof(BlockUsed));
597 memset(BlockDeleted, 0, sizeof(BlockDeleted));
598
599 thisEUN = inftl->VUtable[thisVUC];
600 if (thisEUN == BLOCK_NIL) {
601 printk(KERN_WARNING "INFTL: trying to delete non-existent "
602 "Virtual Unit Chain %d!\n", thisVUC);
603 return;
604 }
605
606 /*
607 * Scan through the Erase Units to determine whether any data is in
608 * each of the 512-byte blocks within the Chain.
609 */
610 silly = MAX_LOOPS;
611 while (thisEUN < inftl->nb_blocks) {
612 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
613 if (BlockUsed[block] || BlockDeleted[block])
614 continue;
615
616 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize)
617 + (block * SECTORSIZE), 8 , &retlen,
618 (char *)&bci) < 0)
619 status = SECTOR_IGNORE;
620 else
621 status = bci.Status | bci.Status1;
622
623 switch(status) {
624 case SECTOR_FREE:
625 case SECTOR_IGNORE:
626 break;
627 case SECTOR_USED:
628 BlockUsed[block] = 1;
629 continue;
630 case SECTOR_DELETED:
631 BlockDeleted[block] = 1;
632 continue;
633 default:
634 printk(KERN_WARNING "INFTL: unknown status "
635 "for block %d in EUN %d: 0x%x\n",
636 block, thisEUN, status);
637 }
638 }
639
640 if (!silly--) {
641 printk(KERN_WARNING "INFTL: infinite loop in Virtual "
642 "Unit Chain 0x%x\n", thisVUC);
643 return;
644 }
645
646 thisEUN = inftl->PUtable[thisEUN];
647 }
648
649 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
650 if (BlockUsed[block])
651 return;
652
653 /*
654 * For each block in the chain free it and make it available
655 * for future use. Erase from the oldest unit first.
656 */
657 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
658
659 for (;;) {
660 u16 *prevEUN = &inftl->VUtable[thisVUC];
661 thisEUN = *prevEUN;
662
663 /* If the chain is all gone already, we're done */
664 if (thisEUN == BLOCK_NIL) {
665			DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisVUC);
666 return;
667 }
668
669 /* Find oldest unit in chain. */
670 while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
671 BUG_ON(thisEUN >= inftl->nb_blocks);
672
673 prevEUN = &inftl->PUtable[thisEUN];
674 thisEUN = *prevEUN;
675 }
676
677 DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
678 thisEUN, thisVUC);
679
680 if (INFTL_formatblock(inftl, thisEUN) < 0) {
681 /*
682 * Could not erase : mark block as reserved.
683 */
684 inftl->PUtable[thisEUN] = BLOCK_RESERVED;
685 } else {
686 /* Correctly erased : mark it as free */
687 inftl->PUtable[thisEUN] = BLOCK_FREE;
688 inftl->numfreeEUNs++;
689 }
690
691 /* Now sort out whatever was pointing to it... */
692 *prevEUN = BLOCK_NIL;
693
694 /* Ideally we'd actually be responsive to new
695 requests while we're doing this -- if there's
696 free space why should others be made to wait? */
697 cond_resched();
698 }
699
700 inftl->VUtable[thisVUC] = BLOCK_NIL;
701}
702
703static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
704{
705 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
706 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
707 unsigned int status;
708 int silly = MAX_LOOPS;
709 size_t retlen;
710 struct inftl_bci bci;
711
712 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
713 "block=%d)\n", inftl, block);
714
715 while (thisEUN < inftl->nb_blocks) {
716 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
717 blockofs, 8, &retlen, (char *)&bci) < 0)
718 status = SECTOR_IGNORE;
719 else
720 status = bci.Status | bci.Status1;
721
722 switch (status) {
723 case SECTOR_FREE:
724 case SECTOR_IGNORE:
725 break;
726 case SECTOR_DELETED:
727 thisEUN = BLOCK_NIL;
728 goto foundit;
729 case SECTOR_USED:
730 goto foundit;
731 default:
732 printk(KERN_WARNING "INFTL: unknown status for "
733 "block %d in EUN %d: 0x%x\n",
734 block, thisEUN, status);
735 break;
736 }
737
738 if (!silly--) {
739 printk(KERN_WARNING "INFTL: infinite loop in Virtual "
740 "Unit Chain 0x%x\n",
741 block / (inftl->EraseSize / SECTORSIZE));
742 return 1;
743 }
744 thisEUN = inftl->PUtable[thisEUN];
745 }
746
747foundit:
748 if (thisEUN != BLOCK_NIL) {
749 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
750
751 if (MTD_READOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0)
752 return -EIO;
753 bci.Status = bci.Status1 = SECTOR_DELETED;
754 if (MTD_WRITEOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0)
755 return -EIO;
756 INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
757 }
758 return 0;
759}
760
761static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
762 char *buffer)
763{
764 struct INFTLrecord *inftl = (void *)mbd;
765 unsigned int writeEUN;
766 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
767 size_t retlen;
768 struct inftl_oob oob;
769 char *p, *pend;
770
771 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
772 "buffer=%p)\n", inftl, block, buffer);
773
774 /* Is block all zero? */
775 pend = buffer + SECTORSIZE;
776 for (p = buffer; p < pend && !*p; p++)
777 ;
778
779 if (p < pend) {
780 writeEUN = INFTL_findwriteunit(inftl, block);
781
782 if (writeEUN == BLOCK_NIL) {
783 printk(KERN_WARNING "inftl_writeblock(): cannot find "
784 "block to write to\n");
785 /*
786 * If we _still_ haven't got a block to use,
787 * we're screwed.
788 */
789 return 1;
790 }
791
792 memset(&oob, 0xff, sizeof(struct inftl_oob));
793 oob.b.Status = oob.b.Status1 = SECTOR_USED;
794 MTD_WRITEECC(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
795 blockofs, SECTORSIZE, &retlen, (char *)buffer,
796 (char *)&oob, &inftl->oobinfo);
797 /*
798 * need to write SECTOR_USED flags since they are not written
799 * in mtd_writeecc
800 */
801 } else {
802 INFTL_deleteblock(inftl, block);
803 }
804
805 return 0;
806}
807
808static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
809 char *buffer)
810{
811 struct INFTLrecord *inftl = (void *)mbd;
812 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
813 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
814 unsigned int status;
815 int silly = MAX_LOOPS;
816 struct inftl_bci bci;
817 size_t retlen;
818
819 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
820 "buffer=%p)\n", inftl, block, buffer);
821
822 while (thisEUN < inftl->nb_blocks) {
823 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
824 blockofs, 8, &retlen, (char *)&bci) < 0)
825 status = SECTOR_IGNORE;
826 else
827 status = bci.Status | bci.Status1;
828
829 switch (status) {
830 case SECTOR_DELETED:
831 thisEUN = BLOCK_NIL;
832 goto foundit;
833 case SECTOR_USED:
834 goto foundit;
835 case SECTOR_FREE:
836 case SECTOR_IGNORE:
837 break;
838 default:
839 printk(KERN_WARNING "INFTL: unknown status for "
840 "block %ld in EUN %d: 0x%04x\n",
841 block, thisEUN, status);
842 break;
843 }
844
845 if (!silly--) {
846 printk(KERN_WARNING "INFTL: infinite loop in "
847 "Virtual Unit Chain 0x%lx\n",
848 block / (inftl->EraseSize / SECTORSIZE));
849 return 1;
850 }
851
852 thisEUN = inftl->PUtable[thisEUN];
853 }
854
855foundit:
856 if (thisEUN == BLOCK_NIL) {
857 /* The requested block is not on the media, return all 0x00 */
858 memset(buffer, 0, SECTORSIZE);
859 } else {
860 size_t retlen;
861 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
862 if (MTD_READ(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen,
863 buffer))
864 return -EIO;
865 }
866 return 0;
867}
868
869static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
870{
871 struct INFTLrecord *inftl = (void *)dev;
872
873 geo->heads = inftl->heads;
874 geo->sectors = inftl->sectors;
875 geo->cylinders = inftl->cylinders;
876
877 return 0;
878}
879
880static struct mtd_blktrans_ops inftl_tr = {
881 .name = "inftl",
882 .major = INFTL_MAJOR,
883 .part_bits = INFTL_PARTN_BITS,
884 .getgeo = inftl_getgeo,
885 .readsect = inftl_readblock,
886 .writesect = inftl_writeblock,
887 .add_mtd = inftl_add_mtd,
888 .remove_dev = inftl_remove_dev,
889 .owner = THIS_MODULE,
890};
891
892extern char inftlmountrev[];
893
894static int __init init_inftl(void)
895{
896 printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.18 $, "
897 "inftlmount.c %s\n", inftlmountrev);
898
899 return register_mtd_blktrans(&inftl_tr);
900}
901
902static void __exit cleanup_inftl(void)
903{
904 deregister_mtd_blktrans(&inftl_tr);
905}
906
907module_init(init_inftl);
908module_exit(cleanup_inftl);
909
910MODULE_LICENSE("GPL");
911MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
912MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
new file mode 100644
index 000000000000..b5dda47395a7
--- /dev/null
+++ b/drivers/mtd/inftlmount.c
@@ -0,0 +1,804 @@
1/*
2 * inftlmount.c -- INFTL mount code with extensive checks.
3 *
4 * Author: Greg Ungerer (gerg@snapgear.com)
5 * (C) Copyright 2002-2003, Greg Ungerer (gerg@snapgear.com)
6 *
7 * Based heavily on the nftlmount.c code which is:
8 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
9 * Copyright (C) 2000 Netgem S.A.
10 *
11 * $Id: inftlmount.c,v 1.16 2004/11/22 13:50:53 kalev Exp $
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <asm/errno.h>
31#include <asm/io.h>
32#include <asm/uaccess.h>
33#include <linux/miscdevice.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <linux/slab.h>
37#include <linux/sched.h>
38#include <linux/init.h>
39#include <linux/mtd/mtd.h>
40#include <linux/mtd/nftl.h>
41#include <linux/mtd/inftl.h>
42#include <linux/mtd/compatmac.h>
43
44char inftlmountrev[]="$Revision: 1.16 $";
45
46/*
47 * find_boot_record: Find the INFTL Media Header and its Spare copy which
48 * contains the various device information of the INFTL partition and
49 * Bad Unit Table. Update the PUtable[] table according to the Bad
50 * Unit Table. PUtable[] is used for management of Erase Unit in
51 * other routines in inftlcore.c and inftlmount.c.
52 */
53static int find_boot_record(struct INFTLrecord *inftl)
54{
55 struct inftl_unittail h1;
56 //struct inftl_oob oob;
57 unsigned int i, block;
58 u8 buf[SECTORSIZE];
59 struct INFTLMediaHeader *mh = &inftl->MediaHdr;
60 struct INFTLPartition *ip;
61 size_t retlen;
62
63 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
64
65 /*
66 * Assume logical EraseSize == physical erasesize for starting the
67 * scan. We'll sort it out later if we find a MediaHeader which says
68 * otherwise.
69 */
70 inftl->EraseSize = inftl->mbd.mtd->erasesize;
71 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
72
73 inftl->MediaUnit = BLOCK_NIL;
74
75 /* Search for a valid boot record */
76 for (block = 0; block < inftl->nb_blocks; block++) {
77 int ret;
78
79 /*
80 * Check for BNAND header first. Then whinge if it's found
81 * but later checks fail.
82 */
83 ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize,
84 SECTORSIZE, &retlen, buf);
85 /* We ignore ret in case the ECC of the MediaHeader is invalid
86 (which is apparently acceptable) */
87 if (retlen != SECTORSIZE) {
88 static int warncount = 5;
89
90 if (warncount) {
91 printk(KERN_WARNING "INFTL: block read at 0x%x "
92 "of mtd%d failed: %d\n",
93 block * inftl->EraseSize,
94 inftl->mbd.mtd->index, ret);
95 if (!--warncount)
96 printk(KERN_WARNING "INFTL: further "
97 "failures for this block will "
98 "not be printed\n");
99 }
100 continue;
101 }
102
103 if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
104 /* BNAND\0 not found. Continue */
105 continue;
106 }
107
108 /* To be safer with BIOS, also use erase mark as discriminant */
109		if ((ret = MTD_READOOB(inftl->mbd.mtd, block * inftl->EraseSize +
110		    SECTORSIZE + 8, 8, &retlen, (char *)&h1)) < 0) {
111			printk(KERN_WARNING "INFTL: BNAND header found at "
112 "0x%x in mtd%d, but OOB data read failed "
113 "(err %d)\n", block * inftl->EraseSize,
114 inftl->mbd.mtd->index, ret);
115 continue;
116 }
117
118
119 /*
120 * This is the first we've seen.
121 * Copy the media header structure into place.
122 */
123 memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
124
125 /* Read the spare media header at offset 4096 */
126 MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize + 4096,
127 SECTORSIZE, &retlen, buf);
128 if (retlen != SECTORSIZE) {
129 printk(KERN_WARNING "INFTL: Unable to read spare "
130 "Media Header\n");
131 return -1;
132 }
133 /* Check if this one is the same as the first one we found. */
134 if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
135 printk(KERN_WARNING "INFTL: Primary and spare Media "
136 "Headers disagree.\n");
137 return -1;
138 }
139
140 mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
141 mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
142 mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
143 mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
144 mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
145 mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
146
147#ifdef CONFIG_MTD_DEBUG_VERBOSE
148 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
149 printk("INFTL: Media Header ->\n"
150 " bootRecordID = %s\n"
151 " NoOfBootImageBlocks = %d\n"
152 " NoOfBinaryPartitions = %d\n"
153 " NoOfBDTLPartitions = %d\n"
154			       "    BlockMultiplierBits = %d\n"
155			       "    FormatFlags = %d\n"
156 " OsakVersion = 0x%x\n"
157 " PercentUsed = %d\n",
158 mh->bootRecordID, mh->NoOfBootImageBlocks,
159 mh->NoOfBinaryPartitions,
160 mh->NoOfBDTLPartitions,
161 mh->BlockMultiplierBits, mh->FormatFlags,
162 mh->OsakVersion, mh->PercentUsed);
163 }
164#endif
165
166 if (mh->NoOfBDTLPartitions == 0) {
167 printk(KERN_WARNING "INFTL: Media Header sanity check "
168 "failed: NoOfBDTLPartitions (%d) == 0, "
169 "must be at least 1\n", mh->NoOfBDTLPartitions);
170 return -1;
171 }
172
173 if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
174 printk(KERN_WARNING "INFTL: Media Header sanity check "
175 "failed: Total Partitions (%d) > 4, "
176 "BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions +
177 mh->NoOfBinaryPartitions,
178 mh->NoOfBDTLPartitions,
179 mh->NoOfBinaryPartitions);
180 return -1;
181 }
182
183 if (mh->BlockMultiplierBits > 1) {
184 printk(KERN_WARNING "INFTL: sorry, we don't support "
185 "UnitSizeFactor 0x%02x\n",
186 mh->BlockMultiplierBits);
187 return -1;
188 } else if (mh->BlockMultiplierBits == 1) {
189 printk(KERN_WARNING "INFTL: support for INFTL with "
190 "UnitSizeFactor 0x%02x is experimental\n",
191 mh->BlockMultiplierBits);
192 inftl->EraseSize = inftl->mbd.mtd->erasesize <<
193 mh->BlockMultiplierBits;
194 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
195 block >>= mh->BlockMultiplierBits;
196 }
197
198 /* Scan the partitions */
199 for (i = 0; (i < 4); i++) {
200 ip = &mh->Partitions[i];
201 ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
202 ip->firstUnit = le32_to_cpu(ip->firstUnit);
203 ip->lastUnit = le32_to_cpu(ip->lastUnit);
204 ip->flags = le32_to_cpu(ip->flags);
205 ip->spareUnits = le32_to_cpu(ip->spareUnits);
206 ip->Reserved0 = le32_to_cpu(ip->Reserved0);
207
208#ifdef CONFIG_MTD_DEBUG_VERBOSE
209 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
210 printk(" PARTITION[%d] ->\n"
211 " virtualUnits = %d\n"
212 " firstUnit = %d\n"
213 " lastUnit = %d\n"
214 " flags = 0x%x\n"
215 " spareUnits = %d\n",
216 i, ip->virtualUnits, ip->firstUnit,
217 ip->lastUnit, ip->flags,
218 ip->spareUnits);
219 }
220#endif
221
222 if (ip->Reserved0 != ip->firstUnit) {
223 struct erase_info *instr = &inftl->instr;
224
225 instr->mtd = inftl->mbd.mtd;
226
227 /*
228 * Most likely this is using the
229				 * undocumented quick mount feature.
230				 * We don't support that; we will need
231 * to erase the hidden block for full
232 * compatibility.
233 */
234 instr->addr = ip->Reserved0 * inftl->EraseSize;
235 instr->len = inftl->EraseSize;
236 MTD_ERASE(inftl->mbd.mtd, instr);
237 }
238 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
239 printk(KERN_WARNING "INFTL: Media Header "
240 "Partition %d sanity check failed\n"
241				       "    firstUnit %d : lastUnit %d < "
242				       "virtualUnits %d\n", i, ip->firstUnit,
243				       ip->lastUnit, ip->virtualUnits);
244 return -1;
245 }
246 if (ip->Reserved1 != 0) {
247 printk(KERN_WARNING "INFTL: Media Header "
248 "Partition %d sanity check failed: "
249 "Reserved1 %d != 0\n",
250 i, ip->Reserved1);
251 return -1;
252 }
253
254 if (ip->flags & INFTL_BDTL)
255 break;
256 }
257
258 if (i >= 4) {
259 printk(KERN_WARNING "INFTL: Media Header Partition "
260 "sanity check failed:\n No partition "
261 "marked as Disk Partition\n");
262 return -1;
263 }
264
265 inftl->nb_boot_blocks = ip->firstUnit;
266 inftl->numvunits = ip->virtualUnits;
267 if (inftl->numvunits > (inftl->nb_blocks -
268 inftl->nb_boot_blocks - 2)) {
269 printk(KERN_WARNING "INFTL: Media Header sanity check "
270 "failed:\n numvunits (%d) > nb_blocks "
271 "(%d) - nb_boot_blocks(%d) - 2\n",
272 inftl->numvunits, inftl->nb_blocks,
273 inftl->nb_boot_blocks);
274 return -1;
275 }
276
277 inftl->mbd.size = inftl->numvunits *
278 (inftl->EraseSize / SECTORSIZE);
279
280 /*
281 * Block count is set to last used EUN (we won't need to keep
282 * any meta-data past that point).
283 */
284 inftl->firstEUN = ip->firstUnit;
285 inftl->lastEUN = ip->lastUnit;
286 inftl->nb_blocks = ip->lastUnit + 1;
287
288 /* Memory alloc */
289 inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
290 if (!inftl->PUtable) {
291 printk(KERN_WARNING "INFTL: allocation of PUtable "
292 "failed (%zd bytes)\n",
293 inftl->nb_blocks * sizeof(u16));
294 return -ENOMEM;
295 }
296
297 inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
298 if (!inftl->VUtable) {
299 kfree(inftl->PUtable);
300 printk(KERN_WARNING "INFTL: allocation of VUtable "
301 "failed (%zd bytes)\n",
302 inftl->nb_blocks * sizeof(u16));
303 return -ENOMEM;
304 }
305
306 /* Mark the blocks before INFTL MediaHeader as reserved */
307 for (i = 0; i < inftl->nb_boot_blocks; i++)
308 inftl->PUtable[i] = BLOCK_RESERVED;
309 /* Mark all remaining blocks as potentially containing data */
310 for (; i < inftl->nb_blocks; i++)
311 inftl->PUtable[i] = BLOCK_NOTEXPLORED;
312
313 /* Mark this boot record (NFTL MediaHeader) block as reserved */
314 inftl->PUtable[block] = BLOCK_RESERVED;
315
316 /* Read Bad Erase Unit Table and modify PUtable[] accordingly */
317 for (i = 0; i < inftl->nb_blocks; i++) {
318 int physblock;
319 /* If any of the physical eraseblocks are bad, don't
320 use the unit. */
321 for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
322 if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
323 inftl->PUtable[i] = BLOCK_RESERVED;
324 }
325 }
326
327 inftl->MediaUnit = block;
328 return 0;
329 }
330
331 /* Not found. */
332 return -1;
333}
334
335static int memcmpb(void *a, int c, int n)
336{
337 int i;
338 for (i = 0; i < n; i++) {
339 if (c != ((unsigned char *)a)[i])
340 return 1;
341 }
342 return 0;
343}
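
/*
 * Added note (illustrative only): memcmpb() returns 0 iff every one of
 * the n bytes at 'a' equals 'c'.  check_free_sectors() below relies on
 * this; for example memcmpb(buf, 0xff, SECTORSIZE) == 0 means the whole
 * sector still reads as erased flash (all 0xff).
 */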
344
345/*
346 * check_free_sectors: check if a free sector is actually FREE,
347 *	i.e. all 0xff in both the data and OOB areas.
348 */
349static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
350 int len, int check_oob)
351{
352 u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
353 size_t retlen;
354 int i;
355
356 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
357 "address=0x%x,len=%d,check_oob=%d)\n", inftl,
358 address, len, check_oob);
359
360 for (i = 0; i < len; i += SECTORSIZE) {
361 if (MTD_READECC(inftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &inftl->oobinfo) < 0)
362 return -1;
363 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
364 return -1;
365
366 if (check_oob) {
367 if (memcmpb(buf + SECTORSIZE, 0xff, inftl->mbd.mtd->oobsize) != 0)
368 return -1;
369 }
370 address += SECTORSIZE;
371 }
372
373 return 0;
374}
375
376/*
377 * INFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the
378 *	Erase Unit and update the INFTL metadata. Each erase operation is
379 *	checked with check_free_sectors.
380 *
381 *	Return: 0 on success, -1 on error.
382 *
383 *	ToDo: 1. Is it necessary to run check_free_sectors after erasing?
384 */
385int INFTL_formatblock(struct INFTLrecord *inftl, int block)
386{
387 size_t retlen;
388 struct inftl_unittail uci;
389 struct erase_info *instr = &inftl->instr;
390 int physblock;
391
392 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
393 "block=%d)\n", inftl, block);
394
395 memset(instr, 0, sizeof(struct erase_info));
396
397 /* FIXME: Shouldn't we be setting the 'discarded' flag to zero
398 _first_? */
399
400 /* Use async erase interface, test return code */
401 instr->mtd = inftl->mbd.mtd;
402 instr->addr = block * inftl->EraseSize;
403 instr->len = inftl->mbd.mtd->erasesize;
404 /* Erase one physical eraseblock at a time, even though the NAND api
405	   allows us to group them. This way, if we have a failure, we can
406 mark only the failed block in the bbt. */
407 for (physblock = 0; physblock < inftl->EraseSize; physblock += instr->len, instr->addr += instr->len) {
408 MTD_ERASE(inftl->mbd.mtd, instr);
409
410 if (instr->state == MTD_ERASE_FAILED) {
411 printk(KERN_WARNING "INFTL: error while formatting block %d\n",
412 block);
413 goto fail;
414 }
415
416 /*
417 * Check the "freeness" of Erase Unit before updating metadata.
418		 * FixMe: is this check really necessary, since we already check
419		 * the return code after the erase operation?
420 */
421 if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
422 goto fail;
423 }
424
425 uci.EraseMark = cpu_to_le16(ERASE_MARK);
426 uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
427 uci.Reserved[0] = 0;
428 uci.Reserved[1] = 0;
429 uci.Reserved[2] = 0;
430 uci.Reserved[3] = 0;
431 instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
432 if (MTD_WRITEOOB(inftl->mbd.mtd, instr->addr +
433 8, 8, &retlen, (char *)&uci) < 0)
434 goto fail;
435 return 0;
436fail:
437 /* could not format, update the bad block table (caller is responsible
438 for setting the PUtable to BLOCK_RESERVED on failure) */
439 inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
440 return -1;
441}
442
443/*
444 * format_chain: Format an invalid Virtual Unit chain. It frees all the Erase
445 * Units in a Virtual Unit Chain, i.e. all the units are disconnected.
446 *
447 * Since the chain is invalid, we will have to erase it from its
448 * head (normally for INFTL we go from the oldest). But if it has a
449 * loop then there is no oldest...
450 */
451static void format_chain(struct INFTLrecord *inftl, unsigned int first_block)
452{
453 unsigned int block = first_block, block1;
454
455 printk(KERN_WARNING "INFTL: formatting chain at block %d\n",
456 first_block);
457
458 for (;;) {
459 block1 = inftl->PUtable[block];
460
461 printk(KERN_WARNING "INFTL: formatting block %d\n", block);
462 if (INFTL_formatblock(inftl, block) < 0) {
463 /*
464			 * Cannot format it! Mark it as a Bad Unit.
465 */
466 inftl->PUtable[block] = BLOCK_RESERVED;
467 } else {
468 inftl->PUtable[block] = BLOCK_FREE;
469 }
470
471 /* Goto next block on the chain */
472 block = block1;
473
474 if (block == BLOCK_NIL || block >= inftl->lastEUN)
475 break;
476 }
477}
478
479void INFTL_dumptables(struct INFTLrecord *s)
480{
481 int i;
482
483 printk("-------------------------------------------"
484 "----------------------------------\n");
485
486 printk("VUtable[%d] ->", s->nb_blocks);
487 for (i = 0; i < s->nb_blocks; i++) {
488 if ((i % 8) == 0)
489 printk("\n%04x: ", i);
490 printk("%04x ", s->VUtable[i]);
491 }
492
493 printk("\n-------------------------------------------"
494 "----------------------------------\n");
495
496 printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
497 for (i = 0; i <= s->lastEUN; i++) {
498 if ((i % 8) == 0)
499 printk("\n%04x: ", i);
500 printk("%04x ", s->PUtable[i]);
501 }
502
503 printk("\n-------------------------------------------"
504 "----------------------------------\n");
505
506 printk("INFTL ->\n"
507 " EraseSize = %d\n"
508 " h/s/c = %d/%d/%d\n"
509 " numvunits = %d\n"
510 " firstEUN = %d\n"
511 " lastEUN = %d\n"
512 " numfreeEUNs = %d\n"
513 " LastFreeEUN = %d\n"
514 " nb_blocks = %d\n"
515 " nb_boot_blocks = %d",
516 s->EraseSize, s->heads, s->sectors, s->cylinders,
517 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
518 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
519
520 printk("\n-------------------------------------------"
521 "----------------------------------\n");
522}
523
524void INFTL_dumpVUchains(struct INFTLrecord *s)
525{
526 int logical, block, i;
527
528 printk("-------------------------------------------"
529 "----------------------------------\n");
530
531 printk("INFTL Virtual Unit Chains:\n");
532 for (logical = 0; logical < s->nb_blocks; logical++) {
533 block = s->VUtable[logical];
534 if (block > s->nb_blocks)
535 continue;
536 printk(" LOGICAL %d --> %d ", logical, block);
537 for (i = 0; i < s->nb_blocks; i++) {
538 if (s->PUtable[block] == BLOCK_NIL)
539 break;
540 block = s->PUtable[block];
541 printk("%d ", block);
542 }
543 printk("\n");
544 }
545
546 printk("-------------------------------------------"
547 "----------------------------------\n");
548}
549
550int INFTL_mount(struct INFTLrecord *s)
551{
552 unsigned int block, first_block, prev_block, last_block;
553 unsigned int first_logical_block, logical_block, erase_mark;
554 int chain_length, do_format_chain;
555 struct inftl_unithead1 h0;
556 struct inftl_unittail h1;
557 size_t retlen;
558 int i;
559 u8 *ANACtable, ANAC;
560
561 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
562
563 /* Search for INFTL MediaHeader and Spare INFTL Media Header */
564 if (find_boot_record(s) < 0) {
565 printk(KERN_WARNING "INFTL: could not find valid boot record?\n");
566 return -1;
567 }
568
569 /* Init the logical to physical table */
570 for (i = 0; i < s->nb_blocks; i++)
571 s->VUtable[i] = BLOCK_NIL;
572
573 logical_block = block = BLOCK_NIL;
574
575 /* Temporary buffer to store ANAC numbers. */
576 ANACtable = kmalloc(s->nb_blocks * sizeof(u8), GFP_KERNEL);
577 memset(ANACtable, 0, s->nb_blocks);
578
579 /*
580 * First pass is to explore each physical unit, and construct the
581 * virtual chains that exist (newest physical unit goes into VUtable).
582 * Any block that is in any way invalid will be left in the
583 * NOTEXPLORED state. Then at the end we will try to format it and
584 * mark it as free.
585 */
586 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
587 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
588 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
589 continue;
590
591 do_format_chain = 0;
592 first_logical_block = BLOCK_NIL;
593 last_block = BLOCK_NIL;
594 block = first_block;
595
596 for (chain_length = 0; ; chain_length++) {
597
598 if ((chain_length == 0) &&
599 (s->PUtable[block] != BLOCK_NOTEXPLORED)) {
600 /* Nothing to do here, onto next block */
601 break;
602 }
603
604 if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8,
605 8, &retlen, (char *)&h0) < 0 ||
606 MTD_READOOB(s->mbd.mtd, block * s->EraseSize +
607 2 * SECTORSIZE + 8, 8, &retlen, (char *)&h1) < 0) {
608 /* Should never happen? */
609 do_format_chain++;
610 break;
611 }
612
613 logical_block = le16_to_cpu(h0.virtualUnitNo);
614 prev_block = le16_to_cpu(h0.prevUnitNo);
615 erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1));
616 ANACtable[block] = h0.ANAC;
617
618 /* Previous block is relative to start of Partition */
619 if (prev_block < s->nb_blocks)
620 prev_block += s->firstEUN;
621
622 /* Already explored partial chain? */
623 if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
624 /* Check if chain for this logical */
625 if (logical_block == first_logical_block) {
626 if (last_block != BLOCK_NIL)
627 s->PUtable[last_block] = block;
628 }
629 break;
630 }
631
632 /* Check for invalid block */
633 if (erase_mark != ERASE_MARK) {
634 printk(KERN_WARNING "INFTL: corrupt block %d "
635 "in chain %d, chain length %d, erase "
636 "mark 0x%x?\n", block, first_block,
637 chain_length, erase_mark);
638 /*
639 * Assume end of chain, probably incomplete
640 * fold/erase...
641 */
642 if (chain_length == 0)
643 do_format_chain++;
644 break;
645 }
646
647 /* Check for it being free already then... */
648 if ((logical_block == BLOCK_FREE) ||
649 (logical_block == BLOCK_NIL)) {
650 s->PUtable[block] = BLOCK_FREE;
651 break;
652 }
653
654 /* Sanity checks on block numbers */
655 if ((logical_block >= s->nb_blocks) ||
656 ((prev_block >= s->nb_blocks) &&
657 (prev_block != BLOCK_NIL))) {
658 if (chain_length > 0) {
659 printk(KERN_WARNING "INFTL: corrupt "
660 "block %d in chain %d?\n",
661 block, first_block);
662 do_format_chain++;
663 }
664 break;
665 }
666
667 if (first_logical_block == BLOCK_NIL) {
668 first_logical_block = logical_block;
669 } else {
670 if (first_logical_block != logical_block) {
671 /* Normal for folded chain... */
672 break;
673 }
674 }
675
676 /*
677 * Current block is valid, so if we followed a virtual
678 * chain to get here then we can set the previous
679 * block pointer in our PUtable now. Then move onto
680 * the previous block in the chain.
681 */
682 s->PUtable[block] = BLOCK_NIL;
683 if (last_block != BLOCK_NIL)
684 s->PUtable[last_block] = block;
685 last_block = block;
686 block = prev_block;
687
688 /* Check for end of chain */
689 if (block == BLOCK_NIL)
690 break;
691
692 /* Validate next block before following it... */
693 if (block > s->lastEUN) {
694 printk(KERN_WARNING "INFTL: invalid previous "
695 "block %d in chain %d?\n", block,
696 first_block);
697 do_format_chain++;
698 break;
699 }
700 }
701
702 if (do_format_chain) {
703 format_chain(s, first_block);
704 continue;
705 }
706
707 /*
708 * Looks like a valid chain then. It may not really be the
709 * newest block in the chain, but it is the newest we have
710 * found so far. We might update it in later iterations of
711 * this loop if we find something newer.
712 */
713 s->VUtable[first_logical_block] = first_block;
714 logical_block = BLOCK_NIL;
715 }
716
717#ifdef CONFIG_MTD_DEBUG_VERBOSE
718 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
719 INFTL_dumptables(s);
720#endif
721
722 /*
723 * Second pass, check for infinite loops in chains. These are
724 * possible because we don't update the previous pointers when
725 * we fold chains. No big deal, just fix them up in PUtable.
726 */
727 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
728 for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
729 block = s->VUtable[logical_block];
730 last_block = BLOCK_NIL;
731
732 /* Check for free/reserved/nil */
733 if (block >= BLOCK_RESERVED)
734 continue;
735
736 ANAC = ANACtable[block];
737 for (i = 0; i < s->numvunits; i++) {
738 if (s->PUtable[block] == BLOCK_NIL)
739 break;
740 if (s->PUtable[block] > s->lastEUN) {
741 printk(KERN_WARNING "INFTL: invalid prev %d, "
742 "in virtual chain %d\n",
743 s->PUtable[block], logical_block);
744 s->PUtable[block] = BLOCK_NIL;
745
746 }
747 if (ANACtable[block] != ANAC) {
748 /*
749 * Chain must point back to itself. This is ok,
750				 * but we will need to adjust the tables with this
751 * newest block and oldest block.
752 */
753 s->VUtable[logical_block] = block;
754 s->PUtable[last_block] = BLOCK_NIL;
755 break;
756 }
757
758 ANAC--;
759 last_block = block;
760 block = s->PUtable[block];
761 }
762
763 if (i >= s->nb_blocks) {
764 /*
765			 * Uh oh, an infinite chain with valid ANACs!
766 * Format whole chain...
767 */
768 format_chain(s, first_block);
769 }
770 }
771
772#ifdef CONFIG_MTD_DEBUG_VERBOSE
773 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
774 INFTL_dumptables(s);
775 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
776 INFTL_dumpVUchains(s);
777#endif
778
779 /*
780 * Third pass, format unreferenced blocks and init free block count.
781 */
782 s->numfreeEUNs = 0;
783 s->LastFreeEUN = BLOCK_NIL;
784
785 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
786 for (block = s->firstEUN; block <= s->lastEUN; block++) {
787 if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
788 printk("INFTL: unreferenced block %d, formatting it\n",
789 block);
790 if (INFTL_formatblock(s, block) < 0)
791 s->PUtable[block] = BLOCK_RESERVED;
792 else
793 s->PUtable[block] = BLOCK_FREE;
794 }
795 if (s->PUtable[block] == BLOCK_FREE) {
796 s->numfreeEUNs++;
797 if (s->LastFreeEUN == BLOCK_NIL)
798 s->LastFreeEUN = block;
799 }
800 }
801
802 kfree(ANACtable);
803 return 0;
804}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
new file mode 100644
index 000000000000..8480057eadb4
--- /dev/null
+++ b/drivers/mtd/maps/Kconfig
@@ -0,0 +1,663 @@
1# drivers/mtd/maps/Kconfig
2# $Id: Kconfig,v 1.42 2005/01/05 16:59:50 dwmw2 Exp $
3
4menu "Mapping drivers for chip access"
5 depends on MTD!=n
6
7config MTD_COMPLEX_MAPPINGS
8 bool "Support non-linear mappings of flash chips"
9 depends on MTD
10 help
11 This causes the chip drivers to allow for complicated
12 paged mappings of flash chips.
13
14config MTD_PHYSMAP
15 tristate "CFI Flash device in physical memory map"
16 depends on MTD_CFI
17 help
18 This provides a 'mapping' driver which allows the CFI probe and
19 command set driver code to communicate with flash chips which
20 are mapped physically into the CPU's memory. You will need to
21 configure the physical address and size of the flash chips on
22 your particular board as well as the bus width, either statically
23 with config options or at run-time.
24
25config MTD_PHYSMAP_START
26 hex "Physical start address of flash mapping"
27 depends on MTD_PHYSMAP
28 default "0x8000000"
29 help
30 This is the physical memory location at which the flash chips
31 are mapped on your particular target board. Refer to the
32 memory map which should hopefully be in the documentation for
33 your board.
34 Ignore this option if you use run-time physmap configuration
35 (i.e., run-time calling physmap_configure()).
36
37config MTD_PHYSMAP_LEN
38 hex "Physical length of flash mapping"
39 depends on MTD_PHYSMAP
40 default "0x4000000"
41 help
42 This is the total length of the mapping of the flash chips on
43 your particular board. If there is space, or aliases, in the
44 physical memory map between the chips, this could be larger
45 than the total amount of flash present. Refer to the memory
46 map which should hopefully be in the documentation for your
47 board.
48 Ignore this option if you use run-time physmap configuration
49 (i.e., run-time calling physmap_configure()).
50
51config MTD_PHYSMAP_BANKWIDTH
52 int "Bank width in octets"
53 depends on MTD_PHYSMAP
54 default "2"
55 help
56 This is the total width of the data bus of the flash devices
57 in octets. For example, if you have a data bus width of 32
58	  bits, you would set the bus width octet value to 4. This is
59 used internally by the CFI drivers.
60 Ignore this option if you use run-time physmap configuration
61 (i.e., run-time calling physmap_configure()).
62
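# Added note: a minimal sketch of the "run-time physmap configuration"
# mentioned in the help texts above, where board setup code fills in the
# parameters itself instead of using the three config options.  The
# physmap_configure() name comes from the help text; its exact prototype
# and the myboard_map_flash() wrapper below are assumptions, not taken
# from this tree:
#
#	#include <linux/init.h>
#	#include <linux/mtd/physmap.h>
#
#	static int __init myboard_map_flash(void)
#	{
#		/* 64MiB window at 0x08000000 on a 16-bit bus, no VPP hook. */
#		physmap_configure(0x08000000, 0x04000000, 2, NULL);
#		return 0;
#	}
#	arch_initcall(myboard_map_flash);
#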
63config MTD_SUN_UFLASH
64 tristate "Sun Microsystems userflash support"
65 depends on (SPARC32 || SPARC64) && MTD_CFI
66 help
67 This provides a 'mapping' driver which supports the way in
68 which user-programmable flash chips are connected on various
69 Sun Microsystems boardsets. This driver will require CFI support
70 in the kernel, so if you did not enable CFI previously, do that now.
71
72config MTD_PNC2000
73 tristate "CFI Flash device mapped on Photron PNC-2000"
74 depends on X86 && MTD_CFI && MTD_PARTITIONS
75 help
76 PNC-2000 is the name of Network Camera product from PHOTRON
77 Ltd. in Japan. It uses CFI-compliant flash.
78
79config MTD_SC520CDP
80 tristate "CFI Flash device mapped on AMD SC520 CDP"
81 depends on X86 && MTD_CFI
82 help
83 The SC520 CDP board has two banks of CFI-compliant chips and one
84 Dual-in-line JEDEC chip. This 'mapping' driver supports that
85 arrangement, implementing three MTD devices.
86
87config MTD_NETSC520
88 tristate "CFI Flash device mapped on AMD NetSc520"
89 depends on X86 && MTD_CFI && MTD_PARTITIONS
90 help
91 This enables access routines for the flash chips on the AMD NetSc520
92 demonstration board. If you have one of these boards and would like
93 to use the flash chips on it, say 'Y'.
94
95config MTD_TS5500
96 tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
97 depends on X86 && MTD_JEDECPROBE && MTD_PARTITIONS
98 help
99 This provides a driver for the on-board flash of the Technologic
100	  Systems TS-5500 board. The flash is split into 3 partitions
101 which are accessed as separate MTD devices.
102
103 mtd0 and mtd2 are the two BIOS drives. Unfortunately the BIOS
104 uses a proprietary flash translation layer from General Software,
105 which is not supported (the drives cannot be mounted). You can
106 create your own file system (jffs for example), but the BIOS
107 won't be able to boot from it.
108
109 mtd1 allows you to reprogram your BIOS. BE VERY CAREFUL.
110
111 Note that jumper 3 ("Write Enable Drive A") must be set
112	  otherwise detection won't succeed.
113
114config MTD_SBC_GXX
115 tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
116 depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS
117 help
118 This provides a driver for the on-board flash of Arcom Control
119 Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
120 By default the flash is split into 3 partitions which are accessed
121 as separate MTD devices. This board utilizes Intel StrataFlash.
122 More info at
123 <http://www.arcomcontrols.com/products/icp/pc104/processors/SBC_GX1.htm>.
124
125config MTD_ELAN_104NC
126 tristate "CFI Flash device mapped on Arcom ELAN-104NC"
127 depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS
128 help
129 This provides a driver for the on-board flash of the Arcom Control
130	  Systems' ELAN-104NC development board. By default the flash
131 is split into 3 partitions which are accessed as separate MTD
132 devices. This board utilizes Intel StrataFlash. More info at
133 <http://www.arcomcontrols.com/products/icp/pc104/processors/ELAN104NC.htm>.
134
135config MTD_LUBBOCK
136 tristate "CFI Flash device mapped on Intel Lubbock XScale eval board"
137 depends on ARCH_LUBBOCK && MTD_CFI_INTELEXT && MTD_PARTITIONS
138 help
139 This provides a driver for the on-board flash of the Intel
140 'Lubbock' XScale evaluation board.
141
142config MTD_OCTAGON
143 tristate "JEDEC Flash device mapped on Octagon 5066 SBC"
144 depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
145 help
146 This provides a 'mapping' driver which supports the way in which
147 the flash chips are connected in the Octagon-5066 Single Board
148 Computer. More information on the board is available at
149 <http://www.octagonsystems.com/CPUpages/5066.html>.
150
151config MTD_VMAX
152 tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301"
153 depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
154 help
155 This provides a 'mapping' driver which supports the way in which
156 the flash chips are connected in the Tempustech VMAX SBC301 Single
157 Board Computer. More information on the board is available at
158 <http://www.tempustech.com/>.
159
160config MTD_SCx200_DOCFLASH
161 tristate "Flash device mapped with DOCCS on NatSemi SCx200"
162 depends on SCx200 && MTD_CFI && MTD_PARTITIONS
163 help
164 Enable support for a flash chip mapped using the DOCCS signal on a
165 National Semiconductor SCx200 processor.
166
167 If you don't know what to do here, say N.
168
169 If compiled as a module, it will be called scx200_docflash.
170
171config MTD_AMD76XROM
172 tristate "BIOS flash chip on AMD76x southbridge"
173 depends on X86 && MTD_JEDECPROBE
174 help
175 Support for treating the BIOS flash chip on AMD76x motherboards
176 as an MTD device - with this you can reprogram your BIOS.
177
178 BE VERY CAREFUL.
179
180config MTD_ICHXROM
181 tristate "BIOS flash chip on Intel Controller Hub 2/3/4/5"
182 depends on X86 && MTD_JEDECPROBE
183 help
184 Support for treating the BIOS flash chip on ICHX motherboards
185 as an MTD device - with this you can reprogram your BIOS.
186
187 BE VERY CAREFUL.
188
189config MTD_SCB2_FLASH
190 tristate "BIOS flash chip on Intel SCB2 boards"
191 depends on X86 && MTD_JEDECPROBE
192 help
193 Support for treating the BIOS flash chip on Intel SCB2 boards
194 as an MTD device - with this you can reprogram your BIOS.
195
196 BE VERY CAREFUL.
197
198config MTD_TSUNAMI
199 tristate "Flash chips on Tsunami TIG bus"
200 depends on ALPHA_TSUNAMI && MTD_COMPLEX_MAPPINGS
201 help
202 Support for the flash chip on Tsunami TIG bus.
203
204config MTD_LASAT
205 tristate "Flash chips on LASAT board"
206 depends on LASAT
207 help
208 Support for the flash chips on the Lasat 100 and 200 boards.
209
210config MTD_NETtel
211 tristate "CFI flash device on SnapGear/SecureEdge"
212 depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE
213 help
214 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
215
216config MTD_PB1XXX
217 tristate "Flash devices on Alchemy PB1xxx boards"
218 depends on MIPS && ( MIPS_PB1000 || MIPS_PB1100 || MIPS_PB1500 )
219 help
220 Flash memory access on Alchemy Pb1000/Pb1100/Pb1500 boards
221
222config MTD_PB1XXX_BOOT
223 bool "PB1x00 boot flash device"
224 depends on MTD_PB1XXX && ( MIPS_PB1100 || MIPS_PB1500 )
225 help
226 Use the first of the two 32MiB flash banks on Pb1100/Pb1500 board.
227 You can say 'Y' to both this and 'MTD_PB1XXX_USER' below, to use
228 both banks.
229
230config MTD_PB1XXX_USER
231 bool "PB1x00 user flash device"
232 depends on MTD_PB1XXX && ( MIPS_PB1100 || MIPS_PB1500 )
233	default y if MTD_PB1XXX_BOOT = n
234 help
235 Use the second of the two 32MiB flash banks on Pb1100/Pb1500 board.
236 You can say 'Y' to both this and 'MTD_PB1XXX_BOOT' above, to use
237 both banks.
238
239config MTD_PB1550
240 tristate "Flash devices on Alchemy PB1550 board"
241 depends on MIPS && MIPS_PB1550
242 help
243 Flash memory access on Alchemy Pb1550 board
244
245config MTD_PB1550_BOOT
246 bool "PB1550 boot flash device"
247 depends on MTD_PB1550
248 help
249 Use the first of the two 64MiB flash banks on Pb1550 board.
250 You can say 'Y' to both this and 'MTD_PB1550_USER' below, to use
251 both banks.
252
253config MTD_PB1550_USER
254 bool "PB1550 user flash device"
255 depends on MTD_PB1550
256 default y if MTD_PB1550_BOOT = n
257 help
258 Use the second of the two 64MiB flash banks on Pb1550 board.
259 You can say 'Y' to both this and 'MTD_PB1550_BOOT' above, to use
260 both banks.
261
262config MTD_DB1550
263 tristate "Flash devices on Alchemy DB1550 board"
264 depends on MIPS && MIPS_DB1550
265 help
266 Flash memory access on Alchemy Db1550 board
267
268config MTD_DB1550_BOOT
269 bool "DB1550 boot flash device"
270 depends on MTD_DB1550
271 help
272 Use the first of the two 64MiB flash banks on Db1550 board.
273 You can say 'Y' to both this and 'MTD_DB1550_USER' below, to use
274 both banks.
275
276config MTD_DB1550_USER
277 bool "DB1550 user flash device"
278 depends on MTD_DB1550
279 default y if MTD_DB1550_BOOT = n
280 help
281 Use the second of the two 64MiB flash banks on Db1550 board.
282 You can say 'Y' to both this and 'MTD_DB1550_BOOT' above, to use
283 both banks.
284
285config MTD_DILNETPC
286 tristate "CFI Flash device mapped on DIL/Net PC"
287 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
288 help
289 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
290 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
291 and <http://www.ssv-embedded.de/ssv/pc104/p170.htm>
292
293config MTD_DILNETPC_BOOTSIZE
294 hex "Size of DIL/Net PC flash boot partition"
295 depends on MTD_DILNETPC
296 default "0x80000"
297 help
298 The amount of space taken up by the kernel or Etherboot
299 on the DIL/Net PC flash chips.
300
301config MTD_L440GX
302 tristate "BIOS flash chip on Intel L440GX boards"
303 depends on X86 && MTD_JEDECPROBE
304 help
305 Support for treating the BIOS flash chip on Intel L440GX motherboards
306 as an MTD device - with this you can reprogram your BIOS.
307
308 BE VERY CAREFUL.
309
310config MTD_SBC8240
311 tristate "Flash device on SBC8240"
312 depends on PPC32 && MTD_JEDECPROBE && 6xx && 8260
313 help
314 Flash access on the SBC8240 board from Wind River. See
315 <http://www.windriver.com/products/sbc8240/>
316
317config MTD_TQM8XXL
318 tristate "CFI Flash device mapped on TQM8XXL"
319 depends on MTD_CFI && PPC32 && 8xx && TQM8xxL
320 help
321	  The TQM8xxL PowerPC board has up to two banks of CFI-compliant
322	  chips, currently AMD ones. This 'mapping' driver supports
323 that arrangement, allowing the CFI probe and command set driver
324 code to communicate with the chips on the TQM8xxL board. More at
325 <http://www.denx.de/embedded-ppc-en.html>.
326
327config MTD_RPXLITE
328 tristate "CFI Flash device mapped on RPX Lite or CLLF"
329 depends on MTD_CFI && PPC32 && 8xx && (RPXCLASSIC || RPXLITE)
330 help
331 The RPXLite PowerPC board has CFI-compliant chips mapped in
332 a strange sparse mapping. This 'mapping' driver supports that
333 arrangement, allowing the CFI probe and command set driver code
334 to communicate with the chips on the RPXLite board. More at
335 <http://www.embeddedplanet.com/>.
336
337config MTD_MBX860
338 tristate "System flash on MBX860 board"
339 depends on MTD_CFI && PPC32 && 8xx && MBX
340 help
341 This enables access routines for the flash chips on the Motorola
342 MBX860 board. If you have one of these boards and would like
343 to use the flash chips on it, say 'Y'.
344
345config MTD_DBOX2
346 tristate "CFI Flash device mapped on D-Box2"
347 depends on PPC32 && 8xx && DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
348 help
349 This enables access routines for the flash chips on the Nokia/Sagem
350 D-Box 2 board. If you have one of these boards and would like to use
351 the flash chips on it, say 'Y'.
352
353config MTD_CFI_FLAGADM
354 tristate "CFI Flash device mapping on FlagaDM"
355 depends on PPC32 && 8xx && MTD_CFI
356 help
357 Mapping for the Flaga digital module. If you don't have one, ignore
358 this setting.
359
360config MTD_BEECH
361 tristate "CFI Flash device mapped on IBM 405LP Beech"
362 depends on MTD_CFI && PPC32 && 40x && BEECH
363 help
364 This enables access routines for the flash chips on the IBM
365 405LP Beech board. If you have one of these boards and would like
366 to use the flash chips on it, say 'Y'.
367
368config MTD_ARCTIC
369 tristate "CFI Flash device mapped on IBM 405LP Arctic"
370 depends on MTD_CFI && PPC32 && 40x && ARCTIC2
371 help
372 This enables access routines for the flash chips on the IBM 405LP
373 Arctic board. If you have one of these boards and would like to
374 use the flash chips on it, say 'Y'.
375
376config MTD_WALNUT
377 tristate "Flash device mapped on IBM 405GP Walnut"
378 depends on MTD_JEDECPROBE && PPC32 && 40x && WALNUT
379 help
380 This enables access routines for the flash chips on the IBM 405GP
381 Walnut board. If you have one of these boards and would like to
382 use the flash chips on it, say 'Y'.
383
384config MTD_EBONY
385 tristate "Flash devices mapped on IBM 440GP Ebony"
386 depends on MTD_JEDECPROBE && PPC32 && 44x && EBONY
387 help
388 This enables access routines for the flash chips on the IBM 440GP
389 Ebony board. If you have one of these boards and would like to
390 use the flash chips on it, say 'Y'.
391
392config MTD_OCOTEA
393 tristate "Flash devices mapped on IBM 440GX Ocotea"
394 depends on MTD_CFI && PPC32 && 44x && OCOTEA
395 help
396 This enables access routines for the flash chips on the IBM 440GX
397 Ocotea board. If you have one of these boards and would like to
398 use the flash chips on it, say 'Y'.
399
400config MTD_REDWOOD
401 tristate "CFI Flash devices mapped on IBM Redwood"
402 depends on MTD_CFI && PPC32 && 4xx && 40x && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 )
403 help
404 This enables access routines for the flash chips on the IBM
405 Redwood board. If you have one of these boards and would like to
406 use the flash chips on it, say 'Y'.
407
408config MTD_CSTM_MIPS_IXX
409 tristate "Flash chip mapping on ITE QED-4N-S01B, Globespan IVR or custom board"
410 depends on MIPS && MTD_CFI && MTD_JEDECPROBE && MTD_PARTITIONS
411 help
412 This provides a mapping driver for the Integrated Technology
413 Express, Inc (ITE) QED-4N-S01B eval board and the Globespan IVR
414 Reference Board. It provides the necessary addressing, length,
415	  buswidth, vpp code and additional setup of the flash device for
416 these boards. In addition, this mapping driver can be used for
417 other boards via setting of the CONFIG_MTD_CSTM_MIPS_IXX_START/
418 LEN/BUSWIDTH parameters. This mapping will provide one mtd device
419 using one partition. The start address can be offset from the
420 beginning of flash and the len can be less than the total flash
421 device size to allow a window into the flash. Both CFI and JEDEC
422 probes are called.
423
424config MTD_CSTM_MIPS_IXX_START
425 hex "Physical start address of flash mapping"
426 depends on MTD_CSTM_MIPS_IXX
427 default "0x8000000"
428 help
429 This is the physical memory location that the MTD driver will
430 use for the flash chips on your particular target board.
431 Refer to the memory map which should hopefully be in the
432 documentation for your board.
433
434config MTD_CSTM_MIPS_IXX_LEN
435 hex "Physical length of flash mapping"
436 depends on MTD_CSTM_MIPS_IXX
437 default "0x4000000"
438 help
439 This is the total length that the MTD driver will use for the
440 flash chips on your particular board. Refer to the memory
441 map which should hopefully be in the documentation for your
442 board.
443
444config MTD_CSTM_MIPS_IXX_BUSWIDTH
445 int "Bus width in octets"
446 depends on MTD_CSTM_MIPS_IXX
447 default "2"
448 help
449 This is the total bus width of the mapping of the flash chips
450 on your particular board.
451
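As a rough illustration of how the three options above are typically consumed -- this is a hedged sketch, not code from this tree -- the configured values end up in a map_info that is ioremap()ed, probed with the CFI probe and then the JEDEC probe, and exposed as a single partition covering the window. The identifiers window_map, window_part and window_init are illustrative only, and the numeric values are simply the Kconfig defaults shown above.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>

static struct map_info window_map = {
	.name      = "custom flash window",
	.phys      = 0x8000000,		/* CONFIG_MTD_CSTM_MIPS_IXX_START */
	.size      = 0x4000000,		/* CONFIG_MTD_CSTM_MIPS_IXX_LEN */
	.bankwidth = 2,			/* CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH */
};

static struct mtd_partition window_part = {
	.name   = "flash window",
	.offset = 0,
	.size   = 0x4000000,		/* one partition spanning the whole window */
};

static int __init window_init(void)
{
	struct mtd_info *mtd;

	window_map.virt = ioremap(window_map.phys, window_map.size);
	if (!window_map.virt)
		return -EIO;
	simple_map_init(&window_map);

	/* Both probes are tried here: CFI first, then JEDEC. */
	mtd = do_map_probe("cfi_probe", &window_map);
	if (!mtd)
		mtd = do_map_probe("jedec_probe", &window_map);
	if (!mtd) {
		iounmap((void *)window_map.virt);
		return -ENXIO;
	}

	mtd->owner = THIS_MODULE;
	return add_mtd_partitions(mtd, &window_part, 1);
}
module_init(window_init);

The real cstm_mips_ixx.c further below also carries board-specific VPP control and a table of mappings; the sketch only shows how the START/LEN/BUSWIDTH values shape the map.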
452config MTD_OCELOT
453 tristate "Momenco Ocelot boot flash device"
454 depends on MIPS && MOMENCO_OCELOT
455 help
456 This enables access routines for the boot flash device and for the
457 NVRAM on the Momenco Ocelot board. If you have one of these boards
458 and would like access to either of these, say 'Y'.
459
460config MTD_SOLUTIONENGINE
461 tristate "CFI Flash device mapped on Hitachi SolutionEngine"
462 depends on SUPERH && MTD_CFI && MTD_REDBOOT_PARTS
463 help
464 This enables access to the flash chips on the Hitachi SolutionEngine and
465 similar boards. Say 'Y' if you are building a kernel for such a board.
466
467config MTD_ARM_INTEGRATOR
468 tristate "CFI Flash device mapped on ARM Integrator/P720T"
469 depends on ARM && MTD_CFI
470
471config MTD_CDB89712
472 tristate "Cirrus CDB89712 evaluation board mappings"
473 depends on ARM && MTD_CFI && ARCH_CDB89712
474 help
475 This enables access to the flash or ROM chips on the CDB89712 board.
476 If you have such a board, say 'Y'.
477
478config MTD_SA1100
479 tristate "CFI Flash device mapped on StrongARM SA11x0"
480 depends on ARM && MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS
481 help
482 This enables access to the flash chips on most platforms based on
483 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
484 If you have such a board, say 'Y'.
485
486config MTD_IPAQ
487 tristate "CFI Flash device mapped on Compaq/HP iPAQ"
488 depends on ARM && IPAQ_HANDHELD && MTD_CFI
489 help
490 This provides a driver for the on-board flash of the iPAQ.
491
492config MTD_DC21285
493 tristate "CFI Flash device mapped on DC21285 Footbridge"
494 depends on ARM && MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
495 help
496 This provides a driver for the flash accessed using Intel's
497 21285 bridge used with Intel's StrongARM processors. More info at
498 <http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
499
500config MTD_IQ80310
501 tristate "CFI Flash device mapped on the XScale IQ80310 board"
502 depends on ARM && MTD_CFI && ARCH_IQ80310
503 help
504 This enables access routines for the flash chips on the Intel XScale
505 IQ80310 evaluation board. If you have one of these boards and would
506 like to use the flash chips on it, say 'Y'.
507
508config MTD_IXP4XX
509 tristate "CFI Flash device mapped on Intel IXP4xx based systems"
510 depends on ARM && MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
511 help
512 This enables MTD access to flash devices on platforms based
513 on Intel's IXP4xx family of network processors such as the
514 IXDP425 and Coyote. If you have an IXP4xx based board and
515 would like to use the flash chips on it, say 'Y'.
516
517config MTD_IXP2000
518 tristate "CFI Flash device mapped on Intel IXP2000 based systems"
519 depends on ARM && MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000
520 help
521 This enables MTD access to flash devices on platforms based
522 on Intel's IXP2000 family of network processors such as the
523 IXDP2400 and IXDP2800. If you have an IXP2000 based board and
524 would like to use the flash chips on it, say 'Y'.
525
526config MTD_EPXA10DB
527 tristate "CFI Flash device mapped on Epxa10db"
528 depends on ARM && MTD_CFI && MTD_PARTITIONS && ARCH_CAMELOT
529 help
530 This enables support for the flash devices on the Altera
531 Excalibur XA10 Development Board. If you are building a kernel
532 for one of these boards, say 'Y'; otherwise say 'N'.
533
534config MTD_FORTUNET
535 tristate "CFI Flash device mapped on the FortuNet board"
536 depends on ARM && MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET
537 help
538 This enables access to the Flash on the FortuNet board. If you
539 have such a board, say 'Y'.
540
541config MTD_AUTCPU12
542 tristate "NV-RAM mapping AUTCPU12 board"
543 depends on ARM && ARCH_AUTCPU12
544 help
545 This enables access to the NV-RAM on the autronix autcpu12 board.
546 If you have such a board, say 'Y'.
547
548config MTD_EDB7312
549 tristate "CFI Flash device mapped on EDB7312"
550 depends on ARM && MTD_CFI
551 help
552 This enables access to the CFI Flash on the Cogent EDB7312 board.
553 If you have such a board, say 'Y' here.
554
555config MTD_IMPA7
556 tristate "JEDEC Flash device mapped on impA7"
557 depends on ARM && MTD_JEDECPROBE
558 help
559 This enables access to the NOR Flash on the impA7 board of
560 implementa GmbH. If you have such a board, say 'Y' here.
561
562config MTD_CEIVA
563 tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
564 depends on ARM && MTD_JEDECPROBE && ARCH_CEIVA
565 help
566 This enables access to the flash chips on the Ceiva/Polaroid
567 PhotoMax Digital Picture Frame.
568 If you have such a device, say 'Y'.
569
570config MTD_NOR_TOTO
571 tristate "NOR Flash device on TOTO board"
572 depends on ARM && ARCH_OMAP && OMAP_TOTO
573 help
574 This enables access to the NOR flash on the Texas Instruments
575 TOTO board.
576
577config MTD_H720X
578 tristate "Hynix evaluation board mappings"
579 depends on ARM && MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
580 help
581 This enables access to the flash chips on the Hynix evaluation boards.
582 If you have such a board, say 'Y'.
583
584config MTD_MPC1211
585 tristate "CFI Flash device mapped on Interface MPC-1211"
586 depends on SUPERH && SH_MPC1211 && MTD_CFI
587 help
588 This enables access to the flash chips on the Interface MPC-1211 (CTP/PCI/MPC-SH02).
589 If you have such a board, say 'Y'.
590
591# This needs CFI or JEDEC, depending on the cards found.
592config MTD_PCI
593 tristate "PCI MTD driver"
594 depends on MTD && PCI && MTD_COMPLEX_MAPPINGS
595 help
596 Mapping for accessing flash devices on add-in cards like the Intel XScale
597 IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode
598 (please see the manual for the link settings).
599
600 If you are not sure, say N.
601
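MTD_PCI (like MTD_PCMCIA below) depends on MTD_COMPLEX_MAPPINGS because flash behind a PCI bridge generally cannot be driven by the plain memory-mapped accessors that simple_map_init() installs for the board maps above; the driver supplies its own read/write/copy methods in the map_info instead. The following is only a hedged sketch of that shape, assuming the map_word-based accessor signatures of this tree's <linux/mtd/map.h>; the pcidev_* names and the 32-bit readl()/writel() translation are illustrative, not the actual drivers/mtd/maps/pci.c code.

#include <linux/types.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

/* Illustrative "complex mapping" accessors for a 32-bit PCI window. */
static map_word pcidev_read(struct map_info *map, unsigned long ofs)
{
	map_word val;

	val.x[0] = readl(map->virt + ofs);
	return val;
}

static void pcidev_write(struct map_info *map, const map_word datum,
			 unsigned long ofs)
{
	writel(datum.x[0], map->virt + ofs);
}

static void pcidev_copy_from(struct map_info *map, void *to,
			     unsigned long from, ssize_t len)
{
	memcpy_fromio(to, map->virt + from, len);
}

static struct map_info pcidev_map = {
	.name      = "example PCI flash window",
	.bankwidth = 4,
	/* With CONFIG_MTD_COMPLEX_MAPPINGS these hooks replace the
	 * accessors that simple_map_init() would otherwise install. */
	.read      = pcidev_read,
	.write     = pcidev_write,
	.copy_from = pcidev_copy_from,
};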
602config MTD_PCMCIA
603 tristate "PCMCIA MTD driver"
604 depends on MTD && PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
605 help
606 Map driver for accessing PCMCIA linear flash memory cards. These
607 cards are usually around 4-16MiB in size. This does not include
608 Compact Flash cards, which are treated as IDE devices.
609
610config MTD_UCLINUX
611 tristate "Generic uClinux RAM/ROM filesystem support"
612 depends on MTD_PARTITIONS && !MMU
613 help
614 Map driver to support image based filesystems for uClinux.
615
616config MTD_WRSBC8260
617 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
618 depends on (SBC82xx || SBC8560)
619 select MTD_PARTITIONS
620 select MTD_MAP_BANK_WIDTH_4
621 select MTD_MAP_BANK_WIDTH_1
622 select MTD_CFI_I1
623 select MTD_CFI_I4
624 help
625 Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
626 all three flash regions on CS0, CS1 and CS6 if they are configured
627 correctly by the boot loader.
628
629config MTD_DMV182
630 tristate "Map driver for Dy-4 SVME/DMV-182 board."
631 depends on DMV182
632 select MTD_PARTITIONS
633 select MTD_MAP_BANK_WIDTH_32
634 select MTD_CFI_I8
635 select MTD_CFI_AMDSTD
636 help
637 Map driver for Dy-4 SVME/DMV-182 board.
638
639config MTD_BAST
640 tristate "Map driver for Simtec BAST (EB2410ITX)"
641 depends on ARCH_BAST
642 select MTD_PARTITIONS
643 select MTD_MAP_BANK_WIDTH_16
644 select MTD_JEDECPROBE
645 help
646 Map driver for NOR flash on the Simtec BAST (EB2410ITX).
647
648 Note that this driver *cannot* override the WP link on the
649 board, nor can it currently detect the state of the link.
650
651config MTD_BAST_MAXSIZE
652 int "Maximum size for BAST flash area (MiB)"
653 depends on MTD_BAST
654 default "4"
655
656config MTD_SHARP_SL
657 bool "ROM mapped on Sharp SL Series"
658 depends on MTD && ARCH_PXA
659 help
660 This enables access to the flash chip on the Sharp SL Series of PDAs.
661
662endmenu
663
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
new file mode 100644
index 000000000000..7ffe02b85301
--- /dev/null
+++ b/drivers/mtd/maps/Makefile
@@ -0,0 +1,73 @@
1#
2# linux/drivers/mtd/maps/Makefile
3#
4# $Id: Makefile.common,v 1.23 2005/01/05 17:06:36 dwmw2 Exp $
5
6ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
7obj-$(CONFIG_MTD) += map_funcs.o
8endif
9
10# Chip mappings
11obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
12obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
13obj-$(CONFIG_MTD_BAST) += bast-flash.o
14obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
15obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
16obj-$(CONFIG_MTD_DC21285) += dc21285.o
17obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
18obj-$(CONFIG_MTD_ELAN_104NC) += elan-104nc.o
19obj-$(CONFIG_MTD_EPXA10DB) += epxa10db-flash.o
20obj-$(CONFIG_MTD_IQ80310) += iq80310.o
21obj-$(CONFIG_MTD_L440GX) += l440gx.o
22obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
23obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
24obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
25obj-$(CONFIG_MTD_LUBBOCK) += lubbock-flash.o
26obj-$(CONFIG_MTD_MBX860) += mbx860.o
27obj-$(CONFIG_MTD_CEIVA) += ceiva.o
28obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
29obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
30obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
31obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
32obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
33obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
34obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
35obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
36obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
37obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
38obj-$(CONFIG_MTD_NETSC520) += netsc520.o
39obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
40obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
41obj-$(CONFIG_MTD_VMAX) += vmax301.o
42obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
43obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
44obj-$(CONFIG_MTD_OCELOT) += ocelot.o
45obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
46obj-$(CONFIG_MTD_PCI) += pci.o
47obj-$(CONFIG_MTD_PB1XXX) += pb1xxx-flash.o
48obj-$(CONFIG_MTD_DB1X00) += db1x00-flash.o
49obj-$(CONFIG_MTD_PB1550) += pb1550-flash.o
50obj-$(CONFIG_MTD_DB1550) += db1550-flash.o
51obj-$(CONFIG_MTD_LASAT) += lasat.o
52obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
53obj-$(CONFIG_MTD_EDB7312) += edb7312.o
54obj-$(CONFIG_MTD_IMPA7) += impa7.o
55obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
56obj-$(CONFIG_MTD_REDWOOD) += redwood.o
57obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
58obj-$(CONFIG_MTD_NETtel) += nettel.o
59obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
60obj-$(CONFIG_MTD_EBONY) += ebony.o
61obj-$(CONFIG_MTD_OCOTEA) += ocotea.o
62obj-$(CONFIG_MTD_BEECH) += beech-mtd.o
63obj-$(CONFIG_MTD_ARCTIC) += arctic-mtd.o
64obj-$(CONFIG_MTD_WALNUT) += walnut.o
65obj-$(CONFIG_MTD_H720X) += h720x-flash.o
66obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
67obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o
68obj-$(CONFIG_MTD_MPC1211) += mpc1211.o
69obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
70obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
71obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
72obj-$(CONFIG_MTD_DMV182) += dmv182.o
73obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
new file mode 100644
index 000000000000..51e97b05304e
--- /dev/null
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -0,0 +1,332 @@
1/*
2 * amd76xrom.c
3 *
4 * Normal mappings of chips in physical memory
5 * $Id: amd76xrom.c,v 1.19 2004/11/28 09:40:39 dwmw2 Exp $
6 */
7
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <asm/io.h>
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/map.h>
15#include <linux/mtd/cfi.h>
16#include <linux/mtd/flashchip.h>
17#include <linux/config.h>
18#include <linux/pci.h>
19#include <linux/pci_ids.h>
20#include <linux/list.h>
21
22
23#define xstr(s) str(s)
24#define str(s) #s
25#define MOD_NAME xstr(KBUILD_BASENAME)
26
27#define ADDRESS_NAME_LEN 18
28
29#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
30
31struct amd76xrom_window {
32 void __iomem *virt;
33 unsigned long phys;
34 unsigned long size;
35 struct list_head maps;
36 struct resource rsrc;
37 struct pci_dev *pdev;
38};
39
40struct amd76xrom_map_info {
41 struct list_head list;
42 struct map_info map;
43 struct mtd_info *mtd;
44 struct resource rsrc;
45 char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
46};
47
48static struct amd76xrom_window amd76xrom_window = {
49 .maps = LIST_HEAD_INIT(amd76xrom_window.maps),
50};
51
52static void amd76xrom_cleanup(struct amd76xrom_window *window)
53{
54 struct amd76xrom_map_info *map, *scratch;
55 u8 byte;
56
57 if (window->pdev) {
58 /* Disable writes through the rom window */
59 pci_read_config_byte(window->pdev, 0x40, &byte);
60 pci_write_config_byte(window->pdev, 0x40, byte & ~1);
61 }
62
63 /* Free all of the mtd devices */
64 list_for_each_entry_safe(map, scratch, &window->maps, list) {
65 if (map->rsrc.parent) {
66 release_resource(&map->rsrc);
67 }
68 del_mtd_device(map->mtd);
69 map_destroy(map->mtd);
70 list_del(&map->list);
71 kfree(map);
72 }
73 if (window->rsrc.parent)
74 release_resource(&window->rsrc);
75
76 if (window->virt) {
77 iounmap(window->virt);
78 window->virt = NULL;
79 window->phys = 0;
80 window->size = 0;
81 window->pdev = NULL;
82 }
83}
84
85
86static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
87 const struct pci_device_id *ent)
88{
89 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
90 u8 byte;
91 struct amd76xrom_window *window = &amd76xrom_window;
92 struct amd76xrom_map_info *map = NULL;
93 unsigned long map_top;
94
95 /* Remember the pci dev I find the window in */
96 window->pdev = pdev;
97
98 /* Assume the rom window is properly set up, and find its size */
99 pci_read_config_byte(pdev, 0x43, &byte);
100 if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) {
101 window->phys = 0xffb00000; /* 5MiB */
102 }
103 else if ((byte & (1<<7)) == (1<<7)) {
104 window->phys = 0xffc00000; /* 4MiB */
105 }
106 else {
107 window->phys = 0xffff0000; /* 64KiB */
108 }
109 window->size = 0xffffffffUL - window->phys + 1UL;
110
111 /*
112 * Try to reserve the window mem region. If this fails, it is
113 * likely because a fragment of the window has already been
114 * "reserved" by the BIOS. In that case, once the rom size is
115 * discovered, we will try to reserve just the unreserved
116 * fragment.
117 */
118 window->rsrc.name = MOD_NAME;
119 window->rsrc.start = window->phys;
120 window->rsrc.end = window->phys + window->size - 1;
121 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
122 if (request_resource(&iomem_resource, &window->rsrc)) {
123 window->rsrc.parent = NULL;
124 printk(KERN_ERR MOD_NAME
125 " %s(): Unable to register resource"
126 " 0x%.08lx-0x%.08lx - kernel bug?\n",
127 __func__,
128 window->rsrc.start, window->rsrc.end);
129 }
130
131#if 0
132
133 /* Enable the selected rom window */
134 pci_read_config_byte(pdev, 0x43, &byte);
135 pci_write_config_byte(pdev, 0x43, byte | rwindow->segen_bits);
136#endif
137
138 /* Enable writes through the rom window */
139 pci_read_config_byte(pdev, 0x40, &byte);
140 pci_write_config_byte(pdev, 0x40, byte | 1);
141
142 /* FIXME handle registers 0x80 - 0x8C the bios region locks */
143
144 /* For write accesses caches are useless */
145 window->virt = ioremap_nocache(window->phys, window->size);
146 if (!window->virt) {
147 printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
148 window->phys, window->size);
149 goto out;
150 }
151
152 /* Get the first address to look for a rom chip at */
153 map_top = window->phys;
154#if 1
155 /* The probe sequence, when run over the firmware hub lock
156 * registers, sets them to 0x7 (no access), so probe at most
157 * the last 4MiB of the address space.
158 */
159 if (map_top < 0xffc00000) {
160 map_top = 0xffc00000;
161 }
162#endif
163 /* Loop through and look for rom chips */
164 while((map_top - 1) < 0xffffffffUL) {
165 struct cfi_private *cfi;
166 unsigned long offset;
167 int i;
168
169 if (!map) {
170 map = kmalloc(sizeof(*map), GFP_KERNEL);
171 }
172 if (!map) {
173 printk(KERN_ERR MOD_NAME ": kmalloc failed");
174 goto out;
175 }
176 memset(map, 0, sizeof(*map));
177 INIT_LIST_HEAD(&map->list);
178 map->map.name = map->map_name;
179 map->map.phys = map_top;
180 offset = map_top - window->phys;
181 map->map.virt = (void __iomem *)
182 (((unsigned long)(window->virt)) + offset);
183 map->map.size = 0xffffffffUL - map_top + 1UL;
184 /* Set the name of the map to the address I am trying */
185 sprintf(map->map_name, "%s @%08lx",
186 MOD_NAME, map->map.phys);
187
188 /* There is no generic VPP support */
189 for(map->map.bankwidth = 32; map->map.bankwidth;
190 map->map.bankwidth >>= 1)
191 {
192 char **probe_type;
193 /* Skip bankwidths that are not supported */
194 if (!map_bankwidth_supported(map->map.bankwidth))
195 continue;
196
197 /* Setup the map methods */
198 simple_map_init(&map->map);
199
200 /* Try all of the probe methods */
201 probe_type = rom_probe_types;
202 for(; *probe_type; probe_type++) {
203 map->mtd = do_map_probe(*probe_type, &map->map);
204 if (map->mtd)
205 goto found;
206 }
207 }
208 map_top += ROM_PROBE_STEP_SIZE;
209 continue;
210 found:
211 /* Trim the size if we are larger than the map */
212 if (map->mtd->size > map->map.size) {
213 printk(KERN_WARNING MOD_NAME
214 " rom(%u) larger than window(%lu). fixing...\n",
215 map->mtd->size, map->map.size);
216 map->mtd->size = map->map.size;
217 }
218 if (window->rsrc.parent) {
219 /*
220 * Registering the MTD device in iomem may not be possible
221 * if there is a BIOS "reserved" and BUSY range. If this
222 * fails then continue anyway.
223 */
224 map->rsrc.name = map->map_name;
225 map->rsrc.start = map->map.phys;
226 map->rsrc.end = map->map.phys + map->mtd->size - 1;
227 map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
228 if (request_resource(&window->rsrc, &map->rsrc)) {
229 printk(KERN_ERR MOD_NAME
230 ": cannot reserve MTD resource\n");
231 map->rsrc.parent = NULL;
232 }
233 }
234
235 /* Make the whole region visible in the map */
236 map->map.virt = window->virt;
237 map->map.phys = window->phys;
238 cfi = map->map.fldrv_priv;
239 for(i = 0; i < cfi->numchips; i++) {
240 cfi->chips[i].start += offset;
241 }
242
243 /* Now that the mtd device is complete, claim and export it */
244 map->mtd->owner = THIS_MODULE;
245 if (add_mtd_device(map->mtd)) {
246 map_destroy(map->mtd);
247 map->mtd = NULL;
248 goto out;
249 }
250
251
252 /* Calculate the new value of map_top */
253 map_top += map->mtd->size;
254
255 /* File away the map structure */
256 list_add(&map->list, &window->maps);
257 map = NULL;
258 }
259
260 out:
261 /* Free any left over map structures */
262 if (map) {
263 kfree(map);
264 }
265 /* See if I have any map structures */
266 if (list_empty(&window->maps)) {
267 amd76xrom_cleanup(window);
268 return -ENODEV;
269 }
270 return 0;
271}
272
273
274static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
275{
276 struct amd76xrom_window *window = &amd76xrom_window;
277
278 amd76xrom_cleanup(window);
279}
280
281static struct pci_device_id amd76xrom_pci_tbl[] = {
282 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,
283 PCI_ANY_ID, PCI_ANY_ID, },
284 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440,
285 PCI_ANY_ID, PCI_ANY_ID, },
286 { PCI_VENDOR_ID_AMD, 0x7468 }, /* amd8111 support */
287 { 0, }
288};
289
290MODULE_DEVICE_TABLE(pci, amd76xrom_pci_tbl);
291
292#if 0
293static struct pci_driver amd76xrom_driver = {
294 .name = MOD_NAME,
295 .id_table = amd76xrom_pci_tbl,
296 .probe = amd76xrom_init_one,
297 .remove = amd76xrom_remove_one,
298};
299#endif
300
301static int __init init_amd76xrom(void)
302{
303 struct pci_dev *pdev;
304 struct pci_device_id *id;
305 pdev = NULL;
306 for(id = amd76xrom_pci_tbl; id->vendor; id++) {
307 pdev = pci_find_device(id->vendor, id->device, NULL);
308 if (pdev) {
309 break;
310 }
311 }
312 if (pdev) {
313 return amd76xrom_init_one(pdev, &amd76xrom_pci_tbl[0]);
314 }
315 return -ENXIO;
316#if 0
317 return pci_module_init(&amd76xrom_driver);
318#endif
319}
320
321static void __exit cleanup_amd76xrom(void)
322{
323 amd76xrom_remove_one(amd76xrom_window.pdev);
324}
325
326module_init(init_amd76xrom);
327module_exit(cleanup_amd76xrom);
328
329MODULE_LICENSE("GPL");
330MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
331MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
332
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
new file mode 100644
index 000000000000..777276fd0e15
--- /dev/null
+++ b/drivers/mtd/maps/arctic-mtd.c
@@ -0,0 +1,135 @@
1/*
2 * $Id: arctic-mtd.c,v 1.13 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Arctic boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 * modified for Arctic by,
30 * David Gibson
31 * IBM OzLabs, Canberra, Australia
32 * <arctic@gibson.dropbear.id.au>
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/init.h>
39
40#include <linux/mtd/mtd.h>
41#include <linux/mtd/map.h>
42#include <linux/mtd/partitions.h>
43
44#include <asm/io.h>
45#include <asm/ibm4xx.h>
46
47/*
48 * 0 : 0xFE00 0000 - 0xFEFF FFFF : Filesystem 1 (16MiB)
49 * 1 : 0xFF00 0000 - 0xFF4F FFFF : kernel (5MiB)
50 * 2 : 0xFF50 0000 - 0xFFF5 FFFF : Filesystem 2 (10.375MiB) (if non-XIP)
51 * 3 : 0xFFF6 0000 - 0xFFFF FFFF : PIBS Firmware (640KiB)
52 */
53
54#define FFS1_SIZE 0x01000000 /* 16MiB */
55#define KERNEL_SIZE 0x00500000 /* 5MiB */
56#define FFS2_SIZE 0x00a60000 /* 10.375MiB */
57#define FIRMWARE_SIZE 0x000a0000 /* 640KiB */
58
59
60#define NAME "Arctic Linux Flash"
61#define PADDR SUBZERO_BOOTFLASH_PADDR
62#define BUSWIDTH 2
63#define SIZE SUBZERO_BOOTFLASH_SIZE
64#define PARTITIONS 4
65
66/* Flash memories on these boards are memory resources, accessed big-endian. */
67
68
69/* No board-specific setup is needed for now. */
70
71
72static struct map_info arctic_mtd_map = {
73 .name = NAME,
74 .size = SIZE,
75 .bankwidth = BUSWIDTH,
76 .phys = PADDR,
77};
78
79static struct mtd_info *arctic_mtd;
80
81static struct mtd_partition arctic_partitions[PARTITIONS] = {
82 { .name = "Filesystem",
83 .size = FFS1_SIZE,
84 .offset = 0,},
85 { .name = "Kernel",
86 .size = KERNEL_SIZE,
87 .offset = FFS1_SIZE,},
88 { .name = "Filesystem",
89 .size = FFS2_SIZE,
90 .offset = FFS1_SIZE + KERNEL_SIZE,},
91 { .name = "Firmware",
92 .size = FIRMWARE_SIZE,
93 .offset = SUBZERO_BOOTFLASH_SIZE - FIRMWARE_SIZE,},
94};
95
96static int __init
97init_arctic_mtd(void)
98{
99 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
100
101 arctic_mtd_map.virt = ioremap(PADDR, SIZE);
102
103 if (!arctic_mtd_map.virt) {
104 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
105 return -EIO;
106 }
107 simple_map_init(&arctic_mtd_map);
108
109 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
110 arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
111
112 if (!arctic_mtd)
113 return -ENXIO;
114
115 arctic_mtd->owner = THIS_MODULE;
116
117 return add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
118}
119
120static void __exit
121cleanup_arctic_mtd(void)
122{
123 if (arctic_mtd) {
124 del_mtd_partitions(arctic_mtd);
125 map_destroy(arctic_mtd);
126 iounmap((void *) arctic_mtd_map.virt);
127 }
128}
129
130module_init(init_arctic_mtd);
131module_exit(cleanup_arctic_mtd);
132
133MODULE_LICENSE("GPL");
134MODULE_AUTHOR("David Gibson <arctic@gibson.dropbear.id.au>");
135MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Arctic boards");
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
new file mode 100644
index 000000000000..cf362ccc3c8e
--- /dev/null
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -0,0 +1,127 @@
1/*
2 * NV-RAM memory access on autcpu12
3 * (C) 2002 Thomas Gleixner (gleixner@autronix.de)
4 *
5 * $Id: autcpu12-nvram.c,v 1.8 2004/11/04 13:24:14 gleixner Exp $
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/ioport.h>
27#include <linux/init.h>
28#include <asm/io.h>
29#include <asm/sizes.h>
30#include <asm/hardware.h>
31#include <asm/arch/autcpu12.h>
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/partitions.h>
35
36
37static struct mtd_info *sram_mtd;
38
39struct map_info autcpu12_sram_map = {
40 .name = "SRAM",
41 .size = 32768,
42 .bankwidth = 4,
43 .phys = 0x12000000,
44};
45
46static int __init init_autcpu12_sram (void)
47{
48 int err, save0, save1;
49
50 autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
51 if (!autcpu12_sram_map.virt) {
52 printk("Failed to ioremap autcpu12 NV-RAM space\n");
53 err = -EIO;
54 goto out;
55 }
56 simple_map_init(&autcpu12_sram_map);
57
58 /*
59 * Check for 32K/128K
60 * read ofs 0
61 * read ofs 0x10000
62 * Write complement to ofs 0x10000
63 * Read and check result on ofs 0x0
64 * Restore contents
65 */
66 save0 = map_read32(&autcpu12_sram_map,0);
67 save1 = map_read32(&autcpu12_sram_map,0x10000);
68 map_write32(&autcpu12_sram_map,~save0,0x10000);
69 /* if offset 0 now reads back the complement, only 32K is
70 * present (the region is aliased): restore offset 0 and exit
71 */
72 if ( map_read32(&autcpu12_sram_map,0) != save0) {
73 map_write32(&autcpu12_sram_map,save0,0x0);
74 goto map;
75 }
76 /* We found 128K: restore the original value at 0x10000 and
77 * set the size to 128K
78 */
79 map_write32(&autcpu12_sram_map,save1,0x10000);
80 autcpu12_sram_map.size = SZ_128K;
81
82map:
83 sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
84 if (!sram_mtd) {
85 printk("NV-RAM probe failed\n");
86 err = -ENXIO;
87 goto out_ioremap;
88 }
89
90 sram_mtd->owner = THIS_MODULE;
91 sram_mtd->erasesize = 16;
92
93 if (add_mtd_device(sram_mtd)) {
94 printk("NV-RAM device addition failed\n");
95 err = -ENOMEM;
96 goto out_probe;
97 }
98
99 printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
100
101 return 0;
102
103out_probe:
104 map_destroy(sram_mtd);
105 sram_mtd = 0;
106
107out_ioremap:
108 iounmap((void *)autcpu12_sram_map.virt);
109out:
110 return err;
111}
112
113static void __exit cleanup_autcpu12_maps(void)
114{
115 if (sram_mtd) {
116 del_mtd_device(sram_mtd);
117 map_destroy(sram_mtd);
118 iounmap((void *)autcpu12_sram_map.virt);
119 }
120}
121
122module_init(init_autcpu12_sram);
123module_exit(cleanup_autcpu12_maps);
124
125MODULE_AUTHOR("Thomas Gleixner");
126MODULE_DESCRIPTION("autcpu12 NV-RAM map driver");
127MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
new file mode 100644
index 000000000000..44de3a81b277
--- /dev/null
+++ b/drivers/mtd/maps/bast-flash.c
@@ -0,0 +1,227 @@
1/* linux/drivers/mtd/maps/bast_flash.c
2 *
3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Simtec Bast (EB2410ITX) NOR MTD Mapping driver
7 *
8 * Changelog:
9 * 20-Sep-2004 BJD Initial version
10 *
11 * $Id: bast-flash.c,v 1.1 2004/09/21 14:29:04 bjd Exp $
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26*/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/string.h>
33#include <linux/ioport.h>
34#include <linux/device.h>
35
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h>
39
40#include <asm/io.h>
41#include <asm/mach-types.h>
42#include <asm/mach/flash.h>
43
44#include <asm/arch/map.h>
45#include <asm/arch/bast-map.h>
46#include <asm/arch/bast-cpld.h>
47
48#ifdef CONFIG_MTD_BAST_MAXSIZE
49#define AREA_MAXSIZE (CONFIG_MTD_BAST_MAXSIZE * (1024*1024))
50#else
51#define AREA_MAXSIZE (32*1024*1024)
52#endif
53
54#define PFX "bast-flash: "
55
56struct bast_flash_info {
57 struct mtd_info *mtd;
58 struct map_info map;
59 struct mtd_partition *partitions;
60 struct resource *area;
61};
62
63static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
64
65static struct bast_flash_info *to_bast_info(struct device *dev)
66{
67 return (struct bast_flash_info *)dev_get_drvdata(dev);
68}
69
70static void bast_flash_setrw(int to)
71{
72 unsigned int val;
73 unsigned long flags;
74
75 local_irq_save(flags);
76 val = __raw_readb(BAST_VA_CTRL3);
77
78 if (to)
79 val |= BAST_CPLD_CTRL3_ROMWEN;
80 else
81 val &= ~BAST_CPLD_CTRL3_ROMWEN;
82
83 pr_debug("new cpld ctrl3=%02x\n", val);
84
85 __raw_writeb(val, BAST_VA_CTRL3);
86 local_irq_restore(flags);
87}
88
89static int bast_flash_remove(struct device *dev)
90{
91 struct bast_flash_info *info = to_bast_info(dev);
92
93 dev_set_drvdata(dev, NULL);
94
95 if (info == NULL)
96 return 0;
97
98 if (info->map.virt != NULL)
99 iounmap(info->map.virt);
100
101 if (info->mtd) {
102 del_mtd_partitions(info->mtd);
103 map_destroy(info->mtd);
104 }
105
106 if (info->partitions)
107 kfree(info->partitions);
108
109 if (info->area) {
110 release_resource(info->area);
111 kfree(info->area);
112 }
113
114 kfree(info);
115
116 return 0;
117}
118
119static int bast_flash_probe(struct device *dev)
120{
121 struct platform_device *pdev = to_platform_device(dev);
122 struct bast_flash_info *info;
123 struct resource *res;
124 int err = 0;
125
126 info = kmalloc(sizeof(*info), GFP_KERNEL);
127 if (info == NULL) {
128 printk(KERN_ERR PFX "no memory for flash info\n");
129 err = -ENOMEM;
130 goto exit_error;
131 }
132
133 memzero(info, sizeof(*info));
134 dev_set_drvdata(dev, info);
135
136 res = pdev->resource; /* assume that the flash has one resource */
137
138 info->map.phys = res->start;
139 info->map.size = res->end - res->start + 1;
140 info->map.name = dev->bus_id;
141 info->map.bankwidth = 2;
142
143 if (info->map.size > AREA_MAXSIZE)
144 info->map.size = AREA_MAXSIZE;
145
146 pr_debug("%s: area %08lx, size %ld\n", __FUNCTION__,
147 info->map.phys, info->map.size);
148
149 info->area = request_mem_region(res->start, info->map.size,
150 pdev->name);
151 if (info->area == NULL) {
152 printk(KERN_ERR PFX "cannot reserve flash memory region\n");
153 err = -ENOENT;
154 goto exit_error;
155 }
156
157 info->map.virt = ioremap(res->start, info->map.size);
158 pr_debug("%s: virt at %08x\n", __FUNCTION__, (int)info->map.virt);
159
160 if (info->map.virt == 0) {
161 printk(KERN_ERR PFX "failed to ioremap() region\n");
162 err = -EIO;
163 goto exit_error;
164 }
165
166 simple_map_init(&info->map);
167
168 /* enable the write to the flash area */
169
170 bast_flash_setrw(1);
171
172 /* probe for the device(s) */
173
174 info->mtd = do_map_probe("jedec_probe", &info->map);
175 if (info->mtd == NULL)
176 info->mtd = do_map_probe("cfi_probe", &info->map);
177
178 if (info->mtd == NULL) {
179 printk(KERN_ERR PFX "map_probe() failed\n");
180 err = -ENXIO;
181 goto exit_error;
182 }
183
184 /* mark ourselves as the owner */
185 info->mtd->owner = THIS_MODULE;
186
187 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
188 if (err > 0) {
189 err = add_mtd_partitions(info->mtd, info->partitions, err);
190 if (err)
191 printk(KERN_ERR PFX "cannot add/parse partitions\n");
192 }
193
194 if (err == 0)
195 return 0;
196
197 /* fall through to exit error */
198
199 exit_error:
200 bast_flash_remove(dev);
201 return err;
202}
203
204static struct device_driver bast_flash_driver = {
205 .name = "bast-nor",
206 .bus = &platform_bus_type,
207 .probe = bast_flash_probe,
208 .remove = bast_flash_remove,
209};
210
211static int __init bast_flash_init(void)
212{
213 printk("BAST NOR-Flash Driver, (c) 2004 Simtec Electronics\n");
214 return driver_register(&bast_flash_driver);
215}
216
217static void __exit bast_flash_exit(void)
218{
219 driver_unregister(&bast_flash_driver);
220}
221
222module_init(bast_flash_init);
223module_exit(bast_flash_exit);
224
225MODULE_LICENSE("GPL");
226MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
227MODULE_DESCRIPTION("BAST MTD Map driver");
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
new file mode 100644
index 000000000000..5e79c9d5da2b
--- /dev/null
+++ b/drivers/mtd/maps/beech-mtd.c
@@ -0,0 +1,112 @@
1/*
2 * $Id: beech-mtd.c,v 1.10 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Beech boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/init.h>
35
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h>
39
40#include <asm/io.h>
41#include <asm/ibm4xx.h>
42
43#define NAME "Beech Linux Flash"
44#define PADDR BEECH_BIGFLASH_PADDR
45#define SIZE BEECH_BIGFLASH_SIZE
46#define BUSWIDTH 1
47
48/* Flash memories on these boards are memory resources, accessed big-endian. */
49
50
51static struct map_info beech_mtd_map = {
52 .name = NAME,
53 .size = SIZE,
54 .bankwidth = BUSWIDTH,
55 .phys = PADDR
56};
57
58static struct mtd_info *beech_mtd;
59
60static struct mtd_partition beech_partitions[2] = {
61 {
62 .name = "Linux Kernel",
63 .size = BEECH_KERNEL_SIZE,
64 .offset = BEECH_KERNEL_OFFSET
65 }, {
66 .name = "Free Area",
67 .size = BEECH_FREE_AREA_SIZE,
68 .offset = BEECH_FREE_AREA_OFFSET
69 }
70};
71
72static int __init
73init_beech_mtd(void)
74{
75 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
76
77 beech_mtd_map.virt = ioremap(PADDR, SIZE);
78
79 if (!beech_mtd_map.virt) {
80 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
81 return -EIO;
82 }
83
84 simple_map_init(&beech_mtd_map);
85
86 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
87 beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
88
89 if (!beech_mtd)
90 return -ENXIO;
91
92 beech_mtd->owner = THIS_MODULE;
93
94 return add_mtd_partitions(beech_mtd, beech_partitions, 2);
95}
96
97static void __exit
98cleanup_beech_mtd(void)
99{
100 if (beech_mtd) {
101 del_mtd_partitions(beech_mtd);
102 map_destroy(beech_mtd);
103 iounmap((void *) beech_mtd_map.virt);
104 }
105}
106
107module_init(init_beech_mtd);
108module_exit(cleanup_beech_mtd);
109
110MODULE_LICENSE("GPL");
111MODULE_AUTHOR("Bishop Brock <bcbrock@us.ibm.com>");
112MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Beech boards");
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
new file mode 100644
index 000000000000..ab15dac2f936
--- /dev/null
+++ b/drivers/mtd/maps/cdb89712.c
@@ -0,0 +1,268 @@
1/*
2 * Flash on Cirrus CDB89712
3 *
4 * $Id: cdb89712.c,v 1.10 2004/11/04 13:24:14 gleixner Exp $
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/ioport.h>
11#include <linux/init.h>
12#include <asm/io.h>
13#include <asm/arch/hardware.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/map.h>
16#include <linux/mtd/partitions.h>
17
18
19
20
21static struct mtd_info *flash_mtd;
22
23struct map_info cdb89712_flash_map = {
24 .name = "flash",
25 .size = FLASH_SIZE,
26 .bankwidth = FLASH_WIDTH,
27 .phys = FLASH_START,
28};
29
30struct resource cdb89712_flash_resource = {
31 .name = "Flash",
32 .start = FLASH_START,
33 .end = FLASH_START + FLASH_SIZE - 1,
34 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
35};
36
37static int __init init_cdb89712_flash (void)
38{
39 int err;
40
41 if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
42 printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
43 err = -EBUSY;
44 goto out;
45 }
46
47 cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
48 if (!cdb89712_flash_map.virt) {
49 printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
50 err = -EIO;
51 goto out_resource;
52 }
53 simple_map_init(&cdb89712_flash_map);
54 flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
55 if (!flash_mtd) {
56 flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
57 if (flash_mtd)
58 flash_mtd->erasesize = 0x10000;
59 }
60 if (!flash_mtd) {
61 printk("FLASH probe failed\n");
62 err = -ENXIO;
63 goto out_ioremap;
64 }
65
66 flash_mtd->owner = THIS_MODULE;
67
68 if (add_mtd_device(flash_mtd)) {
69 printk("FLASH device addition failed\n");
70 err = -ENOMEM;
71 goto out_probe;
72 }
73
74 return 0;
75
76out_probe:
77 map_destroy(flash_mtd);
78 flash_mtd = 0;
79out_ioremap:
80 iounmap((void *)cdb89712_flash_map.virt);
81out_resource:
82 release_resource (&cdb89712_flash_resource);
83out:
84 return err;
85}
86
87
88
89
90
91static struct mtd_info *sram_mtd;
92
93struct map_info cdb89712_sram_map = {
94 .name = "SRAM",
95 .size = SRAM_SIZE,
96 .bankwidth = SRAM_WIDTH,
97 .phys = SRAM_START,
98};
99
100struct resource cdb89712_sram_resource = {
101 .name = "SRAM",
102 .start = SRAM_START,
103 .end = SRAM_START + SRAM_SIZE - 1,
104 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
105};
106
107static int __init init_cdb89712_sram (void)
108{
109 int err;
110
111 if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
112 printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
113 err = -EBUSY;
114 goto out;
115 }
116
117 cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
118 if (!cdb89712_sram_map.virt) {
119 printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
120 err = -EIO;
121 goto out_resource;
122 }
123 simple_map_init(&cdb89712_sram_map);
124 sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
125 if (!sram_mtd) {
126 printk("SRAM probe failed\n");
127 err = -ENXIO;
128 goto out_ioremap;
129 }
130
131 sram_mtd->owner = THIS_MODULE;
132 sram_mtd->erasesize = 16;
133
134 if (add_mtd_device(sram_mtd)) {
135 printk("SRAM device addition failed\n");
136 err = -ENOMEM;
137 goto out_probe;
138 }
139
140 return 0;
141
142out_probe:
143 map_destroy(sram_mtd);
144 sram_mtd = 0;
145out_ioremap:
146 iounmap((void *)cdb89712_sram_map.virt);
147out_resource:
148 release_resource (&cdb89712_sram_resource);
149out:
150 return err;
151}
152
153
154
155
156
157
158
159static struct mtd_info *bootrom_mtd;
160
161struct map_info cdb89712_bootrom_map = {
162 .name = "BootROM",
163 .size = BOOTROM_SIZE,
164 .bankwidth = BOOTROM_WIDTH,
165 .phys = BOOTROM_START,
166};
167
168struct resource cdb89712_bootrom_resource = {
169 .name = "BootROM",
170 .start = BOOTROM_START,
171 .end = BOOTROM_START + BOOTROM_SIZE - 1,
172 .flags = IORESOURCE_IO | IORESOURCE_BUSY,
173};
174
175static int __init init_cdb89712_bootrom (void)
176{
177 int err;
178
179 if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
180 printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
181 err = -EBUSY;
182 goto out;
183 }
184
185 cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
186 if (!cdb89712_bootrom_map.virt) {
187 printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
188 err = -EIO;
189 goto out_resource;
190 }
191 simple_map_init(&cdb89712_bootrom_map);
192 bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
193 if (!bootrom_mtd) {
194 printk("BootROM probe failed\n");
195 err = -ENXIO;
196 goto out_ioremap;
197 }
198
199 bootrom_mtd->owner = THIS_MODULE;
200 bootrom_mtd->erasesize = 0x10000;
201
202 if (add_mtd_device(bootrom_mtd)) {
203 printk("BootROM device addition failed\n");
204 err = -ENOMEM;
205 goto out_probe;
206 }
207
208 return 0;
209
210out_probe:
211 map_destroy(bootrom_mtd);
212 bootrom_mtd = 0;
213out_ioremap:
214 iounmap((void *)cdb89712_bootrom_map.virt);
215out_resource:
216 release_resource (&cdb89712_bootrom_resource);
217out:
218 return err;
219}
220
221
222
223
224
225static int __init init_cdb89712_maps(void)
226{
227
228 printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
229 FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
230
231 init_cdb89712_flash();
232 init_cdb89712_sram();
233 init_cdb89712_bootrom();
234
235 return 0;
236}
237
238
239static void __exit cleanup_cdb89712_maps(void)
240{
241 if (sram_mtd) {
242 del_mtd_device(sram_mtd);
243 map_destroy(sram_mtd);
244 iounmap((void *)cdb89712_sram_map.virt);
245 release_resource (&cdb89712_sram_resource);
246 }
247
248 if (flash_mtd) {
249 del_mtd_device(flash_mtd);
250 map_destroy(flash_mtd);
251 iounmap((void *)cdb89712_flash_map.virt);
252 release_resource (&cdb89712_flash_resource);
253 }
254
255 if (bootrom_mtd) {
256 del_mtd_device(bootrom_mtd);
257 map_destroy(bootrom_mtd);
258 iounmap((void *)cdb89712_bootrom_map.virt);
259 release_resource (&cdb89712_bootrom_resource);
260 }
261}
262
263module_init(init_cdb89712_maps);
264module_exit(cleanup_cdb89712_maps);
265
266MODULE_AUTHOR("Ray L");
267MODULE_DESCRIPTION("ARM CDB89712 map driver");
268MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
new file mode 100644
index 000000000000..da8584a662f4
--- /dev/null
+++ b/drivers/mtd/maps/ceiva.c
@@ -0,0 +1,350 @@
1/*
2 * Ceiva flash memory driver.
3 * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
4 *
5 * Note: this driver supports jedec compatible devices. Modification
6 * for CFI compatible devices should be straightforward: change
7 * jedec_probe to cfi_probe.
8 *
9 * Based on: sa1100-flash.c, which has the following copyright:
10 * Flash memory access on SA11x0 based devices
11 *
12 * (C) 2000 Nicolas Pitre <nico@cam.org>
13 *
14 * $Id: ceiva.c,v 1.11 2004/09/16 23:27:12 gleixner Exp $
15 */
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/ioport.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/map.h>
26#include <linux/mtd/partitions.h>
27#include <linux/mtd/concat.h>
28
29#include <asm/hardware.h>
30#include <asm/mach-types.h>
31#include <asm/io.h>
32#include <asm/sizes.h>
33
34/*
35 * This isn't complete yet, so...
36 */
37#define CONFIG_MTD_CEIVA_STATICMAP
38
39#ifdef CONFIG_MTD_CEIVA_STATICMAP
40/*
41 * See include/linux/mtd/partitions.h for definition of the mtd_partition
42 * structure.
43 *
44 * Please note:
45 * 1. The flash size given should be the largest flash size that can
46 * be accommodated.
47 *
48 * 2. The bus width must be defined in clps_setup_flash.
49 *
50 * The MTD layer will detect flash chip aliasing and reduce the size of
51 * the map accordingly.
52 *
53 */
54
55#ifdef CONFIG_ARCH_CEIVA
56/* Flash / Partition sizing */
57/* For the 28F8003, we use the block mapping to calculate the sizes */
58#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
59#define BOOT_PARTITION_SIZE_KiB (16)
60#define PARAMS_PARTITION_SIZE_KiB (8)
61#define KERNEL_PARTITION_SIZE_KiB (4*128)
62/* Use both the remaining portion of the first flash, and all of the second flash */
63#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
64
65static struct mtd_partition ceiva_partitions[] = {
66 {
67 .name = "Ceiva BOOT partition",
68 .size = BOOT_PARTITION_SIZE_KiB*1024,
69 .offset = 0,
70
71 },{
72 .name = "Ceiva parameters partition",
73 .size = PARAMS_PARTITION_SIZE_KiB*1024,
74 .offset = (16 + 8) * 1024,
75 },{
76 .name = "Ceiva kernel partition",
77 .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
78 .offset = 0x20000,
79
80 },{
81 .name = "Ceiva root filesystem partition",
82 .offset = MTDPART_OFS_APPEND,
83 .size = (ROOT_PARTITION_SIZE_KiB)*1024,
84 }
85};
86#endif
87
88static int __init clps_static_partitions(struct mtd_partition **parts)
89{
90 int nb_parts = 0;
91
92#ifdef CONFIG_ARCH_CEIVA
93 if (machine_is_ceiva()) {
94 *parts = ceiva_partitions;
95 nb_parts = ARRAY_SIZE(ceiva_partitions);
96 }
97#endif
98 return nb_parts;
99}
100#endif
101
102struct clps_info {
103 unsigned long base;
104 unsigned long size;
105 int width;
106 void *vbase;
107 struct map_info *map;
108 struct mtd_info *mtd;
109 struct resource *res;
110};
111
112#define NR_SUBMTD 4
113
114static struct clps_info info[NR_SUBMTD];
115
116static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
117{
118 struct mtd_info *subdev[nr];
119 struct map_info *maps;
120 int i, found = 0, ret = 0;
121
122 /*
123 * Allocate the map_info structs in one go.
124 */
125 maps = kmalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
126 if (!maps)
127 return -ENOMEM;
128 memset(maps, 0, sizeof(struct map_info) * nr);
129 /*
130 * Claim and then map the memory regions.
131 */
132 for (i = 0; i < nr; i++) {
133 if (clps[i].base == (unsigned long)-1)
134 break;
135
136 clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
137 if (!clps[i].res) {
138 ret = -EBUSY;
139 break;
140 }
141
142 clps[i].map = maps + i;
143
144 clps[i].map->name = "clps flash";
145 clps[i].map->phys = clps[i].base;
146
147 clps[i].vbase = ioremap(clps[i].base, clps[i].size);
148 if (!clps[i].vbase) {
149 ret = -ENOMEM;
150 break;
151 }
152
153 clps[i].map->virt = (void __iomem *)clps[i].vbase;
154 clps[i].map->bankwidth = clps[i].width;
155 clps[i].map->size = clps[i].size;
156
157 simple_map_init(clps[i].map);
158
159 clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
160 if (clps[i].mtd == NULL) {
161 ret = -ENXIO;
162 break;
163 }
164 clps[i].mtd->owner = THIS_MODULE;
165 subdev[i] = clps[i].mtd;
166
167 printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
168 "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
169 clps[i].width * 8);
170 found += 1;
171 }
172
173 /*
174 * ENXIO is special. It means we didn't find a chip when
175 * we probed. We need to tear down the mapping, free the
176 * resource and mark it as such.
177 */
178 if (ret == -ENXIO) {
179 iounmap(clps[i].vbase);
180 clps[i].vbase = NULL;
181 release_resource(clps[i].res);
182 clps[i].res = NULL;
183 }
184
185 /*
186 * If we found one device, don't bother with concat support.
187 * If we found multiple devices, use concat if we have it
188 * available, otherwise fail.
189 */
190 if (ret == 0 || ret == -ENXIO) {
191 if (found == 1) {
192 *rmtd = subdev[0];
193 ret = 0;
194 } else if (found > 1) {
195 /*
196 * We detected multiple devices. Concatenate
197 * them together.
198 */
199#ifdef CONFIG_MTD_CONCAT
200 *rmtd = mtd_concat_create(subdev, found,
201 "clps flash");
202 if (*rmtd == NULL)
203 ret = -ENXIO;
204#else
205 printk(KERN_ERR "clps flash: multiple devices "
206 "found but MTD concat support disabled.\n");
207 ret = -ENXIO;
208#endif
209 }
210 }
211
212 /*
213 * If we failed, clean up.
214 */
215 if (ret) {
216 do {
217 if (clps[i].mtd)
218 map_destroy(clps[i].mtd);
219 if (clps[i].vbase)
220 iounmap(clps[i].vbase);
221 if (clps[i].res)
222 release_resource(clps[i].res);
223 } while (i--);
224
225 kfree(maps);
226 }
227
228 return ret;
229}
230
231static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
232{
233 int i;
234
235 del_mtd_partitions(mtd);
236
237 if (mtd != clps[0].mtd)
238 mtd_concat_destroy(mtd);
239
240 for (i = NR_SUBMTD - 1; i >= 0; i--) {
241 if (clps[i].mtd)
242 map_destroy(clps[i].mtd);
243 if (clps[i].vbase)
244 iounmap(clps[i].vbase);
245 if (clps[i].res)
246 release_resource(clps[i].res);
247 }
248 kfree(clps[0].map);
249}
250
251/*
252 * We define the memory space, size, and width for the flash memory
253 * space here.
254 */
255
256static int __init clps_setup_flash(void)
257{
258 int nr = 0;
259
260#ifdef CONFIG_ARCH_CEIVA
261 if (machine_is_ceiva()) {
262 info[0].base = CS0_PHYS_BASE;
263 info[0].size = SZ_32M;
264 info[0].width = CEIVA_FLASH_WIDTH;
265 info[1].base = CS1_PHYS_BASE;
266 info[1].size = SZ_32M;
267 info[1].width = CEIVA_FLASH_WIDTH;
268 nr = 2;
269 }
270#endif
271 return nr;
272}
273
274static struct mtd_partition *parsed_parts;
275static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
276
277static void __init clps_locate_partitions(struct mtd_info *mtd)
278{
279 const char *part_type = NULL;
280 int nr_parts = 0;
281 do {
282 /*
283 * Partition selection stuff.
284 */
285 nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
286 if (nr_parts > 0) {
287 part_type = "command line";
288 break;
289 }
290#ifdef CONFIG_MTD_CEIVA_STATICMAP
291 nr_parts = clps_static_partitions(&parsed_parts);
292 if (nr_parts > 0) {
293 part_type = "static";
294 break;
295 }
296 printk("found: %d partitions\n", nr_parts);
297#endif
298 } while (0);
299
300 if (nr_parts == 0) {
301 printk(KERN_NOTICE "clps flash: no partition info "
302 "available, registering whole flash\n");
303 add_mtd_device(mtd);
304 } else {
305 printk(KERN_NOTICE "clps flash: using %s partition "
306 "definition\n", part_type);
307 add_mtd_partitions(mtd, parsed_parts, nr_parts);
308 }
309
310 /* Always succeeds. */
311}
312
313static void __exit clps_destroy_partitions(void)
314{
315 if (parsed_parts)
316 kfree(parsed_parts);
317}
318
319static struct mtd_info *mymtd;
320
321static int __init clps_mtd_init(void)
322{
323 int ret;
324 int nr;
325
326 nr = clps_setup_flash();
327 if (nr < 0)
328 return nr;
329
330 ret = clps_setup_mtd(info, nr, &mymtd);
331 if (ret)
332 return ret;
333
334 clps_locate_partitions(mymtd);
335
336 return 0;
337}
338
339static void __exit clps_mtd_cleanup(void)
340{
341 clps_destroy_mtd(info, mymtd);
342 clps_destroy_partitions();
343}
344
345module_init(clps_mtd_init);
346module_exit(clps_mtd_cleanup);
347
348MODULE_AUTHOR("Rob Scott");
349MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
350MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
new file mode 100644
index 000000000000..f72e4f894b32
--- /dev/null
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
3 *
4 * $Id: cfi_flagadm.c,v 1.14 2004/11/04 13:24:14 gleixner Exp $
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
12 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
14 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
16 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
17 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
18 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
20 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/partitions.h>
35
36
37/* We split the flash chip up into four parts.
38 * 1: bootloader first 128k (0x00000000 - 0x0001FFFF) size 0x020000
39 * 2: kernel 640k (0x00020000 - 0x000BFFFF) size 0x0A0000
40 * 3: compressed 1536k root ramdisk (0x000C0000 - 0x0023FFFF) size 0x180000
41 * 4: writeable diskpartition (jffs)(0x00240000 - 0x003FFFFF) size 0x1C0000
42 */
43
44#define FLASH_PHYS_ADDR 0x40000000
45#define FLASH_SIZE 0x400000
46
47#define FLASH_PARTITION0_ADDR 0x00000000
48#define FLASH_PARTITION0_SIZE 0x00020000
49
50#define FLASH_PARTITION1_ADDR 0x00020000
51#define FLASH_PARTITION1_SIZE 0x000A0000
52
53#define FLASH_PARTITION2_ADDR 0x000C0000
54#define FLASH_PARTITION2_SIZE 0x00180000
55
56#define FLASH_PARTITION3_ADDR 0x00240000
57#define FLASH_PARTITION3_SIZE 0x001C0000
58
59
60struct map_info flagadm_map = {
61 .name = "FlagaDM flash device",
62 .size = FLASH_SIZE,
63 .bankwidth = 2,
64};
65
66struct mtd_partition flagadm_parts[] = {
67 {
68 .name = "Bootloader",
69 .offset = FLASH_PARTITION0_ADDR,
70 .size = FLASH_PARTITION0_SIZE
71 },
72 {
73 .name = "Kernel image",
74 .offset = FLASH_PARTITION1_ADDR,
75 .size = FLASH_PARTITION1_SIZE
76 },
77 {
78 .name = "Initial ramdisk image",
79 .offset = FLASH_PARTITION2_ADDR,
80 .size = FLASH_PARTITION2_SIZE
81 },
82 {
83 .name = "Persistant storage",
84 .offset = FLASH_PARTITION3_ADDR,
85 .size = FLASH_PARTITION3_SIZE
86 }
87};
88
89#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition))
90
91static struct mtd_info *mymtd;
92
93int __init init_flagadm(void)
94{
95 printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
96 FLASH_SIZE, FLASH_PHYS_ADDR);
97
98 flagadm_map.phys = FLASH_PHYS_ADDR;
99 flagadm_map.virt = ioremap(FLASH_PHYS_ADDR,
100 FLASH_SIZE);
101
102 if (!flagadm_map.virt) {
103 printk("Failed to ioremap\n");
104 return -EIO;
105 }
106
107 simple_map_init(&flagadm_map);
108
109 mymtd = do_map_probe("cfi_probe", &flagadm_map);
110 if (mymtd) {
111 mymtd->owner = THIS_MODULE;
112 add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
113 printk(KERN_NOTICE "FlagaDM flash device initialized\n");
114 return 0;
115 }
116
117 iounmap((void *)flagadm_map.virt);
118 return -ENXIO;
119}
120
121static void __exit cleanup_flagadm(void)
122{
123 if (mymtd) {
124 del_mtd_partitions(mymtd);
125 map_destroy(mymtd);
126 }
127 if (flagadm_map.virt) {
128 iounmap((void *)flagadm_map.virt);
129 flagadm_map.virt = 0;
130 }
131}
132
133module_init(init_flagadm);
134module_exit(cleanup_flagadm);
135
136
137MODULE_LICENSE("GPL");
138MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
139MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
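
A quick sanity check of the partition layout defined above (standalone
user-space C, not part of the driver): the four windows must be contiguous
and must end exactly at the top of the 4 MiB chip.

#include <assert.h>
#include <stdio.h>

#define FLASH_SIZE              0x400000
#define FLASH_PARTITION0_ADDR   0x00000000
#define FLASH_PARTITION0_SIZE   0x00020000
#define FLASH_PARTITION1_ADDR   0x00020000
#define FLASH_PARTITION1_SIZE   0x000A0000
#define FLASH_PARTITION2_ADDR   0x000C0000
#define FLASH_PARTITION2_SIZE   0x00180000
#define FLASH_PARTITION3_ADDR   0x00240000
#define FLASH_PARTITION3_SIZE   0x001C0000

int main(void)
{
	/* every partition starts where the previous one ends ... */
	assert(FLASH_PARTITION1_ADDR == FLASH_PARTITION0_ADDR + FLASH_PARTITION0_SIZE);
	assert(FLASH_PARTITION2_ADDR == FLASH_PARTITION1_ADDR + FLASH_PARTITION1_SIZE);
	assert(FLASH_PARTITION3_ADDR == FLASH_PARTITION2_ADDR + FLASH_PARTITION2_SIZE);
	/* ... and the last one ends exactly at FLASH_SIZE */
	assert(FLASH_PARTITION3_ADDR + FLASH_PARTITION3_SIZE == FLASH_SIZE);
	printf("partition map tiles the %u KiB device exactly\n", FLASH_SIZE / 1024);
	return 0;
}
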
diff --git a/drivers/mtd/maps/cstm_mips_ixx.c b/drivers/mtd/maps/cstm_mips_ixx.c
new file mode 100644
index 000000000000..ae9252fbf176
--- /dev/null
+++ b/drivers/mtd/maps/cstm_mips_ixx.c
@@ -0,0 +1,270 @@
1/*
2 * $Id: cstm_mips_ixx.c,v 1.12 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
5 * Config with both CFI and JEDEC device support.
6 *
7 * Basically physmap.c with the addition of partitions and
8 * an array of mapping info to accommodate more than one flash type per board.
9 *
10 * Copyright 2000 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * You should have received a copy of the GNU General Public License along
29 * with this program; if not, write to the Free Software Foundation, Inc.,
30 * 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <asm/io.h>
38#include <linux/mtd/mtd.h>
39#include <linux/mtd/map.h>
40#include <linux/mtd/partitions.h>
41#include <linux/config.h>
42#include <linux/delay.h>
43
44#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
45#define CC_GCR 0xB4013818
46#define CC_GPBCR 0xB401380A
47#define CC_GPBDR 0xB4013808
48#define CC_M68K_DEVICE 1
49#define CC_M68K_FUNCTION 6
50#define CC_CONFADDR 0xB8004000
51#define CC_CONFDATA 0xB8004004
52#define CC_FC_FCR 0xB8002004
53#define CC_FC_DCR 0xB8002008
54#define CC_GPACR 0xB4013802
55#define CC_GPAICR 0xB4013804
56#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
57
58#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
59void cstm_mips_ixx_set_vpp(struct map_info *map,int vpp)
60{
61 static DEFINE_SPINLOCK(vpp_lock);
62 static int vpp_count = 0;
63 unsigned long flags;
64
65 spin_lock_irqsave(&vpp_lock, flags);
66
67 if (vpp) {
68 if (!vpp_count++) {
69 __u16 data;
70 __u8 data1;
71 static u8 first = 1;
72
73 // Set GPIO port B pin3 to high
74 data = *(__u16 *)(CC_GPBCR);
75 data = (data & 0xff0f) | 0x0040;
76 *(__u16 *)CC_GPBCR = data;
77 *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) | 0x08;
78 if (first) {
79 first = 0;
80 /* need to have this delay for first
81 enabling vpp after powerup */
82 udelay(40);
83 }
84 }
85 } else {
86 if (!--vpp_count) {
87 __u16 data;
88
89 // Set GPIO port B pin3 to low
90 data = *(__u16 *)(CC_GPBCR);
91 data = (data & 0xff3f) | 0x0040;
92 *(__u16 *)CC_GPBCR = data;
93 *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) & 0xf7;
94 }
95 }
96 spin_unlock_irqrestore(&vpp_lock, flags);
97}
98#endif
99
100/* board and partition description */
101
102#define MAX_PHYSMAP_PARTITIONS 8
103struct cstm_mips_ixx_info {
104 char *name;
105 unsigned long window_addr;
106 unsigned long window_size;
107 int bankwidth;
108 int num_partitions;
109};
110
111#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
112#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
113const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
114{
115 { // 28F128J3A in 2x16 configuration
116 "big flash", // name
117 0x08000000, // window_addr
118 0x02000000, // window_size
119 4, // bankwidth
120 1, // num_partitions
121 }
122
123};
124static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
125{ // 28F128J3A in 2x16 configuration
126 {
127 .name = "main partition ",
128 .size = 0x02000000, // 128 x 2 x 128k byte sectors
129 .offset = 0,
130 },
131},
132};
133#else /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
134#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
135const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
136{
137 {
138 "MTD flash", // name
139 CONFIG_MTD_CSTM_MIPS_IXX_START, // window_addr
140 CONFIG_MTD_CSTM_MIPS_IXX_LEN, // window_size
141 CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // bankwidth
142 1, // num_partitions
143 },
144
145};
146static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
147{
148 {
149 .name = "main partition",
150 .size = CONFIG_MTD_CSTM_MIPS_IXX_LEN,
151 .offset = 0,
152 },
153},
154};
155#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
156
157struct map_info cstm_mips_ixx_map[PHYSMAP_NUMBER];
158
159int __init init_cstm_mips_ixx(void)
160{
161 int i;
162 int jedec;
163 struct mtd_info *mymtd;
164 struct mtd_partition *parts;
165
166 /* Initialize mapping */
167 for (i=0;i<PHYSMAP_NUMBER;i++) {
168 printk(KERN_NOTICE "cstm_mips_ixx flash device: 0x%lx at 0x%lx\n",
169 cstm_mips_ixx_board_desc[i].window_size, cstm_mips_ixx_board_desc[i].window_addr);
170
171
172 cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr;
173 cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
174 if (!cstm_mips_ixx_map[i].virt) {
175 printk(KERN_WARNING "Failed to ioremap\n");
176 return -EIO;
177 }
178 cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
179 cstm_mips_ixx_map[i].size = cstm_mips_ixx_board_desc[i].window_size;
180 cstm_mips_ixx_map[i].bankwidth = cstm_mips_ixx_board_desc[i].bankwidth;
181#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
182 cstm_mips_ixx_map[i].set_vpp = cstm_mips_ixx_set_vpp;
183#endif
184 simple_map_init(&cstm_mips_ixx_map[i]);
185 //printk(KERN_NOTICE "cstm_mips_ixx: ioremap is %x\n",(unsigned int)(cstm_mips_ixx_map[i].virt));
186 }
187
188#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
189 setup_ITE_IVR_flash();
190#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
191
192 for (i=0;i<PHYSMAP_NUMBER;i++) {
193 parts = &cstm_mips_ixx_partitions[i][0];
194 jedec = 0;
195 mymtd = (struct mtd_info *)do_map_probe("cfi_probe", &cstm_mips_ixx_map[i]);
196 //printk(KERN_NOTICE "phymap %d cfi_probe: mymtd is %x\n",i,(unsigned int)mymtd);
197 if (!mymtd) {
198 jedec = 1;
199 mymtd = (struct mtd_info *)do_map_probe("jedec", &cstm_mips_ixx_map[i]);
200 printk(KERN_NOTICE "cstm_mips_ixx %d jedec: mymtd is %x\n",i,(unsigned int)mymtd);
201 }
202 if (mymtd) {
203 mymtd->owner = THIS_MODULE;
204
205 cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd;
206 add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions);
207 }
208 else
209 return -ENXIO;
210 }
211 return 0;
212}
213
214static void __exit cleanup_cstm_mips_ixx(void)
215{
216 int i;
217 struct mtd_info *mymtd;
218
219 for (i=0;i<PHYSMAP_NUMBER;i++) {
220 mymtd = (struct mtd_info *)cstm_mips_ixx_map[i].map_priv_2;
221 if (mymtd) {
222 del_mtd_partitions(mymtd);
223 map_destroy(mymtd);
224 }
225 if (cstm_mips_ixx_map[i].virt) {
226 iounmap((void *)cstm_mips_ixx_map[i].virt);
227 cstm_mips_ixx_map[i].virt = 0;
228 }
229 }
230}
231#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
232void PCISetULongByOffset(__u32 DevNumber, __u32 FuncNumber, __u32 Offset, __u32 data)
233{
234 __u32 offset;
235
236 offset = (unsigned long)(0x80000000 | ((DevNumber << 11) + (FuncNumber << 8) + Offset));
237
238 *(__u32 *)CC_CONFADDR = offset;
239 *(__u32 *)CC_CONFDATA = data;
240}
241void setup_ITE_IVR_flash()
242{
243 __u32 size, base;
244
245 size = 0x0e000000; // 32MiB
246 base = (0x08000000) >> 8 >>1; // Bug: we must shift one more bit
247
248 /* need to set ITE flash to 32 bits instead of default 8 */
249#ifdef CONFIG_MIPS_IVR
250 *(__u32 *)CC_FC_FCR = 0x55;
251 *(__u32 *)CC_GPACR = 0xfffc;
252#else
253 *(__u32 *)CC_FC_FCR = 0x77;
254#endif
255 /* turn bursting off */
256 *(__u32 *)CC_FC_DCR = 0x0;
257
258 /* setup for one chip 4 byte PCI access */
259 PCISetULongByOffset(CC_M68K_DEVICE, CC_M68K_FUNCTION, 0x60, size | base);
260 PCISetULongByOffset(CC_M68K_DEVICE, CC_M68K_FUNCTION, 0x64, 0x02);
261}
262#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
263
264module_init(init_cstm_mips_ixx);
265module_exit(cleanup_cstm_mips_ixx);
266
267
268MODULE_LICENSE("GPL");
269MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
270MODULE_DESCRIPTION("MTD map driver for ITE 8172G and Globespan IVR boards");
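
PCISetULongByOffset() above hand-builds a configuration address in the
standard PCI type-1 format for bus 0. For readers unfamiliar with the
encoding, here is a standalone sketch of the same field layout; the helper
name is the editor's and is not part of the driver.

#include <stdint.h>

/* PCI type-1 CONFIG_ADDRESS layout: bit 31 = enable, bits 23:16 = bus,
 * 15:11 = device, 10:8 = function, 7:2 = dword-aligned register offset. */
static uint32_t pci_type1_addr(uint32_t bus, uint32_t dev,
			       uint32_t func, uint32_t reg)
{
	return 0x80000000u |
	       ((bus  & 0xff) << 16) |
	       ((dev  & 0x1f) << 11) |
	       ((func & 0x07) << 8)  |
	       (reg & 0xfc);
}

pci_type1_addr(0, CC_M68K_DEVICE, CC_M68K_FUNCTION, 0x60) yields the same
value (0x80000e60) that PCISetULongByOffset() writes to CC_CONFADDR for
offset 0x60.
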
diff --git a/drivers/mtd/maps/db1550-flash.c b/drivers/mtd/maps/db1550-flash.c
new file mode 100644
index 000000000000..d213888462a4
--- /dev/null
+++ b/drivers/mtd/maps/db1550-flash.c
@@ -0,0 +1,187 @@
1/*
2 * Flash memory access on Alchemy Db1550 board
3 *
4 * $Id: db1550-flash.c,v 1.7 2004/11/04 13:24:14 gleixner Exp $
5 *
6 * (C) 2004 Embedded Edge, LLC, based on db1550-flash.c:
7 * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
8 *
9 */
10
11#include <linux/config.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/map.h>
19#include <linux/mtd/partitions.h>
20
21#include <asm/io.h>
22
23#ifdef DEBUG_RW
24#define DBG(x...) printk(x)
25#else
26#define DBG(x...)
27#endif
28
29static unsigned long window_addr;
30static unsigned long window_size;
31
32
33static struct map_info db1550_map = {
34 .name = "Db1550 flash",
35};
36
37static unsigned char flash_bankwidth = 4;
38
39/*
40 * Support only 64MB NOR Flash parts
41 */
42
43#if defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER)
44#define DB1550_BOTH_BANKS
45#elif defined(CONFIG_MTD_DB1550_BOOT) && !defined(CONFIG_MTD_DB1550_USER)
46#define DB1550_BOOT_ONLY
47#elif !defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER)
48#define DB1550_USER_ONLY
49#endif
50
51#ifdef DB1550_BOTH_BANKS
52/* both banks will be used. Combine the first bank and the first
53 * part of the second bank together into a single jffs/jffs2
54 * partition.
55 */
56static struct mtd_partition db1550_partitions[] = {
57 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
58 * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
59 * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
60 */
61 {
62 .name = "User FS",
63 .size = (0x1FC00000 - 0x18000000),
64 .offset = 0x0000000
65 },{
66 .name = "yamon",
67 .size = 0x0100000,
68 .offset = MTDPART_OFS_APPEND,
69 .mask_flags = MTD_WRITEABLE
70 },{
71 .name = "raw kernel",
72 .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
73 .offset = MTDPART_OFS_APPEND,
74 }
75};
76#elif defined(DB1550_BOOT_ONLY)
77static struct mtd_partition db1550_partitions[] = {
78 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
79 * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
80 */
81 {
82 .name = "User FS",
83 .size = 0x03c00000,
84 .offset = 0x0000000
85 },{
86 .name = "yamon",
87 .size = 0x0100000,
88 .offset = MTDPART_OFS_APPEND,
89 .mask_flags = MTD_WRITEABLE
90 },{
91 .name = "raw kernel",
92 .size = (0x300000-0x40000), /* last 256KB is yamon env */
93 .offset = MTDPART_OFS_APPEND,
94 }
95};
96#elif defined(DB1550_USER_ONLY)
97static struct mtd_partition db1550_partitions[] = {
98 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
99 * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
100 */
101 {
102 .name = "User FS",
103 .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */
104 .offset = 0x0000000
105 },{
106 .name = "raw kernel",
107 .size = MTDPART_SIZ_FULL,
108 .offset = MTDPART_OFS_APPEND,
109 }
110};
111#else
112#error MTD_DB1550 define combo error /* should never happen */
113#endif
114
115#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
116
117static struct mtd_info *mymtd;
118
119/*
120 * Probe the flash density and setup window address and size
121 * based on user CONFIG options. There are times when we don't
122 * want the MTD driver to be probing the boot or user flash,
123 * so having the option to enable only one bank is important.
124 */
125int setup_flash_params(void)
126{
127#if defined(DB1550_BOTH_BANKS)
128 window_addr = 0x18000000;
129 window_size = 0x8000000;
130#elif defined(DB1550_BOOT_ONLY)
131 window_addr = 0x1C000000;
132 window_size = 0x4000000;
133#else /* USER ONLY */
134 window_addr = 0x18000000;
135 window_size = 0x4000000;
136#endif
137 return 0;
138}
139
140int __init db1550_mtd_init(void)
141{
142 struct mtd_partition *parts;
143 int nb_parts = 0;
144
145 /* Default flash bankwidth */
146 db1550_map.bankwidth = flash_bankwidth;
147
148 if (setup_flash_params())
149 return -ENXIO;
150
151 /*
152 * Static partition definition selection
153 */
154 parts = db1550_partitions;
155 nb_parts = NB_OF(db1550_partitions);
156 db1550_map.size = window_size;
157
158 /*
159 * Now let's probe for the actual flash. Do it here since
160 * specific machine settings might have been set above.
161 */
162 printk(KERN_NOTICE "Db1550 flash: probing %d-bit flash bus\n",
163 db1550_map.bankwidth*8);
164 db1550_map.virt = ioremap(window_addr, window_size);
165 mymtd = do_map_probe("cfi_probe", &db1550_map);
166 if (!mymtd) return -ENXIO;
167 mymtd->owner = THIS_MODULE;
168
169 add_mtd_partitions(mymtd, parts, nb_parts);
170 return 0;
171}
172
173static void __exit db1550_mtd_cleanup(void)
174{
175 if (mymtd) {
176 del_mtd_partitions(mymtd);
177 map_destroy(mymtd);
178 iounmap((void *) db1550_map.virt);
179 }
180}
181
182module_init(db1550_mtd_init);
183module_exit(db1550_mtd_cleanup);
184
185MODULE_AUTHOR("Embedded Edge, LLC");
186MODULE_DESCRIPTION("Db1550 mtd map driver");
187MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/db1x00-flash.c b/drivers/mtd/maps/db1x00-flash.c
new file mode 100644
index 000000000000..faa68ec56902
--- /dev/null
+++ b/drivers/mtd/maps/db1x00-flash.c
@@ -0,0 +1,226 @@
1/*
2 * Flash memory access on Alchemy Db1xxx boards
3 *
4 * $Id: db1x00-flash.c,v 1.6 2004/11/04 13:24:14 gleixner Exp $
5 *
6 * (C) 2003 Pete Popov <ppopov@embeddedalley.com>
7 *
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <asm/io.h>
21
22#ifdef DEBUG_RW
23#define DBG(x...) printk(x)
24#else
25#define DBG(x...)
26#endif
27
28/* MTD CONFIG OPTIONS */
29#if defined(CONFIG_MTD_DB1X00_BOOT) && defined(CONFIG_MTD_DB1X00_USER)
30#define DB1X00_BOTH_BANKS
31#elif defined(CONFIG_MTD_DB1X00_BOOT) && !defined(CONFIG_MTD_DB1X00_USER)
32#define DB1X00_BOOT_ONLY
33#elif !defined(CONFIG_MTD_DB1X00_BOOT) && defined(CONFIG_MTD_DB1X00_USER)
34#define DB1X00_USER_ONLY
35#endif
36
37static unsigned long window_addr;
38static unsigned long window_size;
39static unsigned long flash_size;
40
41static unsigned short *bcsr = (unsigned short *)0xAE000000;
42static unsigned char flash_bankwidth = 4;
43
44/*
45 * The Db1x boards support different flash densities. We set up
46 * the mtd_partition structures below for the default 64 Mbit
47 * flash density and override the partition sizes, if
48 * necessary, after checking the board status register.
49 */
50
51#ifdef DB1X00_BOTH_BANKS
52/* both banks will be used. Combine the first bank and the first
53 * part of the second bank together into a single jffs/jffs2
54 * partition.
55 */
56static struct mtd_partition db1x00_partitions[] = {
57 {
58 .name = "User FS",
59 .size = 0x1c00000,
60 .offset = 0x0000000
61 },{
62 .name = "yamon",
63 .size = 0x0100000,
64 .offset = MTDPART_OFS_APPEND,
65 .mask_flags = MTD_WRITEABLE
66 },{
67 .name = "raw kernel",
68 .size = (0x300000-0x40000), /* last 256KB is env */
69 .offset = MTDPART_OFS_APPEND,
70 }
71};
72#elif defined(DB1X00_BOOT_ONLY)
73static struct mtd_partition db1x00_partitions[] = {
74 {
75 .name = "User FS",
76 .size = 0x00c00000,
77 .offset = 0x0000000
78 },{
79 .name = "yamon",
80 .size = 0x0100000,
81 .offset = MTDPART_OFS_APPEND,
82 .mask_flags = MTD_WRITEABLE
83 },{
84 .name = "raw kernel",
85 .size = (0x300000-0x40000), /* last 256KB is env */
86 .offset = MTDPART_OFS_APPEND,
87 }
88};
89#elif defined(DB1X00_USER_ONLY)
90static struct mtd_partition db1x00_partitions[] = {
91 {
92 .name = "User FS",
93 .size = 0x0e00000,
94 .offset = 0x0000000
95 },{
96 .name = "raw kernel",
97 .size = MTDPART_SIZ_FULL,
98 .offset = MTDPART_OFS_APPEND,
99 }
100};
101#else
102#error MTD_DB1X00 define combo error /* should never happen */
103#endif
104#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
105
106#define NAME "Db1x00 Linux Flash"
107
108static struct map_info db1xxx_mtd_map = {
109 .name = NAME,
110};
111
112static struct mtd_partition *parsed_parts;
113static struct mtd_info *db1xxx_mtd;
114
115/*
116 * Probe the flash density and setup window address and size
117 * based on user CONFIG options. There are times when we don't
118 * want the MTD driver to be probing the boot or user flash,
119 * so having the option to enable only one bank is important.
120 */
121int setup_flash_params(void)
122{
123 switch ((bcsr[2] >> 14) & 0x3) {
124 case 0: /* 64Mbit devices */
125 flash_size = 0x800000; /* 8MB per part */
126#if defined(DB1X00_BOTH_BANKS)
127 window_addr = 0x1E000000;
128 window_size = 0x2000000;
129#elif defined(DB1X00_BOOT_ONLY)
130 window_addr = 0x1F000000;
131 window_size = 0x1000000;
132#else /* USER ONLY */
133 window_addr = 0x1E000000;
134 window_size = 0x1000000;
135#endif
136 break;
137 case 1:
138 /* 128 Mbit devices */
139 flash_size = 0x1000000; /* 16MB per part */
140#if defined(DB1X00_BOTH_BANKS)
141 window_addr = 0x1C000000;
142 window_size = 0x4000000;
143 /* USERFS from 0x1C00 0000 to 0x1FC0 0000 */
144 db1x00_partitions[0].size = 0x3C00000;
145#elif defined(DB1X00_BOOT_ONLY)
146 window_addr = 0x1E000000;
147 window_size = 0x2000000;
148 /* USERFS from 0x1E00 0000 to 0x1FC0 0000 */
149 db1x00_partitions[0].size = 0x1C00000;
150#else /* USER ONLY */
151 window_addr = 0x1C000000;
152 window_size = 0x2000000;
153 /* USERFS from 0x1C00 0000 to 0x1DE00000 */
154 db1x00_partitions[0].size = 0x1DE0000;
155#endif
156 break;
157 case 2:
158 /* 256 Mbit devices */
159 flash_size = 0x4000000; /* 64MB per part */
160#if defined(DB1X00_BOTH_BANKS)
161 return 1;
162#elif defined(DB1X00_BOOT_ONLY)
163 /* Boot ROM flash bank only; no user bank */
164 window_addr = 0x1C000000;
165 window_size = 0x4000000;
166 /* USERFS from 0x1C00 0000 to 0x1FC00000 */
167 db1x00_partitions[0].size = 0x3C00000;
168#else /* USER ONLY */
169 return 1;
170#endif
171 break;
172 default:
173 return 1;
174 }
175 db1xxx_mtd_map.size = window_size;
176 db1xxx_mtd_map.bankwidth = flash_bankwidth;
177 db1xxx_mtd_map.phys = window_addr;
178 db1xxx_mtd_map.bankwidth = flash_bankwidth;
179 return 0;
180}
181
182int __init db1x00_mtd_init(void)
183{
184 struct mtd_partition *parts;
185 int nb_parts = 0;
186
187 if (setup_flash_params())
188 return -ENXIO;
189
190 /*
191 * Static partition definition selection
192 */
193 parts = db1x00_partitions;
194 nb_parts = NB_OF(db1x00_partitions);
195
196 /*
197 * Now let's probe for the actual flash. Do it here since
198 * specific machine settings might have been set above.
199 */
200 printk(KERN_NOTICE "Db1xxx flash: probing %d-bit flash bus\n",
201 db1xxx_mtd_map.bankwidth*8);
202 db1xxx_mtd_map.virt = ioremap(window_addr, window_size);
203 db1xxx_mtd = do_map_probe("cfi_probe", &db1xxx_mtd_map);
204 if (!db1xxx_mtd) return -ENXIO;
205 db1xxx_mtd->owner = THIS_MODULE;
206
207 add_mtd_partitions(db1xxx_mtd, parts, nb_parts);
208 return 0;
209}
210
211static void __exit db1x00_mtd_cleanup(void)
212{
213 if (db1xxx_mtd) {
214 del_mtd_partitions(db1xxx_mtd);
215 map_destroy(db1xxx_mtd);
216 if (parsed_parts)
217 kfree(parsed_parts);
218 }
219}
220
221module_init(db1x00_mtd_init);
222module_exit(db1x00_mtd_cleanup);
223
224MODULE_AUTHOR("Pete Popov");
225MODULE_DESCRIPTION("Db1x00 mtd map driver");
226MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
new file mode 100644
index 000000000000..d850a27a4b59
--- /dev/null
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -0,0 +1,126 @@
1/*
2 * $Id: dbox2-flash.c,v 1.13 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * D-Box 2 flash driver
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <asm/io.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14#include <linux/mtd/partitions.h>
15#include <linux/config.h>
16#include <linux/errno.h>
17
18/* partition_info gives details on the logical partitions that the single
19 * flash device is split into. If the size is zero we use up to the end of the
20 * device. */
21static struct mtd_partition partition_info[]= {
22 {
23 .name = "BR bootloader",
24 .size = 128 * 1024,
25 .offset = 0,
26 .mask_flags = MTD_WRITEABLE
27 },
28 {
29 .name = "FLFS (U-Boot)",
30 .size = 128 * 1024,
31 .offset = MTDPART_OFS_APPEND,
32 .mask_flags = 0
33 },
34 {
35 .name = "Root (SquashFS)",
36 .size = 7040 * 1024,
37 .offset = MTDPART_OFS_APPEND,
38 .mask_flags = 0
39 },
40 {
41 .name = "var (JFFS2)",
42 .size = 896 * 1024,
43 .offset = MTDPART_OFS_APPEND,
44 .mask_flags = 0
45 },
46 {
47 .name = "Flash without bootloader",
48 .size = MTDPART_SIZ_FULL,
49 .offset = 128 * 1024,
50 .mask_flags = 0
51 },
52 {
53 .name = "Complete Flash",
54 .size = MTDPART_SIZ_FULL,
55 .offset = 0,
56 .mask_flags = MTD_WRITEABLE
57 }
58};
59
60#define NUM_PARTITIONS (sizeof(partition_info) / sizeof(partition_info[0]))
61
62#define WINDOW_ADDR 0x10000000
63#define WINDOW_SIZE 0x800000
64
65static struct mtd_info *mymtd;
66
67
68struct map_info dbox2_flash_map = {
69 .name = "D-Box 2 flash memory",
70 .size = WINDOW_SIZE,
71 .bankwidth = 4,
72 .phys = WINDOW_ADDR,
73};
74
75int __init init_dbox2_flash(void)
76{
77 printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
78 dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
79
80 if (!dbox2_flash_map.virt) {
81 printk("Failed to ioremap\n");
82 return -EIO;
83 }
84 simple_map_init(&dbox2_flash_map);
85
86 // Probe for dual Intel 28F320 or dual AMD
87 mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
88 if (!mymtd) {
89 // Probe for single Intel 28F640
90 dbox2_flash_map.bankwidth = 2;
91
92 mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
93 }
94
95 if (mymtd) {
96 mymtd->owner = THIS_MODULE;
97
98 /* Create MTD devices for each partition. */
99 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
100
101 return 0;
102 }
103
104 iounmap((void *)dbox2_flash_map.virt);
105 return -ENXIO;
106}
107
108static void __exit cleanup_dbox2_flash(void)
109{
110 if (mymtd) {
111 del_mtd_partitions(mymtd);
112 map_destroy(mymtd);
113 }
114 if (dbox2_flash_map.virt) {
115 iounmap((void *)dbox2_flash_map.virt);
116 dbox2_flash_map.virt = 0;
117 }
118}
119
120module_init(init_dbox2_flash);
121module_exit(cleanup_dbox2_flash);
122
123
124MODULE_LICENSE("GPL");
125MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
126MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
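
init_dbox2_flash() above probes first at bankwidth 4 (two 16-bit chips side
by side) and then retries at bankwidth 2 (a single chip). The same fallback
can be written as a loop over candidate widths; a minimal sketch of that
pattern, assuming only the MTD map API already used by the file above (the
helper name is invented for illustration):

/* Sketch only: probe a map at decreasing bank widths until one matches. */
static struct mtd_info *probe_with_fallback(struct map_info *map)
{
	static const int widths[] = { 4, 2 };	/* dual 28F320, then single 28F640 */
	struct mtd_info *mtd = NULL;
	int i;

	for (i = 0; i < 2 && !mtd; i++) {
		map->bankwidth = widths[i];
		mtd = do_map_probe("cfi_probe", map);
	}
	return mtd;
}
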
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
new file mode 100644
index 000000000000..938c41f2f056
--- /dev/null
+++ b/drivers/mtd/maps/dc21285.c
@@ -0,0 +1,253 @@
1/*
2 * MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 *
6 * This code is GPL
7 *
8 * $Id: dc21285.c,v 1.22 2004/11/01 13:39:21 rmk Exp $
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/delay.h>
16
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/map.h>
19#include <linux/mtd/partitions.h>
20
21#include <asm/io.h>
22#include <asm/hardware/dec21285.h>
23#include <asm/mach-types.h>
24
25
26static struct mtd_info *dc21285_mtd;
27
28#ifdef CONFIG_ARCH_NETWINDER
29/*
30 * This is really ugly, but it seems to be the only
31 * reliable way to do it, as the CPLD state machine
32 * is unpredictable. So we have a 25us penalty per
33 * write access.
34 */
35static void nw_en_write(void)
36{
37 extern spinlock_t gpio_lock;
38 unsigned long flags;
39
40 /*
41 * we want to write a bit pattern XXX1 to Xilinx to enable
42 * the write gate, which will be open for about the next 2ms.
43 */
44 spin_lock_irqsave(&gpio_lock, flags);
45 cpld_modify(1, 1);
46 spin_unlock_irqrestore(&gpio_lock, flags);
47
48 /*
49 * let the ISA bus catch up...
50 */
51 udelay(25);
52}
53#else
54#define nw_en_write() do { } while (0)
55#endif
56
57static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
58{
59 map_word val;
60 val.x[0] = *(uint8_t*)(map->virt + ofs);
61 return val;
62}
63
64static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
65{
66 map_word val;
67 val.x[0] = *(uint16_t*)(map->virt + ofs);
68 return val;
69}
70
71static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
72{
73 map_word val;
74 val.x[0] = *(uint32_t*)(map->virt + ofs);
75 return val;
76}
77
78static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
79{
80 memcpy(to, (void*)(map->virt + from), len);
81}
82
83static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
84{
85 if (machine_is_netwinder())
86 nw_en_write();
87 *CSR_ROMWRITEREG = adr & 3;
88 adr &= ~3;
89 *(uint8_t*)(map->virt + adr) = d.x[0];
90}
91
92static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
93{
94 if (machine_is_netwinder())
95 nw_en_write();
96 *CSR_ROMWRITEREG = adr & 3;
97 adr &= ~3;
98 *(uint16_t*)(map->virt + adr) = d.x[0];
99}
100
101static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
102{
103 if (machine_is_netwinder())
104 nw_en_write();
105 *(uint32_t*)(map->virt + adr) = d.x[0];
106}
107
108static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
109{
110 while (len > 0) {
111 map_word d;
112 d.x[0] = *((uint32_t*)from)++;
113 dc21285_write32(map, d, to);
114 to += 4;
115 len -= 4;
116 }
117}
118
119static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
120{
121 while (len > 0) {
122 map_word d;
123 d.x[0] = *((uint16_t*)from)++;
124 dc21285_write16(map, d, to);
125 to += 2;
126 len -= 2;
127 }
128}
129
130static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
131{
132 map_word d;
133 for (; len > 0; to++, len--) {
134  d.x[0] = *((uint8_t*)from)++;
135  dc21285_write8(map, d, to);
136 }
137}
138
139static struct map_info dc21285_map = {
140 .name = "DC21285 flash",
141 .phys = NO_XIP,
142 .size = 16*1024*1024,
143 .copy_from = dc21285_copy_from,
144};
145
146
147/* Partition stuff */
148#ifdef CONFIG_MTD_PARTITIONS
149static struct mtd_partition *dc21285_parts;
150static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
151#endif
152
153static int __init init_dc21285(void)
154{
155
156#ifdef CONFIG_MTD_PARTITIONS
157 int nrparts;
158#endif
159
160 /* Determine bankwidth */
161 switch (*CSR_SA110_CNTL & (3<<14)) {
162 case SA110_CNTL_ROMWIDTH_8:
163 dc21285_map.bankwidth = 1;
164 dc21285_map.read = dc21285_read8;
165 dc21285_map.write = dc21285_write8;
166 dc21285_map.copy_to = dc21285_copy_to_8;
167 break;
168 case SA110_CNTL_ROMWIDTH_16:
169 dc21285_map.bankwidth = 2;
170 dc21285_map.read = dc21285_read16;
171 dc21285_map.write = dc21285_write16;
172 dc21285_map.copy_to = dc21285_copy_to_16;
173 break;
174 case SA110_CNTL_ROMWIDTH_32:
175 dc21285_map.bankwidth = 4;
176 dc21285_map.read = dc21285_read32;
177 dc21285_map.write = dc21285_write32;
178 dc21285_map.copy_to = dc21285_copy_to_32;
179 break;
180 default:
181 printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
182 return -ENXIO;
183 }
184 printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
185 dc21285_map.bankwidth*8);
186
187 /* Let's map the flash area */
188 dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
189 if (!dc21285_map.virt) {
190 printk("Failed to ioremap\n");
191 return -EIO;
192 }
193
194 if (machine_is_ebsa285()) {
195 dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
196 } else {
197 dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
198 }
199
200 if (!dc21285_mtd) {
201 iounmap(dc21285_map.virt);
202 return -ENXIO;
203 }
204
205 dc21285_mtd->owner = THIS_MODULE;
206
207#ifdef CONFIG_MTD_PARTITIONS
208 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
209 if (nrparts > 0)
210 add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
211 else
212#endif
213 add_mtd_device(dc21285_mtd);
214
215 if(machine_is_ebsa285()) {
216 /*
217 * Flash timing is determined with bits 19-16 of the
218 * CSR_SA110_CNTL. The value is the number of wait cycles, or
219 * 0 for 16 cycles (the default). Cycles are 20 ns.
220 * Here we use 7 for 140 ns flash chips.
221 */
222 /* access time */
223 *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
224 /* burst time */
225 *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
226 /* tristate time */
227 *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
228 }
229
230 return 0;
231}
232
233static void __exit cleanup_dc21285(void)
234{
235#ifdef CONFIG_MTD_PARTITIONS
236 if (dc21285_parts) {
237 del_mtd_partitions(dc21285_mtd);
238 kfree(dc21285_parts);
239 } else
240#endif
241 del_mtd_device(dc21285_mtd);
242
243 map_destroy(dc21285_mtd);
244 iounmap(dc21285_map.virt);
245}
246
247module_init(init_dc21285);
248module_exit(cleanup_dc21285);
249
250
251MODULE_LICENSE("GPL");
252MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
253MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
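
The timing comment in init_dc21285() encodes flash access time as a 4-bit
wait-cycle count in bits 19:16 of CSR_SA110_CNTL, at 20 ns per cycle, with 0
standing for 16 cycles; 7 therefore covers a 140 ns part. A small worked
helper for that arithmetic (the function name and the round-up policy are
the editor's assumptions, not part of the driver):

/* Sketch only: turn a flash access time in nanoseconds into the 4-bit
 * wait-cycle field (20 ns per cycle; the value 0 encodes 16 cycles). */
static unsigned int sa110_wait_field(unsigned int access_ns)
{
	unsigned int cycles = (access_ns + 19) / 20;	/* round up */

	if (cycles == 0)
		cycles = 1;
	if (cycles >= 16)
		return 0;			/* 0 means 16 cycles */
	return cycles;				/* 140 ns -> 7, as used above */
}
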
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
new file mode 100644
index 000000000000..0bc79c93a584
--- /dev/null
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -0,0 +1,495 @@
1/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
16 *
17 * $Id: dilnetpc.c,v 1.17 2004/11/28 09:40:39 dwmw2 Exp $
18 *
19 * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
20 * featuring the AMD Elan SC410 processor. There are two variants of this
21 * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
22 * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
23 * flash and 16 megs of RAM.
24 * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
25 * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
26 */
27
28#include <linux/config.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <asm/io.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h>
36#include <linux/mtd/partitions.h>
37#include <linux/mtd/concat.h>
38
39/*
40** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
41** Destroying any of these blocks transforms the DNPC into
42** a paperweight (albeit not a very useful one, considering
43** it only weighs a few grams).
44**
45** Therefore, the BIOS blocks must never be erased or written to
46** except by people who know exactly what they are doing (e.g.
47** to install a BIOS update). These partitions are marked read-only
48** by default, but can be made read/write by undefining
49** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
50*/
51#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
52
53/*
54** The ID string (in ROM) is checked to determine whether we
55** are running on a DNP/1486 or ADNP/1486
56*/
57#define BIOSID_BASE 0x000fe100
58
59#define ID_DNPC "DNP1486"
60#define ID_ADNP "ADNP1486"
61
62/*
63** Address where the flash should appear in CPU space
64*/
65#define FLASH_BASE 0x2000000
66
67/*
68** Chip Setup and Control (CSC) indexed register space
69*/
70#define CSC_INDEX 0x22
71#define CSC_DATA 0x23
72
73#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */
74#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */
75
76#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */
77
78#define CSC_CR 0xd0 /* internal I/O device disable/Echo */
79 /* Z-bus/configuration register */
80
81#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */
82
83
84/*
85** PC Card indexed register space:
86*/
87
88#define PCC_INDEX 0x3e0
89#define PCC_DATA 0x3e1
90
91#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */
92#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */
93#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */
94#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */
95#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */
96#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */
97#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */
98
99
100/*
101** Access to SC4x0's Chip Setup and Control (CSC)
102** and PC Card (PCC) indexed registers:
103*/
104static inline void setcsc(int reg, unsigned char data)
105{
106 outb(reg, CSC_INDEX);
107 outb(data, CSC_DATA);
108}
109
110static inline unsigned char getcsc(int reg)
111{
112 outb(reg, CSC_INDEX);
113 return(inb(CSC_DATA));
114}
115
116static inline void setpcc(int reg, unsigned char data)
117{
118 outb(reg, PCC_INDEX);
119 outb(data, PCC_DATA);
120}
121
122static inline unsigned char getpcc(int reg)
123{
124 outb(reg, PCC_INDEX);
125 return(inb(PCC_DATA));
126}
127
128
129/*
130************************************************************
131** Enable access to DIL/NetPC's flash by mapping it into
132** the SC4x0's MMS Window C.
133************************************************************
134*/
135static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
136{
137 unsigned long flash_end = flash_base + flash_size - 1;
138
139 /*
140 ** enable setup of MMS windows C-F:
141 */
142 /* - enable PC Card indexed register space */
143 setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
144 /* - set PC Card controller to operate in standard mode */
145 setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
146
147 /*
148 ** Program base address and end address of window
149 ** where the flash ROM should appear in CPU address space
150 */
151 setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
152 setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
153 setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
154 setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
155
156 /* program offset of first flash location to appear in this window (0) */
157 setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
158 setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
159
160 /* set attributes for MMS window C: non-cacheable, write-enabled */
161 setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
162
163 /* select physical device ROMCS0 (i.e. flash) for MMS Window C */
164 setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
165
166 /* enable memory window 1 */
167 setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
168
169 /* now disable PC Card indexed register space again */
170 setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
171}
172
173
174/*
175************************************************************
176** Disable access to DIL/NetPC's flash by unmapping it from
177** the SC4x0's MMS Window C.
178************************************************************
179*/
180static void dnpc_unmap_flash(void)
181{
182 /* - enable PC Card indexed register space */
183 setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
184
185 /* disable memory window 1 */
186 setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
187
188 /* now disable PC Card indexed register space again */
189 setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
190}
191
192
193
194/*
195************************************************************
196** Enable/Disable VPP to write to flash
197************************************************************
198*/
199
200static DEFINE_SPINLOCK(dnpc_spin);
201static int vpp_counter = 0;
202/*
203** This is what has to be done for the DNP board ..
204*/
205static void dnp_set_vpp(struct map_info *not_used, int on)
206{
207 spin_lock_irq(&dnpc_spin);
208
209 if (on)
210 {
211 if(++vpp_counter == 1)
212 setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
213 }
214 else
215 {
216 if(--vpp_counter == 0)
217 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
218 else if(vpp_counter < 0)
219 BUG();
220 }
221 spin_unlock_irq(&dnpc_spin);
222}
223
224/*
225** .. and this the ADNP version:
226*/
227static void adnp_set_vpp(struct map_info *not_used, int on)
228{
229 spin_lock_irq(&dnpc_spin);
230
231 if (on)
232 {
233 if(++vpp_counter == 1)
234 setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
235 }
236 else
237 {
238 if(--vpp_counter == 0)
239 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
240 else if(vpp_counter < 0)
241 BUG();
242 }
243 spin_unlock_irq(&dnpc_spin);
244}
245
246
247
248#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
249#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
250#define WINDOW_ADDR FLASH_BASE
251
252static struct map_info dnpc_map = {
253 .name = "ADNP Flash Bank",
254 .size = ADNP_WINDOW_SIZE,
255 .bankwidth = 1,
256 .set_vpp = adnp_set_vpp,
257 .phys = WINDOW_ADDR
258};
259
260/*
261** The layout of the flash is somewhat "strange":
262**
263** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data
264** 2. 64 KiB (1 block) : System BIOS
265** 3. 960 KiB (15 blocks) : User Data (DNP model) or
266** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
267** 4. 64 KiB (1 block) : System BIOS Entry
268*/
269
270static struct mtd_partition partition_info[]=
271{
272 {
273 .name = "ADNP boot",
274 .offset = 0,
275 .size = 0xf0000,
276 },
277 {
278 .name = "ADNP system BIOS",
279 .offset = MTDPART_OFS_NXTBLK,
280 .size = 0x10000,
281#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
282 .mask_flags = MTD_WRITEABLE,
283#endif
284 },
285 {
286 .name = "ADNP file system",
287 .offset = MTDPART_OFS_NXTBLK,
288 .size = 0x2f0000,
289 },
290 {
291 .name = "ADNP system BIOS entry",
292 .offset = MTDPART_OFS_NXTBLK,
293 .size = MTDPART_SIZ_FULL,
294#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
295 .mask_flags = MTD_WRITEABLE,
296#endif
297 },
298};
299
300#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
301
302static struct mtd_info *mymtd;
303static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
304static struct mtd_info *merged_mtd;
305
306/*
307** "Highlevel" partition info:
308**
309** Using the MTD concat layer, we can re-arrange partitions to our
310** liking: we construct a virtual MTD device by concatenating the
311** partitions, specifying the sequence such that the boot block
312** is immediately followed by the filesystem block (i.e. the stupid
313** system BIOS block is mapped to a different place). When re-partitioning
314** this concatenated MTD device, we can set the boot block size to
315** an arbitrary (though erase block aligned) value i.e. not one that
316** is dictated by the flash's physical layout. We can thus set the
317** boot block to be e.g. 64 KB (which is fully sufficient if we want
318** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
319** a large kernel image. In all cases, the remainder of the flash
320** is available as file system space.
321*/
322
323static struct mtd_partition higlvl_partition_info[]=
324{
325 {
326 .name = "ADNP boot block",
327 .offset = 0,
328 .size = CONFIG_MTD_DILNETPC_BOOTSIZE,
329 },
330 {
331 .name = "ADNP file system space",
332 .offset = MTDPART_OFS_NXTBLK,
333 .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
334 },
335 {
336 .name = "ADNP system BIOS + BIOS Entry",
337 .offset = MTDPART_OFS_NXTBLK,
338 .size = MTDPART_SIZ_FULL,
339#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
340 .mask_flags = MTD_WRITEABLE,
341#endif
342 },
343};
344
345#define NUM_HIGHLVL_PARTITIONS (sizeof(higlvl_partition_info)/sizeof(higlvl_partition_info[0]))
346
347
348static int dnp_adnp_probe(void)
349{
350 char *biosid; int rc = -1;
351
352 biosid = (char*)ioremap(BIOSID_BASE, 16);
353 if(biosid)
354 {
355 if(!strcmp(biosid, ID_DNPC))
356 rc = 1; /* this is a DNPC */
357 else if(!strcmp(biosid, ID_ADNP))
358 rc = 0; /* this is a ADNPC */
359 }
360 iounmap((void *)biosid);
361 return(rc);
362}
363
364
365static int __init init_dnpc(void)
366{
367 int is_dnp;
368
369 /*
370 ** determine hardware (DNP/ADNP/invalid)
371 */
372 if((is_dnp = dnp_adnp_probe()) < 0)
373 return -ENXIO;
374
375 /*
376 ** Things are set up for ADNP by default
377 ** -> modify all that needs to be different for DNP
378 */
379 if(is_dnp)
380 { /*
381 ** Adjust window size, select correct set_vpp function.
382 ** The partitioning scheme is identical on both DNP
383 ** and ADNP except for the size of the third partition.
384 */
385 int i;
386 dnpc_map.size = DNP_WINDOW_SIZE;
387 dnpc_map.set_vpp = dnp_set_vpp;
388 partition_info[2].size = 0xf0000;
389
390 /*
391 ** increment all string pointers so the leading 'A' gets skipped,
392 ** thus turning all occurrences of "ADNP ..." into "DNP ..."
393 */
394 ++dnpc_map.name;
395 for(i = 0; i < NUM_PARTITIONS; i++)
396 ++partition_info[i].name;
397 higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
398 CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
399 for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
400 ++higlvl_partition_info[i].name;
401 }
402
403 printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%lx\n",
404 is_dnp ? "DNPC" : "ADNP", dnpc_map.size, dnpc_map.phys);
405
406 dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
407
408 dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
409
410 if (!dnpc_map.virt) {
411 printk("Failed to ioremap_nocache\n");
412 return -EIO;
413 }
414 simple_map_init(&dnpc_map);
415
416 printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
417
418 mymtd = do_map_probe("jedec_probe", &dnpc_map);
419
420 if (!mymtd)
421 mymtd = do_map_probe("cfi_probe", &dnpc_map);
422
423 /*
424 ** If flash probes fail, try to make flashes accessible
425 ** at least as ROM. Adjust erasesize in this case since
426 ** the default one (128M) would break our partitioning
427 */
428 if (!mymtd)
429 if((mymtd = do_map_probe("map_rom", &dnpc_map)))
430 mymtd->erasesize = 0x10000;
431
432 if (!mymtd) {
433 iounmap(dnpc_map.virt);
434 return -ENXIO;
435 }
436
437 mymtd->owner = THIS_MODULE;
438
439 /*
440 ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
441 ** -> add_mtd_partitions() will _not_ register MTD devices for
442 ** the partitions, but will instead store pointers to the MTD
443 ** objects it creates into our lowlvl_parts[] array.
444 ** NOTE: we arrange the pointers such that the sequence of the
445 ** partitions gets re-arranged: partition #2 follows
446 ** partition #0.
447 */
448 partition_info[0].mtdp = &lowlvl_parts[0];
449 partition_info[1].mtdp = &lowlvl_parts[2];
450 partition_info[2].mtdp = &lowlvl_parts[1];
451 partition_info[3].mtdp = &lowlvl_parts[3];
452
453 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
454
455 /*
456 ** now create a virtual MTD device by concatenating the four partitions
457 ** (in the sequence given by the lowlvl_parts[] array).
458 */
459 merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
460 if(merged_mtd)
461 { /*
462 ** now partition the new device the way we want it. This time,
463 ** we do not supply mtd pointers in higlvl_partition_info, so
464 ** add_mtd_partitions() will register the devices.
465 */
466 add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS);
467 }
468
469 return 0;
470}
471
472static void __exit cleanup_dnpc(void)
473{
474 if(merged_mtd) {
475 del_mtd_partitions(merged_mtd);
476 mtd_concat_destroy(merged_mtd);
477 }
478
479 if (mymtd) {
480 del_mtd_partitions(mymtd);
481 map_destroy(mymtd);
482 }
483 if (dnpc_map.virt) {
484 iounmap(dnpc_map.virt);
485 dnpc_unmap_flash();
486 dnpc_map.virt = NULL;
487 }
488}
489
490module_init(init_dnpc);
491module_exit(cleanup_dnpc);
492
493MODULE_LICENSE("GPL");
494MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
495MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
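
The "highlevel" partition comment above describes the general MTD concat
trick: export the low-level partitions only as mtd_info pointers, glue them
back together (possibly in a different order), then partition the combined
device. A condensed, generic sketch of that data flow; the names and sizes
here are invented for illustration and do not match the DIL/NetPC layout:

/* Sketch only: the two-stage partitioning pattern used by init_dnpc(). */
static struct mtd_info *sub[2];

static struct mtd_partition low_parts[] = {
	{ .name = "boot", .offset = 0,       .size = 0x10000,          .mtdp = &sub[0] },
	{ .name = "rest", .offset = 0x10000, .size = MTDPART_SIZ_FULL, .mtdp = &sub[1] },
};

static int concat_example(struct mtd_info *chip)
{
	struct mtd_info *merged;

	/* .mtdp is set, so this only fills sub[] and registers nothing */
	add_mtd_partitions(chip, low_parts, 2);

	/* concatenate the pieces (here in their original order) ... */
	merged = mtd_concat_create(sub, 2, "concat example");
	if (!merged)
		return -ENXIO;

	/* ... then register or re-partition the combined device as usual */
	add_mtd_device(merged);
	return 0;
}
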
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
new file mode 100644
index 000000000000..b9bc63503e26
--- /dev/null
+++ b/drivers/mtd/maps/dmv182.c
@@ -0,0 +1,149 @@
1
2/*
3 * drivers/mtd/maps/svme182.c
4 *
5 * Flash map driver for the Dy4 SVME182 board
6 *
7 * $Id: dmv182.c,v 1.5 2004/11/04 13:24:14 gleixner Exp $
8 *
9 * Copyright 2003-2004, TimeSys Corporation
10 *
11 * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <asm/io.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/map.h>
27#include <linux/mtd/partitions.h>
28#include <linux/errno.h>
29
30/*
31 * This driver currently handles only the 16MiB user flash bank 1 on the
32 * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
33 * (VxWorks boot), or the optional 48MiB expansion flash.
34 *
35 * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
36 * now supports the first 96MiB (the boot flash bank containing FFW
37 * is excluded). The VxWorks loader is in partition 1.
38 */
39
40#define FLASH_BASE_ADDR 0xf0000000
41#define FLASH_BANK_SIZE (128*1024*1024)
42
43MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
44MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
45MODULE_LICENSE("GPL");
46
47static struct map_info svme182_map = {
48 .name = "Dy4 SVME182",
49 .bankwidth = 32,
50 .size = 128 * 1024 * 1024
51};
52
53#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
54
55// Allow 6MiB for the kernel
56#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
57// Allow 1MiB for the bootloader
58#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
59// Use the remaining 9MiB at the end of flash for the RFS
60#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
61 NEW_BOOTIMAGE_PART_SIZE)
62
63static struct mtd_partition svme182_partitions[] = {
64 // The Lower PABS is only 128KiB, but the partition code doesn't
65 // like partitions that don't end on the largest erase block
66 // size of the device, even if all of the erase blocks in the
67 // partition are small ones. The hardware should prevent
68 // writes to the actual PABS areas.
69 {
70 name: "Lower PABS and CPU 0 bootloader or kernel",
71 size: 6*1024*1024,
72 offset: 0,
73 },
74 {
75 name: "Root Filesystem",
76 size: 10*1024*1024,
77 offset: MTDPART_OFS_NXTBLK
78 },
79 {
80 name: "CPU1 Bootloader",
81 size: 1024*1024,
82 offset: MTDPART_OFS_NXTBLK,
83 },
84 {
85 name: "Extra",
86 size: 110*1024*1024,
87 offset: MTDPART_OFS_NXTBLK
88 },
89 {
90 name: "Foundation Firmware and Upper PABS",
91 size: 1024*1024,
92 offset: MTDPART_OFS_NXTBLK,
93 mask_flags: MTD_WRITEABLE // read-only
94 }
95};
96
97static struct mtd_info *this_mtd;
98
99static int __init init_svme182(void)
100{
101 struct mtd_partition *partitions;
102 int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition);
103
104 partitions = svme182_partitions;
105
106 svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
107
108 if (svme182_map.virt == 0) {
109 printk("Failed to ioremap FLASH memory area.\n");
110 return -EIO;
111 }
112
113 simple_map_init(&svme182_map);
114
115 this_mtd = do_map_probe("cfi_probe", &svme182_map);
116 if (!this_mtd)
117 {
118 iounmap((void *)svme182_map.virt);
119 return -ENXIO;
120 }
121
122 printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
123 this_mtd->size >> 20, FLASH_BASE_ADDR);
124
125 this_mtd->owner = THIS_MODULE;
126 add_mtd_partitions(this_mtd, partitions, num_parts);
127
128 return 0;
129}
130
131static void __exit cleanup_svme182(void)
132{
133 if (this_mtd)
134 {
135 del_mtd_partitions(this_mtd);
136 map_destroy(this_mtd);
137 }
138
139 if (svme182_map.virt)
140 {
141 iounmap((void *)svme182_map.virt);
142 svme182_map.virt = 0;
143 }
144
145 return;
146}
147
148module_init(init_svme182);
149module_exit(cleanup_svme182);
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
new file mode 100644
index 000000000000..b9d9cf4854b6
--- /dev/null
+++ b/drivers/mtd/maps/ebony.c
@@ -0,0 +1,163 @@
1/*
2 * $Id: ebony.c,v 1.15 2004/12/09 18:39:54 holindho Exp $
3 *
4 * Mapping for Ebony user flash
5 *
6 * Matt Porter <mporter@kernel.crashing.org>
7 *
8 * Copyright 2002-2004 MontaVista Software Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#include <linux/config.h>
24#include <linux/version.h>
25#include <asm/io.h>
26#include <asm/ibm44x.h>
27#include <platforms/4xx/ebony.h>
28
29static struct mtd_info *flash;
30
31static struct map_info ebony_small_map = {
32 .name = "Ebony small flash",
33 .size = EBONY_SMALL_FLASH_SIZE,
34 .bankwidth = 1,
35};
36
37static struct map_info ebony_large_map = {
38 .name = "Ebony large flash",
39 .size = EBONY_LARGE_FLASH_SIZE,
40 .bankwidth = 1,
41};
42
43static struct mtd_partition ebony_small_partitions[] = {
44 {
45 .name = "OpenBIOS",
46 .offset = 0x0,
47 .size = 0x80000,
48 }
49};
50
51static struct mtd_partition ebony_large_partitions[] = {
52 {
53 .name = "fs",
54 .offset = 0,
55 .size = 0x380000,
56 },
57 {
58 .name = "firmware",
59 .offset = 0x380000,
60 .size = 0x80000,
61 }
62};
63
64int __init init_ebony(void)
65{
66 u8 fpga0_reg;
67 u8 __iomem *fpga0_adr;
68 unsigned long long small_flash_base, large_flash_base;
69
70 fpga0_adr = ioremap64(EBONY_FPGA_ADDR, 16);
71 if (!fpga0_adr)
72 return -ENOMEM;
73
74 fpga0_reg = readb(fpga0_adr);
75 iounmap(fpga0_adr);
76
77 if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
78 !EBONY_FLASH_SEL(fpga0_reg))
79 small_flash_base = EBONY_SMALL_FLASH_HIGH2;
80 else if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
81 EBONY_FLASH_SEL(fpga0_reg))
82 small_flash_base = EBONY_SMALL_FLASH_HIGH1;
83 else if (!EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
84 !EBONY_FLASH_SEL(fpga0_reg))
85 small_flash_base = EBONY_SMALL_FLASH_LOW2;
86 else
87 small_flash_base = EBONY_SMALL_FLASH_LOW1;
88
89 if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
90 !EBONY_ONBRD_FLASH_EN(fpga0_reg))
91 large_flash_base = EBONY_LARGE_FLASH_LOW;
92 else
93 large_flash_base = EBONY_LARGE_FLASH_HIGH;
94
95 ebony_small_map.phys = small_flash_base;
96 ebony_small_map.virt = ioremap64(small_flash_base,
97 ebony_small_map.size);
98
99 if (!ebony_small_map.virt) {
100 printk("Failed to ioremap flash\n");
101 return -EIO;
102 }
103
104 simple_map_init(&ebony_small_map);
105
106 flash = do_map_probe("jedec_probe", &ebony_small_map);
107 if (flash) {
108 flash->owner = THIS_MODULE;
109 add_mtd_partitions(flash, ebony_small_partitions,
110 ARRAY_SIZE(ebony_small_partitions));
111 } else {
112 printk("map probe failed for flash\n");
113 return -ENXIO;
114 }
115
116 ebony_large_map.phys = large_flash_base;
117 ebony_large_map.virt = ioremap64(large_flash_base,
118 ebony_large_map.size);
119
120 if (!ebony_large_map.virt) {
121 printk("Failed to ioremap flash\n");
122 return -EIO;
123 }
124
125 simple_map_init(&ebony_large_map);
126
127 flash = do_map_probe("jedec_probe", &ebony_large_map);
128 if (flash) {
129 flash->owner = THIS_MODULE;
130 add_mtd_partitions(flash, ebony_large_partitions,
131 ARRAY_SIZE(ebony_large_partitions));
132 } else {
133 printk("map probe failed for flash\n");
134 return -ENXIO;
135 }
136
137 return 0;
138}
139
140static void __exit cleanup_ebony(void)
141{
142 if (flash) {
143 del_mtd_partitions(flash);
144 map_destroy(flash);
145 }
146
147 if (ebony_small_map.virt) {
148 iounmap(ebony_small_map.virt);
149 ebony_small_map.virt = NULL;
150 }
151
152 if (ebony_large_map.virt) {
153 iounmap(ebony_large_map.virt);
154 ebony_large_map.virt = NULL;
155 }
156}
157
158module_init(init_ebony);
159module_exit(cleanup_ebony);
160
161MODULE_LICENSE("GPL");
162MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
163MODULE_DESCRIPTION("MTD map and partitions for IBM 440GP Ebony boards");
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
new file mode 100644
index 000000000000..8b0da394f3fa
--- /dev/null
+++ b/drivers/mtd/maps/edb7312.c
@@ -0,0 +1,147 @@
1/*
2 * $Id: edb7312.c,v 1.13 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * Handle mapping of the NOR flash on Cogent EDB7312 boards
5 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <asm/io.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/config.h>
21
22#ifdef CONFIG_MTD_PARTITIONS
23#include <linux/mtd/partitions.h>
24#endif
25
26#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
27#define WINDOW_SIZE 0x01000000
28#define BUSWIDTH 2
29#define FLASH_BLOCKSIZE_MAIN 0x20000
30#define FLASH_NUMBLOCKS_MAIN 128
31/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
32#define PROBETYPES { "cfi_probe", NULL }
33
34#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
35#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
36
37static struct mtd_info *mymtd;
38
39struct map_info edb7312nor_map = {
40 .name = "NOR flash on EDB7312",
41 .size = WINDOW_SIZE,
42 .bankwidth = BUSWIDTH,
43 .phys = WINDOW_ADDR,
44};
45
46#ifdef CONFIG_MTD_PARTITIONS
47
48/*
49 * MTD partitioning stuff
50 */
51static struct mtd_partition static_partitions[3] =
52{
53 {
54 .name = "ARMboot",
55 .size = 0x40000,
56 .offset = 0
57 },
58 {
59 .name = "Kernel",
60 .size = 0x200000,
61 .offset = 0x40000
62 },
63 {
64 .name = "RootFS",
65 .size = 0xDC0000,
66 .offset = 0x240000
67 },
68};
69
70static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
71
72#endif
73
74static int mtd_parts_nb = 0;
75static struct mtd_partition *mtd_parts = 0;
76
77int __init init_edb7312nor(void)
78{
79 static const char *rom_probe_types[] = PROBETYPES;
80 const char **type;
81 const char *part_type = 0;
82
83 printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
84 WINDOW_SIZE, WINDOW_ADDR);
85 edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
86
87 if (!edb7312nor_map.virt) {
88 printk(MSG_PREFIX "failed to ioremap\n");
89 return -EIO;
90 }
91
92 simple_map_init(&edb7312nor_map);
93
94 mymtd = 0;
95 type = rom_probe_types;
96 for(; !mymtd && *type; type++) {
97 mymtd = do_map_probe(*type, &edb7312nor_map);
98 }
99 if (mymtd) {
100 mymtd->owner = THIS_MODULE;
101
102#ifdef CONFIG_MTD_PARTITIONS
103 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
104 if (mtd_parts_nb > 0)
105 part_type = "detected";
106
107 if (mtd_parts_nb == 0)
108 {
109 mtd_parts = static_partitions;
110 mtd_parts_nb = ARRAY_SIZE(static_partitions);
111 part_type = "static";
112 }
113#endif
114 add_mtd_device(mymtd);
115 if (mtd_parts_nb == 0)
116 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
117 else
118 {
119 printk(KERN_NOTICE MSG_PREFIX
120 "using %s partition definition\n", part_type);
121 add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
122 }
123 return 0;
124 }
125
126 iounmap((void *)edb7312nor_map.virt);
127 return -ENXIO;
128}
129
130static void __exit cleanup_edb7312nor(void)
131{
132 if (mymtd) {
133 del_mtd_device(mymtd);
134 map_destroy(mymtd);
135 }
136 if (edb7312nor_map.virt) {
137 iounmap((void *)edb7312nor_map.virt);
138 edb7312nor_map.virt = 0;
139 }
140}
141
142module_init(init_edb7312nor);
143module_exit(cleanup_edb7312nor);
144
145MODULE_LICENSE("GPL");
146MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
147MODULE_DESCRIPTION("MTD map driver for the NOR flash on Cogent EDB7312 boards");
diff --git a/drivers/mtd/maps/elan-104nc.c b/drivers/mtd/maps/elan-104nc.c
new file mode 100644
index 000000000000..e9465f5c069e
--- /dev/null
+++ b/drivers/mtd/maps/elan-104nc.c
@@ -0,0 +1,228 @@
1/* elan-104nc.c -- MTD map driver for Arcom Control Systems ELAN-104NC
2
3 Copyright (C) 2000 Arcom Control System Ltd
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
18
19 $Id: elan-104nc.c,v 1.25 2004/11/28 09:40:39 dwmw2 Exp $
20
21The ELAN-104NC has up to 8 MiByte of Intel StrataFlash (28F320/28F640) in x16
22mode. This driver uses the CFI probe and Intel Extended Command Set drivers.
23
24The flash is accessed as follows:
25
26 32 kbyte memory window at 0xb0000-0xb7fff
27
28 16 bit I/O port (0x22) for some sort of paging.
29
30The single flash device is divided into 3 partitions which appear as separate
31MTD devices.
32
33Linux thinks that the I/O port is used by the PIC and hence check_region() will
34always fail. So we don't do it. I just hope it doesn't break anything.
35*/
36#include <linux/module.h>
37#include <linux/slab.h>
38#include <linux/ioport.h>
39#include <linux/init.h>
40#include <asm/io.h>
41
42#include <linux/mtd/map.h>
43#include <linux/mtd/mtd.h>
44#include <linux/mtd/partitions.h>
45
46#define WINDOW_START 0xb0000
47/* Number of bits in offset. */
48#define WINDOW_SHIFT 15
49#define WINDOW_LENGTH (1 << WINDOW_SHIFT)
50/* The bits for the offset into the window. */
51#define WINDOW_MASK (WINDOW_LENGTH-1)
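/*
 * For example, an access at flash offset 0x12345 selects page
 * 0x12345 >> WINDOW_SHIFT = 2 and lands at offset
 * 0x12345 & WINDOW_MASK = 0x2345 within the 32 kbyte window at
 * WINDOW_START.
 */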
52#define PAGE_IO 0x22
53#define PAGE_IO_SIZE 2
54
55static volatile int page_in_window = -1; // Current page in window.
56static void __iomem *iomapadr;
57static DEFINE_SPINLOCK(elan_104nc_spin);
58
59/* partition_info gives details on the logical partitions that the single
60 * flash device is split into. If the size is zero we use up to the end of
61 * the device. */
62static struct mtd_partition partition_info[]={
63 { .name = "ELAN-104NC flash boot partition",
64 .offset = 0,
65 .size = 640*1024 },
66 { .name = "ELAN-104NC flash partition 1",
67 .offset = 640*1024,
68 .size = 896*1024 },
69 { .name = "ELAN-104NC flash partition 2",
70 .offset = (640+896)*1024 }
71};
72#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
73
74/*
75 * No idea what is going on here. This is taken from the FlashFX stuff.
76 */
77#define ROMCS 1
78
79static inline void elan_104nc_setup(void)
80{
81 u16 t;
82
83 outw( 0x0023 + ROMCS*2, PAGE_IO );
84 t=inb( PAGE_IO+1 );
85
86 t=(t & 0xf9) | 0x04;
87
88 outw( ((0x0023 + ROMCS*2) | (t << 8)), PAGE_IO );
89}
90
91static inline void elan_104nc_page(struct map_info *map, unsigned long ofs)
92{
93 unsigned long page = ofs >> WINDOW_SHIFT;
94
95 if( page!=page_in_window ) {
96 int cmd1;
97 int cmd2;
98
99 cmd1=(page & 0x700) + 0x0833 + ROMCS*0x4000;
100 cmd2=((page & 0xff) << 8) + 0x0032;
101
102 outw( cmd1, PAGE_IO );
103 outw( cmd2, PAGE_IO );
104
105 page_in_window = page;
106 }
107}
108
109
110static map_word elan_104nc_read16(struct map_info *map, unsigned long ofs)
111{
112 map_word ret;
113 spin_lock(&elan_104nc_spin);
114 elan_104nc_page(map, ofs);
115 ret.x[0] = readw(iomapadr + (ofs & WINDOW_MASK));
116 spin_unlock(&elan_104nc_spin);
117 return ret;
118}
119
120static void elan_104nc_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
121{
122 while (len) {
123 unsigned long thislen = len;
124 if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
125 thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
126
127 spin_lock(&elan_104nc_spin);
128 elan_104nc_page(map, from);
129 memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
130 spin_unlock(&elan_104nc_spin);
131 to += thislen;
132 from += thislen;
133 len -= thislen;
134 }
135}
136
137static void elan_104nc_write16(struct map_info *map, map_word d, unsigned long adr)
138{
139 spin_lock(&elan_104nc_spin);
140 elan_104nc_page(map, adr);
141 writew(d.x[0], iomapadr + (adr & WINDOW_MASK));
142 spin_unlock(&elan_104nc_spin);
143}
144
145static void elan_104nc_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
146{
147 while(len) {
148 unsigned long thislen = len;
149 if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
150 thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
151
152 spin_lock(&elan_104nc_spin);
153 elan_104nc_page(map, to);
154 memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
155 spin_unlock(&elan_104nc_spin);
156 to += thislen;
157 from += thislen;
158 len -= thislen;
159 }
160}
161
162static struct map_info elan_104nc_map = {
163 .name = "ELAN-104NC flash",
164 .phys = NO_XIP,
165	.size = 8*1024*1024, /* this must be set to the maximum possible amount
166			       of flash so the CFI probe routines find all
167			       the chips */
168 .bankwidth = 2,
169 .read = elan_104nc_read16,
170 .copy_from = elan_104nc_copy_from,
171 .write = elan_104nc_write16,
172 .copy_to = elan_104nc_copy_to
173};
174
175/* MTD device for all of the flash. */
176static struct mtd_info *all_mtd;
177
178static void cleanup_elan_104nc(void)
179{
180 if( all_mtd ) {
181 del_mtd_partitions( all_mtd );
182 map_destroy( all_mtd );
183 }
184
185 iounmap(iomapadr);
186}
187
188static int __init init_elan_104nc(void)
189{
190 /* Urg! We use I/O port 0x22 without request_region()ing it,
191 because it's already allocated to the PIC. */
192
193 iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
194 if (!iomapadr) {
195 printk( KERN_ERR"%s: failed to ioremap memory region\n",
196 elan_104nc_map.name );
197 return -EIO;
198 }
199
200 printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
201 elan_104nc_map.name,
202 PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
203 WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
204
205 elan_104nc_setup();
206
207 /* Probe for chip. */
208 all_mtd = do_map_probe("cfi_probe", &elan_104nc_map );
209 if( !all_mtd ) {
210 cleanup_elan_104nc();
211 return -ENXIO;
212 }
213
214 all_mtd->owner = THIS_MODULE;
215
216 /* Create MTD devices for each partition. */
217 add_mtd_partitions( all_mtd, partition_info, NUM_PARTITIONS );
218
219 return 0;
220}
221
222module_init(init_elan_104nc);
223module_exit(cleanup_elan_104nc);
224
225
226MODULE_LICENSE("GPL");
227MODULE_AUTHOR("Arcom Control Systems Ltd.");
228MODULE_DESCRIPTION("MTD map driver for Arcom Control Systems ELAN-104NC");
diff --git a/drivers/mtd/maps/epxa10db-flash.c b/drivers/mtd/maps/epxa10db-flash.c
new file mode 100644
index 000000000000..ab6dbe2b8cce
--- /dev/null
+++ b/drivers/mtd/maps/epxa10db-flash.c
@@ -0,0 +1,176 @@
1/*
2 * Flash memory access on EPXA based devices
3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 * Copyright (C) 2001 Altera Corporation
6 * Copyright (C) 2001 Red Hat, Inc.
7 *
8 * $Id: epxa10db-flash.c,v 1.13 2004/11/04 13:24:14 gleixner Exp $
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <asm/io.h>
31#include <linux/mtd/mtd.h>
32#include <linux/mtd/map.h>
33#include <linux/mtd/partitions.h>
34
35#include <asm/hardware.h>
36#ifdef CONFIG_EPXA10DB
37#define BOARD_NAME "EPXA10DB"
38#else
39#define BOARD_NAME "EPXA1DB"
40#endif
41
42static int nr_parts = 0;
43static struct mtd_partition *parts;
44
45static struct mtd_info *mymtd;
46
47static int epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts);
48
49
50static struct map_info epxa_map = {
51 .name = "EPXA flash",
52 .size = FLASH_SIZE,
53 .bankwidth = 2,
54 .phys = FLASH_START,
55};
56
57static const char *probes[] = { "RedBoot", "afs", NULL };
58
59static int __init epxa_mtd_init(void)
60{
61 int i;
62
63 printk(KERN_NOTICE "%s flash device: 0x%x at 0x%x\n", BOARD_NAME, FLASH_SIZE, FLASH_START);
64
65 epxa_map.virt = ioremap(FLASH_START, FLASH_SIZE);
66 if (!epxa_map.virt) {
67 printk("Failed to ioremap %s flash\n",BOARD_NAME);
68 return -EIO;
69 }
70 simple_map_init(&epxa_map);
71
72 mymtd = do_map_probe("cfi_probe", &epxa_map);
73 if (!mymtd) {
74 iounmap((void *)epxa_map.virt);
75 return -ENXIO;
76 }
77
78 mymtd->owner = THIS_MODULE;
79
80 /* Unlock the flash device. */
81 if(mymtd->unlock){
82 for (i=0; i<mymtd->numeraseregions;i++){
83 int j;
84 for(j=0;j<mymtd->eraseregions[i].numblocks;j++){
85 mymtd->unlock(mymtd,mymtd->eraseregions[i].offset + j * mymtd->eraseregions[i].erasesize,mymtd->eraseregions[i].erasesize);
86 }
87 }
88 }
89
90#ifdef CONFIG_MTD_PARTITIONS
91 nr_parts = parse_mtd_partitions(mymtd, probes, &parts, 0);
92
93 if (nr_parts > 0) {
94 add_mtd_partitions(mymtd, parts, nr_parts);
95 return 0;
96 }
97#endif
98 /* No recognised partitioning schemes found - use defaults */
99 nr_parts = epxa_default_partitions(mymtd, &parts);
100 if (nr_parts > 0) {
101 add_mtd_partitions(mymtd, parts, nr_parts);
102 return 0;
103 }
104
105 /* If all else fails... */
106 add_mtd_device(mymtd);
107 return 0;
108}
109
110static void __exit epxa_mtd_cleanup(void)
111{
112 if (mymtd) {
113 if (nr_parts)
114 del_mtd_partitions(mymtd);
115 else
116 del_mtd_device(mymtd);
117 map_destroy(mymtd);
118 }
119 if (epxa_map.virt) {
120 iounmap((void *)epxa_map.virt);
121 epxa_map.virt = 0;
122 }
123}
124
125
126/*
127 * This will do for now, once we decide which bootldr we're finally
128 * going to use then we'll remove this function and do it properly
129 *
130 * Partions are currently (as offsets from base of flash):
131 * 0x00000000 - 0x003FFFFF - bootloader (!)
132 * 0x00400000 - 0x00FFFFFF - Flashdisk
133 */
134
135static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts)
136{
137 struct mtd_partition *parts;
138	int i;
139 int npartitions = 0;
140 char *names;
141 const char *name = "jffs";
142
143 printk("Using default partitions for %s\n",BOARD_NAME);
144 npartitions=1;
145	parts = kmalloc(npartitions*sizeof(*parts)+strlen(name)+1, GFP_KERNEL);
146	if (!parts) {		/* check before touching the buffer */
147		npartitions = 0;	/* report no partitions rather than a NULL one */
148		goto out;
149	}
150	memzero(parts, npartitions*sizeof(*parts)+strlen(name)+1);	/* +1 covers the name's NUL */
151 i=0;
152 names = (char *)&parts[npartitions];
153 parts[i].name = names;
154 names += strlen(name) + 1;
155 strcpy(parts[i].name, name);
156
157#ifdef CONFIG_EPXA10DB
158 parts[i].size = FLASH_SIZE-0x00400000;
159 parts[i].offset = 0x00400000;
160#else
161 parts[i].size = FLASH_SIZE-0x00180000;
162 parts[i].offset = 0x00180000;
163#endif
164
165 out:
166 *pparts = parts;
167 return npartitions;
168}
169
170
171module_init(epxa_mtd_init);
172module_exit(epxa_mtd_cleanup);
173
174MODULE_AUTHOR("Clive Davies");
175MODULE_DESCRIPTION("Altera epxa mtd flash map");
176MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
new file mode 100644
index 000000000000..068bb6a54520
--- /dev/null
+++ b/drivers/mtd/maps/fortunet.c
@@ -0,0 +1,271 @@
1/* fortunet.c memory map
2 *
3 * $Id: fortunet.c,v 1.9 2004/11/04 13:24:14 gleixner Exp $
4 */
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <asm/io.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/partitions.h>
14
15#define MAX_NUM_REGIONS 4
16#define MAX_NUM_PARTITIONS 8
17
18#define DEF_WINDOW_ADDR_PHY 0x00000000
19#define DEF_WINDOW_SIZE		0x00800000	// 8 MiB
20
21#define MTD_FORTUNET_PK "MTD FortuNet: "
22
23#define MAX_NAME_SIZE 128
24
25struct map_region
26{
27 int window_addr_physical;
28 int altbankwidth;
29 struct map_info map_info;
30 struct mtd_info *mymtd;
31 struct mtd_partition parts[MAX_NUM_PARTITIONS];
32 char map_name[MAX_NAME_SIZE];
33 char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
34};
35
36static struct map_region map_regions[MAX_NUM_REGIONS];
37static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
38static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
39
40
41
42struct map_info default_map = {
43 .size = DEF_WINDOW_SIZE,
44 .bankwidth = 4,
45};
46
47static char * __init get_string_option(char *dest,int dest_size,char *sor)
48{
49 if(!dest_size)
50 return sor;
51 dest_size--;
52 while(*sor)
53 {
54 if(*sor==',')
55 {
56 sor++;
57 break;
58 }
59 else if(*sor=='\"')
60 {
61 sor++;
62 while(*sor)
63 {
64 if(*sor=='\"')
65 {
66 sor++;
67 break;
68 }
69 *dest = *sor;
70 dest++;
71 sor++;
72 dest_size--;
73 if(!dest_size)
74 {
75 *dest = 0;
76 return sor;
77 }
78 }
79 }
80 else
81 {
82 *dest = *sor;
83 dest++;
84 sor++;
85 dest_size--;
86 if(!dest_size)
87 {
88 *dest = 0;
89 return sor;
90 }
91 }
92 }
93 *dest = 0;
94 return sor;
95}
96
97static int __init MTD_New_Region(char *line)
98{
99 char string[MAX_NAME_SIZE];
100 int params[6];
101 get_options (get_string_option(string,sizeof(string),line),6,params);
102 if(params[0]<1)
103 {
104		printk(MTD_FORTUNET_PK "Bad parameters for MTD Region: "
105			"name,region-number[,base,size,bankwidth,altbankwidth]\n");
106 return 1;
107 }
108 if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
109 {
110 printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
111 params[1],MAX_NUM_REGIONS-1);
112 return 1;
113 }
114 memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
115 memcpy(&map_regions[params[1]].map_info,
116 &default_map,sizeof(map_regions[params[1]].map_info));
117 map_regions_set[params[1]] = 1;
118 map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
119 map_regions[params[1]].altbankwidth = 2;
120 map_regions[params[1]].mymtd = NULL;
121 map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
122 strcpy(map_regions[params[1]].map_info.name,string);
123 if(params[0]>1)
124 {
125 map_regions[params[1]].window_addr_physical = params[2];
126 }
127 if(params[0]>2)
128 {
129 map_regions[params[1]].map_info.size = params[3];
130 }
131 if(params[0]>3)
132 {
133 map_regions[params[1]].map_info.bankwidth = params[4];
134 }
135 if(params[0]>4)
136 {
137 map_regions[params[1]].altbankwidth = params[5];
138 }
139 return 1;
140}
141
142static int __init MTD_New_Partition(char *line)
143{
144 char string[MAX_NAME_SIZE];
145 int params[4];
146 get_options (get_string_option(string,sizeof(string),line),4,params);
147 if(params[0]<3)
148 {
149		printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition: "
150			"name,region-number,size,offset\n");
151 return 1;
152 }
153 if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
154 {
155 printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
156 params[1],MAX_NUM_REGIONS-1);
157 return 1;
158 }
159 if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
160 {
161 printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
162 return 1;
163 }
164 map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
165 map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
166 strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
167 map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
168 params[2];
169 map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
170 params[3];
171 map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
172 map_regions_parts[params[1]]++;
173 return 1;
174}
175
176__setup("MTD_Region=", MTD_New_Region);
177__setup("MTD_Partition=", MTD_New_Partition);
178
179/* Backwards-spelling-compatibility */
180__setup("MTD_Partion=", MTD_New_Partition);
181
182int __init init_fortunet(void)
183{
184 int ix,iy;
185 for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
186 {
187 if(map_regions_parts[ix]&&(!map_regions_set[ix]))
188 {
189 printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
190 ix);
191 memset(&map_regions[ix],0,sizeof(map_regions[ix]));
192 memcpy(&map_regions[ix].map_info,&default_map,
193 sizeof(map_regions[ix].map_info));
194 map_regions_set[ix] = 1;
195 map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
196 map_regions[ix].altbankwidth = 2;
197 map_regions[ix].mymtd = NULL;
198 map_regions[ix].map_info.name = map_regions[ix].map_name;
199 strcpy(map_regions[ix].map_info.name,"FORTUNET");
200 }
201 if(map_regions_set[ix])
202 {
203 iy++;
204			printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physical "
205				"address 0x%x, size 0x%lx\n",
206 map_regions[ix].map_info.name,
207 map_regions[ix].window_addr_physical,
208 map_regions[ix].map_info.size);
209
210			map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical;
211
212 map_regions[ix].map_info.virt =
213 ioremap_nocache(
214 map_regions[ix].window_addr_physical,
215 map_regions[ix].map_info.size);
216 if(!map_regions[ix].map_info.virt)
217 {
218 printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
219 map_regions[ix].map_info.name);
220 return -ENXIO;
221 }
222 simple_map_init(&map_regions[ix].map_info);
223
224			printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %p\n",
225 map_regions[ix].map_info.name,
226 map_regions[ix].map_info.virt);
227 map_regions[ix].mymtd = do_map_probe("cfi_probe",
228 &map_regions[ix].map_info);
229 if((!map_regions[ix].mymtd)&&(
230 map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
231 {
232 printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
233 "for %s flash.\n",
234 map_regions[ix].map_info.name);
235 map_regions[ix].map_info.bankwidth =
236 map_regions[ix].altbankwidth;
237 map_regions[ix].mymtd = do_map_probe("cfi_probe",
238 &map_regions[ix].map_info);
239 }
240			if (map_regions[ix].mymtd) {	/* probe may have failed */
241				map_regions[ix].mymtd->owner = THIS_MODULE;
242				add_mtd_partitions(map_regions[ix].mymtd,
243					map_regions[ix].parts,map_regions_parts[ix]);
244			}
243 }
244 }
245 if(iy)
246 return 0;
247 return -ENXIO;
248}
249
250static void __exit cleanup_fortunet(void)
251{
252 int ix;
253 for(ix=0;ix<MAX_NUM_REGIONS;ix++)
254 {
255 if(map_regions_set[ix])
256 {
257 if( map_regions[ix].mymtd )
258 {
259 del_mtd_partitions( map_regions[ix].mymtd );
260 map_destroy( map_regions[ix].mymtd );
261 }
262 iounmap((void *)map_regions[ix].map_info.virt);
263 }
264 }
265}
266
267module_init(init_fortunet);
268module_exit(cleanup_fortunet);
269
270MODULE_AUTHOR("FortuNet, Inc.");
271MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
new file mode 100644
index 000000000000..c73828171d9b
--- /dev/null
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -0,0 +1,144 @@
1/*
2 * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
3 * evaluation boards
4 *
5 * $Id: h720x-flash.c,v 1.11 2004/11/04 13:24:14 gleixner Exp $
6 *
7 * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
8 * 2003 Thomas Gleixner <tglx@linutronix.de>
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22#include <asm/hardware.h>
23#include <asm/io.h>
24
25static struct mtd_info *mymtd;
26
27static struct map_info h720x_map = {
28 .name = "H720X",
29 .bankwidth = 4,
30 .size = FLASH_SIZE,
31 .phys = FLASH_PHYS,
32};
33
34static struct mtd_partition h720x_partitions[] = {
35 {
36 .name = "ArMon",
37 .size = 0x00080000,
38 .offset = 0,
39 .mask_flags = MTD_WRITEABLE
40 },{
41 .name = "Env",
42 .size = 0x00040000,
43 .offset = 0x00080000,
44 .mask_flags = MTD_WRITEABLE
45 },{
46 .name = "Kernel",
47 .size = 0x00180000,
48 .offset = 0x000c0000,
49 .mask_flags = MTD_WRITEABLE
50 },{
51 .name = "Ramdisk",
52 .size = 0x00400000,
53 .offset = 0x00240000,
54 .mask_flags = MTD_WRITEABLE
55 },{
56 .name = "jffs2",
57 .size = MTDPART_SIZ_FULL,
58 .offset = MTDPART_OFS_APPEND
59 }
60};
61
62#define NUM_PARTITIONS (sizeof(h720x_partitions)/sizeof(h720x_partitions[0]))
63
64static int nr_mtd_parts;
65static struct mtd_partition *mtd_parts;
66static const char *probes[] = { "cmdlinepart", NULL };
67
68/*
69 * Initialize FLASH support
70 */
71int __init h720x_mtd_init(void)
72{
73
74 char *part_type = NULL;
75
76 h720x_map.virt = ioremap(FLASH_PHYS, FLASH_SIZE);
77
78 if (!h720x_map.virt) {
79 printk(KERN_ERR "H720x-MTD: ioremap failed\n");
80 return -EIO;
81 }
82
83 simple_map_init(&h720x_map);
84
85 // Probe for flash bankwidth 4
86 printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
87 mymtd = do_map_probe("cfi_probe", &h720x_map);
88 if (!mymtd) {
89 printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
90 // Probe for bankwidth 2
91 h720x_map.bankwidth = 2;
92 mymtd = do_map_probe("cfi_probe", &h720x_map);
93 }
94
95 if (mymtd) {
96 mymtd->owner = THIS_MODULE;
97
98#ifdef CONFIG_MTD_PARTITIONS
99 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
100 if (nr_mtd_parts > 0)
101 part_type = "command line";
102#endif
103 if (nr_mtd_parts <= 0) {
104 mtd_parts = h720x_partitions;
105 nr_mtd_parts = NUM_PARTITIONS;
106 part_type = "builtin";
107 }
108 printk(KERN_INFO "Using %s partition table\n", part_type);
109 add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts);
110 return 0;
111 }
112
113 iounmap((void *)h720x_map.virt);
114 return -ENXIO;
115}
116
117/*
118 * Cleanup
119 */
120static void __exit h720x_mtd_cleanup(void)
121{
122
123 if (mymtd) {
124 del_mtd_partitions(mymtd);
125 map_destroy(mymtd);
126 }
127
128 /* Free partition info, if commandline partition was used */
129 if (mtd_parts && (mtd_parts != h720x_partitions))
130 kfree (mtd_parts);
131
132 if (h720x_map.virt) {
133 iounmap((void *)h720x_map.virt);
134 h720x_map.virt = 0;
135 }
136}
137
138
139module_init(h720x_mtd_init);
140module_exit(h720x_mtd_cleanup);
141
142MODULE_LICENSE("GPL");
143MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
144MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
new file mode 100644
index 000000000000..29d1cc1bb426
--- /dev/null
+++ b/drivers/mtd/maps/ichxrom.c
@@ -0,0 +1,383 @@
1/*
2 * ichxrom.c
3 *
4 * Normal mappings of chips in physical memory
5 * $Id: ichxrom.c,v 1.16 2004/11/28 09:40:39 dwmw2 Exp $
6 */
7
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <asm/io.h>
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/map.h>
15#include <linux/mtd/cfi.h>
16#include <linux/mtd/flashchip.h>
17#include <linux/config.h>
18#include <linux/pci.h>
19#include <linux/pci_ids.h>
20#include <linux/list.h>
21
22#define xstr(s) str(s)
23#define str(s) #s
24#define MOD_NAME xstr(KBUILD_BASENAME)
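/*
 * xstr() stringifies after macro expansion, so MOD_NAME becomes the literal
 * base name of this file as provided by kbuild (normally "ichxrom").
 */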
25
26#define ADDRESS_NAME_LEN 18
27
28#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
29
30#define BIOS_CNTL 0x4e
31#define FWH_DEC_EN1 0xE3
32#define FWH_DEC_EN2 0xF0
33#define FWH_SEL1 0xE8
34#define FWH_SEL2 0xEE
35
36struct ichxrom_window {
37 void __iomem* virt;
38 unsigned long phys;
39 unsigned long size;
40 struct list_head maps;
41 struct resource rsrc;
42 struct pci_dev *pdev;
43};
44
45struct ichxrom_map_info {
46 struct list_head list;
47 struct map_info map;
48 struct mtd_info *mtd;
49 struct resource rsrc;
50 char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
51};
52
53static struct ichxrom_window ichxrom_window = {
54 .maps = LIST_HEAD_INIT(ichxrom_window.maps),
55};
56
57static void ichxrom_cleanup(struct ichxrom_window *window)
58{
59 struct ichxrom_map_info *map, *scratch;
60 u16 word;
61
62 /* Disable writes through the rom window */
63 pci_read_config_word(window->pdev, BIOS_CNTL, &word);
64 pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
65
66 /* Free all of the mtd devices */
67 list_for_each_entry_safe(map, scratch, &window->maps, list) {
68 if (map->rsrc.parent)
69 release_resource(&map->rsrc);
70 del_mtd_device(map->mtd);
71 map_destroy(map->mtd);
72 list_del(&map->list);
73 kfree(map);
74 }
75 if (window->rsrc.parent)
76 release_resource(&window->rsrc);
77 if (window->virt) {
78 iounmap(window->virt);
79 window->virt = NULL;
80 window->phys = 0;
81 window->size = 0;
82 window->pdev = NULL;
83 }
84}
85
86
87static int __devinit ichxrom_init_one (struct pci_dev *pdev,
88 const struct pci_device_id *ent)
89{
90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
91 struct ichxrom_window *window = &ichxrom_window;
92 struct ichxrom_map_info *map = NULL;
93 unsigned long map_top;
94 u8 byte;
95 u16 word;
96
97 /* For now I just handle the ichx and I assume there
98 * are not a lot of resources up at the top of the address
99 * space. It is possible to handle other devices in the
100 * top 16MB but it is very painful. Also since
101 * you can only really attach a FWH to an ICHX there
102	 * you can only really attach a FWH to an ICHX there are
103 *
104 * Also you can page firmware hubs if an 8MB window isn't enough
105 * but don't currently handle that case either.
106	 * but I don't currently handle that case either.
107 window->pdev = pdev;
108
109	/* Find a region contiguous to the end of the ROM window  */
110 window->phys = 0;
111 pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
112 if (byte == 0xff) {
113 window->phys = 0xffc00000;
114 pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
115 if ((byte & 0x0f) == 0x0f) {
116 window->phys = 0xff400000;
117 }
118 else if ((byte & 0x0e) == 0x0e) {
119 window->phys = 0xff500000;
120 }
121 else if ((byte & 0x0c) == 0x0c) {
122 window->phys = 0xff600000;
123 }
124 else if ((byte & 0x08) == 0x08) {
125 window->phys = 0xff700000;
126 }
127 }
128 else if ((byte & 0xfe) == 0xfe) {
129 window->phys = 0xffc80000;
130 }
131 else if ((byte & 0xfc) == 0xfc) {
132 window->phys = 0xffd00000;
133 }
134 else if ((byte & 0xf8) == 0xf8) {
135 window->phys = 0xffd80000;
136 }
137 else if ((byte & 0xf0) == 0xf0) {
138 window->phys = 0xffe00000;
139 }
140 else if ((byte & 0xe0) == 0xe0) {
141 window->phys = 0xffe80000;
142 }
143 else if ((byte & 0xc0) == 0xc0) {
144 window->phys = 0xfff00000;
145 }
146 else if ((byte & 0x80) == 0x80) {
147 window->phys = 0xfff80000;
148 }
149
150 if (window->phys == 0) {
151 printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
152 goto out;
153 }
154 window->phys -= 0x400000UL;
155 window->size = (0xffffffffUL - window->phys) + 1UL;
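	/*
	 * For example, if the decode registers selected 0xffc00000 above,
	 * window->phys is now 0xff800000 and window->size works out to
	 * 0x00800000, i.e. the top 8MiB of the 32-bit address space.
	 */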
156
157 /* Enable writes through the rom window */
158 pci_read_config_word(pdev, BIOS_CNTL, &word);
159 if (!(word & 1) && (word & (1<<1))) {
160 /* The BIOS will generate an error if I enable
161 * this device, so don't even try.
162 */
163 printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
164 goto out;
165 }
166 pci_write_config_word(pdev, BIOS_CNTL, word | 1);
167
168 /*
169 * Try to reserve the window mem region. If this fails then
170	 * it is likely due to the window being "reserved" by the BIOS.
171 */
172 window->rsrc.name = MOD_NAME;
173 window->rsrc.start = window->phys;
174 window->rsrc.end = window->phys + window->size - 1;
175 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
176 if (request_resource(&iomem_resource, &window->rsrc)) {
177 window->rsrc.parent = NULL;
178 printk(KERN_DEBUG MOD_NAME
179 ": %s(): Unable to register resource"
180 " 0x%.08lx-0x%.08lx - kernel bug?\n",
181 __func__,
182 window->rsrc.start, window->rsrc.end);
183 }
184
185 /* Map the firmware hub into my address space. */
186 window->virt = ioremap_nocache(window->phys, window->size);
187 if (!window->virt) {
188 printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
189 window->phys, window->size);
190 goto out;
191 }
192
193 /* Get the first address to look for an rom chip at */
194 map_top = window->phys;
195 if ((window->phys & 0x3fffff) != 0) {
196 map_top = window->phys + 0x400000;
197 }
198#if 1
199	/* The probe sequence, run over the firmware hub lock
200	 * registers, sets them to 0x7 (no access).
201 * Probe at most the last 4M of the address space.
202 */
203 if (map_top < 0xffc00000) {
204 map_top = 0xffc00000;
205 }
206#endif
207 /* Loop through and look for rom chips */
208 while((map_top - 1) < 0xffffffffUL) {
209 struct cfi_private *cfi;
210 unsigned long offset;
211 int i;
212
213 if (!map) {
214 map = kmalloc(sizeof(*map), GFP_KERNEL);
215 }
216 if (!map) {
217			printk(KERN_ERR MOD_NAME ": kmalloc failed\n");
218 goto out;
219 }
220 memset(map, 0, sizeof(*map));
221 INIT_LIST_HEAD(&map->list);
222 map->map.name = map->map_name;
223 map->map.phys = map_top;
224 offset = map_top - window->phys;
225 map->map.virt = (void __iomem *)
226 (((unsigned long)(window->virt)) + offset);
227 map->map.size = 0xffffffffUL - map_top + 1UL;
228 /* Set the name of the map to the address I am trying */
229 sprintf(map->map_name, "%s @%08lx",
230 MOD_NAME, map->map.phys);
231
232 /* Firmware hubs only use vpp when being programmed
233 * in a factory setting. So in-place programming
234 * needs to use a different method.
235 */
236 for(map->map.bankwidth = 32; map->map.bankwidth;
237 map->map.bankwidth >>= 1)
238 {
239 char **probe_type;
240 /* Skip bankwidths that are not supported */
241 if (!map_bankwidth_supported(map->map.bankwidth))
242 continue;
243
244 /* Setup the map methods */
245 simple_map_init(&map->map);
246
247 /* Try all of the probe methods */
248 probe_type = rom_probe_types;
249 for(; *probe_type; probe_type++) {
250 map->mtd = do_map_probe(*probe_type, &map->map);
251 if (map->mtd)
252 goto found;
253 }
254 }
255 map_top += ROM_PROBE_STEP_SIZE;
256 continue;
257 found:
258 /* Trim the size if we are larger than the map */
259 if (map->mtd->size > map->map.size) {
260 printk(KERN_WARNING MOD_NAME
261 " rom(%u) larger than window(%lu). fixing...\n",
262 map->mtd->size, map->map.size);
263 map->mtd->size = map->map.size;
264 }
265 if (window->rsrc.parent) {
266 /*
267 * Registering the MTD device in iomem may not be possible
268 * if there is a BIOS "reserved" and BUSY range. If this
269 * fails then continue anyway.
270 */
271 map->rsrc.name = map->map_name;
272 map->rsrc.start = map->map.phys;
273 map->rsrc.end = map->map.phys + map->mtd->size - 1;
274 map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
275 if (request_resource(&window->rsrc, &map->rsrc)) {
276 printk(KERN_ERR MOD_NAME
277 ": cannot reserve MTD resource\n");
278 map->rsrc.parent = NULL;
279 }
280 }
281
282 /* Make the whole region visible in the map */
283 map->map.virt = window->virt;
284 map->map.phys = window->phys;
285 cfi = map->map.fldrv_priv;
286 for(i = 0; i < cfi->numchips; i++) {
287 cfi->chips[i].start += offset;
288 }
289
290		/* Now that the mtd device is complete, claim and export it */
291 map->mtd->owner = THIS_MODULE;
292 if (add_mtd_device(map->mtd)) {
293 map_destroy(map->mtd);
294 map->mtd = NULL;
295 goto out;
296 }
297
298
299 /* Calculate the new value of map_top */
300 map_top += map->mtd->size;
301
302 /* File away the map structure */
303 list_add(&map->list, &window->maps);
304 map = NULL;
305 }
306
307 out:
308 /* Free any left over map structures */
309 if (map) {
310 kfree(map);
311 }
312 /* See if I have any map structures */
313 if (list_empty(&window->maps)) {
314 ichxrom_cleanup(window);
315 return -ENODEV;
316 }
317 return 0;
318}
319
320
321static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
322{
323 struct ichxrom_window *window = &ichxrom_window;
324 ichxrom_cleanup(window);
325}
326
327static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
328 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
329 PCI_ANY_ID, PCI_ANY_ID, },
330 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
331 PCI_ANY_ID, PCI_ANY_ID, },
332 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
333 PCI_ANY_ID, PCI_ANY_ID, },
334 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
335 PCI_ANY_ID, PCI_ANY_ID, },
336 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
337 PCI_ANY_ID, PCI_ANY_ID, },
338 { 0, },
339};
340
341MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
342
343#if 0
344static struct pci_driver ichxrom_driver = {
345 .name = MOD_NAME,
346 .id_table = ichxrom_pci_tbl,
347 .probe = ichxrom_init_one,
348 .remove = ichxrom_remove_one,
349};
350#endif
351
352static int __init init_ichxrom(void)
353{
354 struct pci_dev *pdev;
355 struct pci_device_id *id;
356
357 pdev = NULL;
358 for (id = ichxrom_pci_tbl; id->vendor; id++) {
359 pdev = pci_find_device(id->vendor, id->device, NULL);
360 if (pdev) {
361 break;
362 }
363 }
364 if (pdev) {
365 return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
366 }
367 return -ENXIO;
368#if 0
369 return pci_module_init(&ichxrom_driver);
370#endif
371}
372
373static void __exit cleanup_ichxrom(void)
374{
375 ichxrom_remove_one(ichxrom_window.pdev);
376}
377
378module_init(init_ichxrom);
379module_exit(cleanup_ichxrom);
380
381MODULE_LICENSE("GPL");
382MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
383MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
new file mode 100644
index 000000000000..cb39172c81d2
--- /dev/null
+++ b/drivers/mtd/maps/impa7.c
@@ -0,0 +1,161 @@
1/*
2 * $Id: impa7.c,v 1.13 2004/11/04 13:24:14 gleixner Exp $
3 *
4 * Handle mapping of the NOR flash on implementa A7 boards
5 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <asm/io.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/config.h>
21
22#ifdef CONFIG_MTD_PARTITIONS
23#include <linux/mtd/partitions.h>
24#endif
25
26#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
27#define WINDOW_SIZE0 0x00800000
28#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
29#define WINDOW_SIZE1 0x00800000
30#define NUM_FLASHBANKS 2
31#define BUSWIDTH 4
32
33/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
34#define PROBETYPES { "jedec_probe", NULL }
35
36#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
37#define MTDID "impa7-%d" /* for mtdparts= partitioning */
38
39static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
40
41
42static struct map_info impa7_map[NUM_FLASHBANKS] = {
43 {
44 .name = "impA7 NOR Flash Bank #0",
45 .size = WINDOW_SIZE0,
46 .bankwidth = BUSWIDTH,
47 },
48 {
49 .name = "impA7 NOR Flash Bank #1",
50 .size = WINDOW_SIZE1,
51 .bankwidth = BUSWIDTH,
52 },
53};
54
55#ifdef CONFIG_MTD_PARTITIONS
56
57/*
58 * MTD partitioning stuff
59 */
60static struct mtd_partition static_partitions[] =
61{
62 {
63 .name = "FileSystem",
64 .size = 0x800000,
65 .offset = 0x00000000
66 },
67};
68
69static int mtd_parts_nb[NUM_FLASHBANKS];
70static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
71
72#endif
73
74static const char *probes[] = { "cmdlinepart", NULL };
75
76int __init init_impa7(void)
77{
78 static const char *rom_probe_types[] = PROBETYPES;
79 const char **type;
80 const char *part_type = 0;
81 int i;
82 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
83 { WINDOW_ADDR0, WINDOW_SIZE0 },
84 { WINDOW_ADDR1, WINDOW_SIZE1 },
85 };
86 int devicesfound = 0;
87
88 for(i=0; i<NUM_FLASHBANKS; i++)
89 {
90 printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
91 pt[i].size, pt[i].addr);
92
93 impa7_map[i].phys = pt[i].addr;
94 impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
95 if (!impa7_map[i].virt) {
96 printk(MSG_PREFIX "failed to ioremap\n");
97 return -EIO;
98 }
99 simple_map_init(&impa7_map[i]);
100
101 impa7_mtd[i] = 0;
102 type = rom_probe_types;
103 for(; !impa7_mtd[i] && *type; type++) {
104 impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
105 }
106
107 if (impa7_mtd[i]) {
108 impa7_mtd[i]->owner = THIS_MODULE;
109 devicesfound++;
110#ifdef CONFIG_MTD_PARTITIONS
111 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
112 probes,
113 &mtd_parts[i],
114 0);
115 if (mtd_parts_nb[i] > 0) {
116 part_type = "command line";
117 } else {
118 mtd_parts[i] = static_partitions;
119 mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
120 part_type = "static";
121 }
122
123 printk(KERN_NOTICE MSG_PREFIX
124 "using %s partition definition\n",
125 part_type);
126 add_mtd_partitions(impa7_mtd[i],
127 mtd_parts[i], mtd_parts_nb[i]);
128#else
129 add_mtd_device(impa7_mtd[i]);
130
131#endif
132 }
133 else
134 iounmap((void *)impa7_map[i].virt);
135 }
136 return devicesfound == 0 ? -ENXIO : 0;
137}
138
139static void __exit cleanup_impa7(void)
140{
141 int i;
142 for (i=0; i<NUM_FLASHBANKS; i++) {
143 if (impa7_mtd[i]) {
144#ifdef CONFIG_MTD_PARTITIONS
145 del_mtd_partitions(impa7_mtd[i]);
146#else
147 del_mtd_device(impa7_mtd[i]);
148#endif
149 map_destroy(impa7_mtd[i]);
150 iounmap((void *)impa7_map[i].virt);
151 impa7_map[i].virt = 0;
152 }
153 }
154}
155
156module_init(init_impa7);
157module_exit(cleanup_impa7);
158
159MODULE_LICENSE("GPL");
160MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
161MODULE_DESCRIPTION("MTD map driver for implementa impA7");
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
new file mode 100644
index 000000000000..e39a98a0171c
--- /dev/null
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -0,0 +1,217 @@
1/*======================================================================
2
3 drivers/mtd/maps/integrator-flash.c: ARM Integrator flash map driver
4
5 Copyright (C) 2000 ARM Limited
6 Copyright (C) 2003 Deep Blue Solutions Ltd.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21
22 This is access code for flashes using ARM's flash partitioning
23 standards.
24
25 $Id: integrator-flash.c,v 1.18 2004/11/01 13:26:15 rmk Exp $
26
27======================================================================*/
28
29#include <linux/config.h>
30#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/ioport.h>
35#include <linux/device.h>
36#include <linux/init.h>
37
38#include <linux/mtd/mtd.h>
39#include <linux/mtd/map.h>
40#include <linux/mtd/partitions.h>
41
42#include <asm/mach/flash.h>
43#include <asm/hardware.h>
44#include <asm/io.h>
45#include <asm/system.h>
46
47#ifdef CONFIG_ARCH_P720T
48#define FLASH_BASE (0x04000000)
49#define FLASH_SIZE (64*1024*1024)
50#endif
51
52struct armflash_info {
53 struct flash_platform_data *plat;
54 struct resource *res;
55 struct mtd_partition *parts;
56 struct mtd_info *mtd;
57 struct map_info map;
58};
59
60static void armflash_set_vpp(struct map_info *map, int on)
61{
62 struct armflash_info *info = container_of(map, struct armflash_info, map);
63
64 if (info->plat && info->plat->set_vpp)
65 info->plat->set_vpp(on);
66}
67
68static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
69
70static int armflash_probe(struct device *_dev)
71{
72 struct platform_device *dev = to_platform_device(_dev);
73 struct flash_platform_data *plat = dev->dev.platform_data;
74 struct resource *res = dev->resource;
75 unsigned int size = res->end - res->start + 1;
76 struct armflash_info *info;
77 int err;
78 void __iomem *base;
79
80 info = kmalloc(sizeof(struct armflash_info), GFP_KERNEL);
81 if (!info) {
82 err = -ENOMEM;
83 goto out;
84 }
85
86 memset(info, 0, sizeof(struct armflash_info));
87
88 info->plat = plat;
89 if (plat && plat->init) {
90 err = plat->init();
91 if (err)
92 goto no_resource;
93 }
94
95 info->res = request_mem_region(res->start, size, "armflash");
96 if (!info->res) {
97 err = -EBUSY;
98 goto no_resource;
99 }
100
101 base = ioremap(res->start, size);
102 if (!base) {
103 err = -ENOMEM;
104 goto no_mem;
105 }
106
107 /*
108 * look for CFI based flash parts fitted to this board
109 */
110 info->map.size = size;
111 info->map.bankwidth = plat->width;
112 info->map.phys = res->start;
113 info->map.virt = base;
114 info->map.name = dev->dev.bus_id;
115 info->map.set_vpp = armflash_set_vpp;
116
117 simple_map_init(&info->map);
118
119 /*
120 * Also, the CFI layer automatically works out what size
121 * of chips we have, and does the necessary identification
122	 * for us.
123 */
124 info->mtd = do_map_probe(plat->map_name, &info->map);
125 if (!info->mtd) {
126 err = -ENXIO;
127 goto no_device;
128 }
129
130 info->mtd->owner = THIS_MODULE;
131
132 err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
133 if (err > 0) {
134 err = add_mtd_partitions(info->mtd, info->parts, err);
135 if (err)
136 printk(KERN_ERR
137 "mtd partition registration failed: %d\n", err);
138 }
139
140 if (err == 0)
141 dev_set_drvdata(&dev->dev, info);
142
143 /*
144 * If we got an error, free all resources.
145 */
146 if (err < 0) {
147 if (info->mtd) {
148 del_mtd_partitions(info->mtd);
149 map_destroy(info->mtd);
150 }
151 if (info->parts)
152 kfree(info->parts);
153
154 no_device:
155 iounmap(base);
156 no_mem:
157 release_mem_region(res->start, size);
158 no_resource:
159 if (plat && plat->exit)
160 plat->exit();
161 kfree(info);
162 }
163 out:
164 return err;
165}
166
167static int armflash_remove(struct device *_dev)
168{
169 struct platform_device *dev = to_platform_device(_dev);
170 struct armflash_info *info = dev_get_drvdata(&dev->dev);
171
172 dev_set_drvdata(&dev->dev, NULL);
173
174 if (info) {
175 if (info->mtd) {
176 del_mtd_partitions(info->mtd);
177 map_destroy(info->mtd);
178 }
179 if (info->parts)
180 kfree(info->parts);
181
182 iounmap(info->map.virt);
183 release_resource(info->res);
184 kfree(info->res);
185
186 if (info->plat && info->plat->exit)
187 info->plat->exit();
188
189 kfree(info);
190 }
191
192 return 0;
193}
194
195static struct device_driver armflash_driver = {
196 .name = "armflash",
197 .bus = &platform_bus_type,
198 .probe = armflash_probe,
199 .remove = armflash_remove,
200};
201
202static int __init armflash_init(void)
203{
204 return driver_register(&armflash_driver);
205}
206
207static void __exit armflash_exit(void)
208{
209 driver_unregister(&armflash_driver);
210}
211
212module_init(armflash_init);
213module_exit(armflash_exit);
214
215MODULE_AUTHOR("ARM Ltd");
216MODULE_DESCRIPTION("ARM Integrator CFI map driver");
217MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
new file mode 100644
index 000000000000..712401810841
--- /dev/null
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -0,0 +1,464 @@
1/*
2 * Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
6 * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
7 *
8 * $Id: ipaq-flash.c,v 1.3 2004/11/04 13:24:15 gleixner Exp $
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/spinlock.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <asm/page.h>
19#include <asm/mach-types.h>
20#include <asm/system.h>
21#include <asm/errno.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h>
26#ifdef CONFIG_MTD_CONCAT
27#include <linux/mtd/concat.h>
28#endif
29
30#include <asm/hardware.h>
31#include <asm/arch-sa1100/h3600.h>
32#include <asm/io.h>
33
34
35#ifndef CONFIG_IPAQ_HANDHELD
36#error This is for iPAQ Handhelds only
37#endif
38#ifdef CONFIG_SA1100_JORNADA56X
39
40static void jornada56x_set_vpp(struct map_info *map, int vpp)
41{
42 if (vpp)
43 GPSR = GPIO_GPIO26;
44 else
45 GPCR = GPIO_GPIO26;
46 GPDR |= GPIO_GPIO26;
47}
48
49#endif
50
51#ifdef CONFIG_SA1100_JORNADA720
52
53static void jornada720_set_vpp(struct map_info *map, int vpp)
54{
55 if (vpp)
56 PPSR |= 0x80;
57 else
58 PPSR &= ~0x80;
59 PPDR |= 0x80;
60}
61
62#endif
63
64#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
65
66#define IPAQ_MAP_INIT(X) \
67 { \
68 name: "IPAQ flash " X, \
69 }
70
71
72static struct map_info ipaq_map[MAX_IPAQ_CS] = {
73 IPAQ_MAP_INIT("bank 1"),
74 IPAQ_MAP_INIT("bank 2")
75};
76
77static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
78 NULL,
79 NULL
80};
81
82/*
83 * Here is partition information for all known IPAQ-based devices.
84 * See include/linux/mtd/partitions.h for definition of the mtd_partition
85 * structure.
86 *
87 * The *_max_flash_size is the maximum possible mapped flash size which
88 * is not necessarily the actual flash size. It must be no more than
89 * the value specified in the "struct map_desc *_io_desc" mapping
90 * definition for the corresponding machine.
91 *
92 * Please keep these in alphabetical order, and formatted as per existing
93 * entries. Thanks.
94 */
95
96#ifdef CONFIG_IPAQ_HANDHELD
97static unsigned long h3xxx_max_flash_size = 0x04000000;
98static struct mtd_partition h3xxx_partitions[] = {
99 {
100 name: "H3XXX boot firmware",
101#ifndef CONFIG_LAB
102 size: 0x00040000,
103#else
104 size: 0x00080000,
105#endif
106 offset: 0,
107#ifndef CONFIG_LAB
108 mask_flags: MTD_WRITEABLE, /* force read-only */
109#endif
110 },
111 {
112 name: "H3XXX root jffs2",
113#ifndef CONFIG_LAB
114 size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
115 offset: 0x00040000,
116#else
117 size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
118 offset: 0x00080000,
119#endif
120 },
121 {
122 name: "asset",
123 size: 0x40000,
124 offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
125 mask_flags: MTD_WRITEABLE, /* force read-only */
126 }
127};
128
129#ifndef CONFIG_MTD_CONCAT
130static struct mtd_partition h3xxx_partitions_bank2[] = {
131 /* this is used only on 2 CS machines when concat is not present */
132 {
133 name: "second H3XXX root jffs2",
134 size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
135 offset: 0x00000000,
136 },
137 {
138 name: "second asset",
139 size: 0x40000,
140 offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
141 mask_flags: MTD_WRITEABLE, /* force read-only */
142 }
143};
144#endif
145
146static DEFINE_SPINLOCK(ipaq_vpp_lock);
147
148static void h3xxx_set_vpp(struct map_info *map, int vpp)
149{
150 static int nest = 0;
151
152 spin_lock(&ipaq_vpp_lock);
153 if (vpp)
154 nest++;
155 else
156 nest--;
157 if (nest)
158 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
159 else
160 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
161 spin_unlock(&ipaq_vpp_lock);
162}
163
164#endif
165
166#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
167static unsigned long jornada_max_flash_size = 0x02000000;
168static struct mtd_partition jornada_partitions[] = {
169 {
170 name: "Jornada boot firmware",
171 size: 0x00040000,
172 offset: 0,
173 mask_flags: MTD_WRITEABLE, /* force read-only */
174 }, {
175 name: "Jornada root jffs2",
176 size: MTDPART_SIZ_FULL,
177 offset: 0x00040000,
178 }
179};
180#endif
181
182
183static struct mtd_partition *parsed_parts;
184static struct mtd_info *mymtd;
185
186static unsigned long cs_phys[] = {
187#ifdef CONFIG_ARCH_SA1100
188 SA1100_CS0_PHYS,
189 SA1100_CS1_PHYS,
190 SA1100_CS2_PHYS,
191 SA1100_CS3_PHYS,
192 SA1100_CS4_PHYS,
193 SA1100_CS5_PHYS,
194#else
195 PXA_CS0_PHYS,
196 PXA_CS1_PHYS,
197 PXA_CS2_PHYS,
198 PXA_CS3_PHYS,
199 PXA_CS4_PHYS,
200 PXA_CS5_PHYS,
201#endif
202};
203
204static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
205
206static int __init h1900_special_case(void);
207
208int __init ipaq_mtd_init(void)
209{
210 struct mtd_partition *parts = NULL;
211 int nb_parts = 0;
212 int parsed_nr_parts = 0;
213 const char *part_type;
214 int i; /* used when we have >1 flash chips */
215 unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
216
217 /* Default flash bankwidth */
218 // ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
219
220 if (machine_is_h1900())
221 {
222 /* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
223 return h1900_special_case();
224 }
225
226 if (machine_is_h3100() || machine_is_h1900())
227 for(i=0; i<MAX_IPAQ_CS; i++)
228 ipaq_map[i].bankwidth = 2;
229 else
230 for(i=0; i<MAX_IPAQ_CS; i++)
231 ipaq_map[i].bankwidth = 4;
232
233 /*
234 * Static partition definition selection
235 */
236 part_type = "static";
237
238 simple_map_init(&ipaq_map[0]);
239 simple_map_init(&ipaq_map[1]);
240
241#ifdef CONFIG_IPAQ_HANDHELD
242 if (machine_is_ipaq()) {
243 parts = h3xxx_partitions;
244 nb_parts = ARRAY_SIZE(h3xxx_partitions);
245 for(i=0; i<MAX_IPAQ_CS; i++) {
246 ipaq_map[i].size = h3xxx_max_flash_size;
247 ipaq_map[i].set_vpp = h3xxx_set_vpp;
248 ipaq_map[i].phys = cs_phys[i];
249 ipaq_map[i].virt = __ioremap(cs_phys[i], 0x04000000, 0, 1);
250 if (machine_is_h3100 () || machine_is_h1900())
251 ipaq_map[i].bankwidth = 2;
252 }
253 if (machine_is_h3600()) {
254 /* No asset partition here */
255 h3xxx_partitions[1].size += 0x40000;
256 nb_parts--;
257 }
258 }
259#endif
260#ifdef CONFIG_ARCH_H5400
261 if (machine_is_h5400()) {
262 ipaq_map[0].size = 0x02000000;
263 ipaq_map[1].size = 0x02000000;
264 ipaq_map[1].phys = 0x02000000;
265 ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
266 }
267#endif
268#ifdef CONFIG_ARCH_H1900
269 if (machine_is_h1900()) {
270 ipaq_map[0].size = 0x00400000;
271 ipaq_map[1].size = 0x02000000;
272 ipaq_map[1].phys = 0x00080000;
273 ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
274 }
275#endif
276
277#ifdef CONFIG_SA1100_JORNADA56X
278 if (machine_is_jornada56x()) {
279 parts = jornada_partitions;
280 nb_parts = ARRAY_SIZE(jornada_partitions);
281 ipaq_map[0].size = jornada_max_flash_size;
282 ipaq_map[0].set_vpp = jornada56x_set_vpp;
283 ipaq_map[0].virt = (__u32)__ioremap(0x0, 0x04000000, 0, 1);
284 }
285#endif
286#ifdef CONFIG_SA1100_JORNADA720
287 if (machine_is_jornada720()) {
288 parts = jornada_partitions;
289 nb_parts = ARRAY_SIZE(jornada_partitions);
290 ipaq_map[0].size = jornada_max_flash_size;
291 ipaq_map[0].set_vpp = jornada720_set_vpp;
292 }
293#endif
294
295
296 if (machine_is_ipaq()) { /* for iPAQs only */
297 for(i=0; i<MAX_IPAQ_CS; i++) {
298 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
299 my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
300 if (!my_sub_mtd[i]) {
301 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
302 my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
303 }
304 if (!my_sub_mtd[i]) {
305 printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
306 if (i)
307 break;
308 else
309 return -ENXIO;
310 } else
311 printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
312
313 /* do we really need this debugging? --joshua 20030703 */
314 // printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
315 my_sub_mtd[i]->owner = THIS_MODULE;
316 tot_flashsize += my_sub_mtd[i]->size;
317 }
318#ifdef CONFIG_MTD_CONCAT
319 /* fix the asset location */
320# ifdef CONFIG_LAB
321 h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
322# else
323 h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
324# endif
325 h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
326 /* and concat the devices */
327 mymtd = mtd_concat_create(&my_sub_mtd[0], i,
328 "ipaq");
329 if (!mymtd) {
330 printk("Cannot create iPAQ concat device\n");
331 return -ENXIO;
332 }
333#else
334 mymtd = my_sub_mtd[0];
335
336 /*
337	 * In the very near future, command line partition parsing
338 * will use the device name as 'mtd-id' instead of a value
339 * passed to the parse_cmdline_partitions() routine. Since
340 * the bootldr says 'ipaq', make sure it continues to work.
341 */
342 mymtd->name = "ipaq";
343
344 if ((machine_is_h3600())) {
345# ifdef CONFIG_LAB
346 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
347# else
348 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
349# endif
350 nb_parts = 2;
351 } else {
352# ifdef CONFIG_LAB
353 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
354# else
355 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
356# endif
357 h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
358 }
359
360 if (my_sub_mtd[1]) {
361# ifdef CONFIG_LAB
362 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
363# else
364 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
365# endif
366 h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
367 }
368#endif
369 }
370 else {
371 /*
372 * Now let's probe for the actual flash. Do it here since
373 * specific machine settings might have been set above.
374 */
375 printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
376 mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
377 if (!mymtd)
378 return -ENXIO;
379 mymtd->owner = THIS_MODULE;
380 }
381
382
383 /*
384 * Dynamic partition selection stuff (might override the static ones)
385 */
386
387 i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
388
389 if (i > 0) {
390 nb_parts = parsed_nr_parts = i;
391 parts = parsed_parts;
392 part_type = "dynamic";
393 }
394
395 if (!parts) {
396 printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
397 add_mtd_device(mymtd);
398#ifndef CONFIG_MTD_CONCAT
399 if (my_sub_mtd[1])
400 add_mtd_device(my_sub_mtd[1]);
401#endif
402 } else {
403 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
404 add_mtd_partitions(mymtd, parts, nb_parts);
405#ifndef CONFIG_MTD_CONCAT
406 if (my_sub_mtd[1])
407 add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
408#endif
409 }
410
411 return 0;
412}
413
414static void __exit ipaq_mtd_cleanup(void)
415{
416 int i;
417
418 if (mymtd) {
419 del_mtd_partitions(mymtd);
420#ifndef CONFIG_MTD_CONCAT
421 if (my_sub_mtd[1])
422 del_mtd_partitions(my_sub_mtd[1]);
423#endif
424 map_destroy(mymtd);
425#ifdef CONFIG_MTD_CONCAT
426 for(i=0; i<MAX_IPAQ_CS; i++)
427#else
428 for(i=1; i<MAX_IPAQ_CS; i++)
429#endif
430 {
431 if (my_sub_mtd[i])
432 map_destroy(my_sub_mtd[i]);
433 }
434 if (parsed_parts)
435 kfree(parsed_parts);
436 }
437}
438
439static int __init h1900_special_case(void)
440{
441 /* The iPAQ h1900 is a special case - it has weird ROM. */
442 simple_map_init(&ipaq_map[0]);
443 ipaq_map[0].size = 0x80000;
444 ipaq_map[0].set_vpp = h3xxx_set_vpp;
445 ipaq_map[0].phys = 0x0;
446 ipaq_map[0].virt = __ioremap(0x0, 0x04000000, 0, 1);
447 ipaq_map[0].bankwidth = 2;
448
449 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
450 mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
451 if (!mymtd)
452 return -ENODEV;
453 add_mtd_device(mymtd);
454 printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
455
456 return 0;
457}
458
459module_init(ipaq_mtd_init);
460module_exit(ipaq_mtd_cleanup);
461
462MODULE_AUTHOR("Jamey Hicks");
463MODULE_DESCRIPTION("IPAQ CFI map driver");
464MODULE_LICENSE("MIT");
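The probe loop and the CONFIG_MTD_CONCAT branch above follow the usual multi-bank map-driver pattern: try the CFI probe first, fall back to JEDEC, then glue whatever banks were found into one logical device. A minimal sketch of that pattern, assuming a hypothetical example_map[] bank table and NR_EXAMPLE_BANKS (only the MTD calls themselves are taken from this driver):

/*
 * Sketch only: example_map[], NR_EXAMPLE_BANKS and the function name are
 * illustrative placeholders, not part of the iPAQ driver.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/concat.h>

#define NR_EXAMPLE_BANKS 2

static struct map_info example_map[NR_EXAMPLE_BANKS];	/* filled in by board code */
static struct mtd_info *example_sub[NR_EXAMPLE_BANKS];
static struct mtd_info *example_mtd;

static int __init example_probe_banks(void)
{
	int i;

	for (i = 0; i < NR_EXAMPLE_BANKS; i++) {
		/* Prefer CFI, fall back to JEDEC, as the loop above does. */
		example_sub[i] = do_map_probe("cfi_probe", &example_map[i]);
		if (!example_sub[i])
			example_sub[i] = do_map_probe("jedec_probe", &example_map[i]);
		if (!example_sub[i])
			break;
		example_sub[i]->owner = THIS_MODULE;
	}
	if (i == 0)
		return -ENXIO;		/* not even the first bank was found */

#ifdef CONFIG_MTD_CONCAT
	/* Present the i banks that were found as one logical device. */
	example_mtd = mtd_concat_create(&example_sub[0], i, "example");
	if (!example_mtd)
		return -ENXIO;
#else
	example_mtd = example_sub[0];
#endif
	return 0;
}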
diff --git a/drivers/mtd/maps/iq80310.c b/drivers/mtd/maps/iq80310.c
new file mode 100644
index 000000000000..558d014e7acc
--- /dev/null
+++ b/drivers/mtd/maps/iq80310.c
@@ -0,0 +1,119 @@
1/*
2 * $Id: iq80310.c,v 1.20 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Mapping for the Intel XScale IQ80310 evaluation board
5 *
6 * Author: Nicolas Pitre
7 * Copyright: (C) 2001 MontaVista Software Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <asm/io.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23
24
25#define WINDOW_ADDR 0
26#define WINDOW_SIZE 8*1024*1024
27#define BUSWIDTH 1
28
29static struct mtd_info *mymtd;
30
31static struct map_info iq80310_map = {
32 .name = "IQ80310 flash",
33 .size = WINDOW_SIZE,
34 .bankwidth = BUSWIDTH,
35 .phys = WINDOW_ADDR
36};
37
38static struct mtd_partition iq80310_partitions[4] = {
39 {
40 .name = "Firmware",
41 .size = 0x00080000,
42 .offset = 0,
43 .mask_flags = MTD_WRITEABLE /* force read-only */
44 },{
45 .name = "Kernel",
46 .size = 0x000a0000,
47 .offset = 0x00080000,
48 },{
49 .name = "Filesystem",
50 .size = 0x00600000,
51 .offset = 0x00120000
52 },{
53 .name = "RedBoot",
54 .size = 0x000e0000,
55 .offset = 0x00720000,
56 .mask_flags = MTD_WRITEABLE
57 }
58};
59
60static struct mtd_info *mymtd;
61static struct mtd_partition *parsed_parts;
62static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
63
64static int __init init_iq80310(void)
65{
66 struct mtd_partition *parts;
67 int nb_parts = 0;
68 int parsed_nr_parts = 0;
69 int ret;
70
71 iq80310_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
72 if (!iq80310_map.virt) {
73 printk("Failed to ioremap\n");
74 return -EIO;
75 }
76 simple_map_init(&iq80310_map);
77
78 mymtd = do_map_probe("cfi_probe", &iq80310_map);
79 if (!mymtd) {
80 iounmap((void *)iq80310_map.virt);
81 return -ENXIO;
82 }
83 mymtd->owner = THIS_MODULE;
84
85 ret = parse_mtd_partitions(mymtd, probes, &parsed_parts, 0);
86
87 if (ret > 0)
88 parsed_nr_parts = ret;
89
90 if (parsed_nr_parts > 0) {
91 parts = parsed_parts;
92 nb_parts = parsed_nr_parts;
93 } else {
94 parts = iq80310_partitions;
95 nb_parts = ARRAY_SIZE(iq80310_partitions);
96 }
97 add_mtd_partitions(mymtd, parts, nb_parts);
98 return 0;
99}
100
101static void __exit cleanup_iq80310(void)
102{
103 if (mymtd) {
104 del_mtd_partitions(mymtd);
105 map_destroy(mymtd);
106 if (parsed_parts)
107 kfree(parsed_parts);
108 }
109 if (iq80310_map.virt)
110 iounmap((void *)iq80310_map.virt);
111}
112
113module_init(init_iq80310);
114module_exit(cleanup_iq80310);
115
116
117MODULE_LICENSE("GPL");
118MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
119MODULE_DESCRIPTION("MTD map driver for Intel XScale IQ80310 evaluation board");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
new file mode 100644
index 000000000000..c5b5f447e34b
--- /dev/null
+++ b/drivers/mtd/maps/ixp2000.c
@@ -0,0 +1,280 @@
1/*
2 * $Id: ixp2000.c,v 1.5 2004/11/16 17:15:48 dsaxena Exp $
3 *
4 * drivers/mtd/maps/ixp2000.c
5 *
6 * Mapping for the Intel XScale IXP2000 based systems
7 *
8 * Copyright (C) 2002 Intel Corp.
9 * Copyright (C) 2003-2004 MontaVista Software, Inc.
10 *
11 * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
12 * Maintainer: Deepak Saxena <dsaxena@plexity.net>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/string.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/map.h>
27#include <linux/mtd/partitions.h>
28#include <linux/ioport.h>
29#include <linux/device.h>
30
31#include <asm/io.h>
32#include <asm/hardware.h>
33#include <asm/mach-types.h>
34#include <asm/mach/flash.h>
35
36#include <linux/reboot.h>
37
38struct ixp2000_flash_info {
39 struct mtd_info *mtd;
40 struct map_info map;
41 struct mtd_partition *partitions;
42 struct resource *res;
43 int nr_banks;
44};
45
46static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
47{
48 unsigned long (*set_bank)(unsigned long) =
49 (unsigned long(*)(unsigned long))map->map_priv_2;
50
51 return (set_bank ? set_bank(ofs) : ofs);
52}
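flash_bank_setup() above treats map_priv_2 as a platform-supplied bank_setup hook that turns a linear MTD offset into an offset the banked hardware can use. A hypothetical hook in that style (the name and the 8 MiB bank size are assumptions for illustration, not IXP2000 facts):

/* Hypothetical bank_setup() hook; the real one lives in platform code. */
#define EXAMPLE_BANK_SIZE	(8 * 1024 * 1024)	/* assumed bank size */

static unsigned long example_bank_setup(unsigned long ofs)
{
	unsigned long bank = ofs / EXAMPLE_BANK_SIZE;

	/* A real hook would switch chip selects / window registers here. */
	(void)bank;

	/* Return the offset within the bank that is now visible. */
	return ofs % EXAMPLE_BANK_SIZE;
}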
53
54#ifdef __ARMEB__
55/*
56 * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
57 * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
58 * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
59 */
60static int erratum44_workaround = 0;
61
62static inline unsigned long address_fix8_write(unsigned long addr)
63{
64 if (erratum44_workaround) {
65 return (addr ^ 3);
66 }
67 return addr;
68}
69#else
70
71#define address_fix8_write(x) (x)
72#endif
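address_fix8_write() above implements the workaround by XORing the offset with 3 on 8-bit accesses. A self-contained host-side illustration (plain C, not kernel code) of how byte offsets are remapped within each 4-byte group when the erratum applies:

#include <stdio.h>

/* Same fix-up as address_fix8_write() above when the workaround is enabled. */
static unsigned long fix8(unsigned long addr)
{
	return addr ^ 3;
}

int main(void)
{
	unsigned long ofs;

	/* Byte lanes within each 4-byte group come back reversed: 0..3 -> 3..0 */
	for (ofs = 0; ofs < 8; ofs++)
		printf("cpu offset %lu -> bus offset %lu\n", ofs, fix8(ofs));
	return 0;
}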
73
74static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
75{
76 map_word val;
77
78 val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
79 return val;
80}
81
82/*
83 * We can't use the standard memcpy due to the broken SlowPort
84 * address translation on rev A0 and A1 silicon and the fact that
85 * we have banked flash.
86 */
87static void ixp2000_flash_copy_from(struct map_info *map, void *to,
88 unsigned long from, ssize_t len)
89{
90 from = flash_bank_setup(map, from);
91 while(len--)
92 *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
93}
94
95static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
96{
97 *(__u8 *) (address_fix8_write(map->map_priv_1 +
98 flash_bank_setup(map, ofs))) = d.x[0];
99}
100
101static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
102 const void *from, ssize_t len)
103{
104 to = flash_bank_setup(map, to);
105 while(len--) {
106 unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
107 *(__u8 *)(tmp) = *(__u8 *)(from++);
108 }
109}
110
111
112static int ixp2000_flash_remove(struct device *_dev)
113{
114 struct platform_device *dev = to_platform_device(_dev);
115 struct flash_platform_data *plat = dev->dev.platform_data;
116 struct ixp2000_flash_info *info = dev_get_drvdata(&dev->dev);
117
118 dev_set_drvdata(&dev->dev, NULL);
119
120 if(!info)
121 return 0;
122
123 if (info->mtd) {
124 del_mtd_partitions(info->mtd);
125 map_destroy(info->mtd);
126 }
127 if (info->map.map_priv_1)
128 iounmap((void *) info->map.map_priv_1);
129
130	if (info->partitions)
131		kfree(info->partitions);
132
133 if (info->res) {
134 release_resource(info->res);
135 kfree(info->res);
136 }
137
138 if (plat->exit)
139 plat->exit();
140
141 return 0;
142}
143
144
145static int ixp2000_flash_probe(struct device *_dev)
146{
147 static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
148 struct platform_device *dev = to_platform_device(_dev);
149 struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
150 struct flash_platform_data *plat;
151 struct ixp2000_flash_info *info;
152 unsigned long window_size;
153 int err = -1;
154
155 if (!ixp_data)
156 return -ENODEV;
157
158 plat = ixp_data->platform_data;
159 if (!plat)
160 return -ENODEV;
161
162 window_size = dev->resource->end - dev->resource->start + 1;
163 dev_info(_dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
164 ixp_data->nr_banks, ((u32)window_size >> 20));
165
166 if (plat->width != 1) {
167 dev_err(_dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
168 plat->width * 8);
169 return -EIO;
170 }
171
172 info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
173 if(!info) {
174 err = -ENOMEM;
175 goto Error;
176 }
177 memzero(info, sizeof(struct ixp2000_flash_info));
178
179 dev_set_drvdata(&dev->dev, info);
180
181 /*
182 * Tell the MTD layer we're not 1:1 mapped so that it does
183 * not attempt to do a direct access on us.
184 */
185 info->map.phys = NO_XIP;
186
187 info->nr_banks = ixp_data->nr_banks;
188 info->map.size = ixp_data->nr_banks * window_size;
189 info->map.bankwidth = 1;
190
191 /*
192 * map_priv_2 is used to store a ptr to to the bank_setup routine
193 */
194 info->map.map_priv_2 = (void __iomem *) ixp_data->bank_setup;
195
196 info->map.name = dev->dev.bus_id;
197 info->map.read = ixp2000_flash_read8;
198 info->map.write = ixp2000_flash_write8;
199 info->map.copy_from = ixp2000_flash_copy_from;
200 info->map.copy_to = ixp2000_flash_copy_to;
201
202 info->res = request_mem_region(dev->resource->start,
203 dev->resource->end - dev->resource->start + 1,
204 dev->dev.bus_id);
205 if (!info->res) {
206 dev_err(_dev, "Could not reserve memory region\n");
207 err = -ENOMEM;
208 goto Error;
209 }
210
211 info->map.map_priv_1 = ioremap(dev->resource->start,
212 dev->resource->end - dev->resource->start + 1);
213 if (!info->map.map_priv_1) {
214 dev_err(_dev, "Failed to ioremap flash region\n");
215 err = -EIO;
216 goto Error;
217 }
218
219 /*
220 * Setup read mode for FLASH
221 */
222 *IXP2000_SLOWPORT_FRM = 1;
223
224#if defined(__ARMEB__)
225 /*
226 * Enable erratum 44 workaround for NPUs with broken slowport
227 */
228
229 erratum44_workaround = ixp2000_has_broken_slowport();
230 dev_info(_dev, "Erratum 44 workaround %s\n",
231 erratum44_workaround ? "enabled" : "disabled");
232#endif
233
234 info->mtd = do_map_probe(plat->map_name, &info->map);
235 if (!info->mtd) {
236 dev_err(_dev, "map_probe failed\n");
237 err = -ENXIO;
238 goto Error;
239 }
240 info->mtd->owner = THIS_MODULE;
241
242 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
243 if (err > 0) {
244 err = add_mtd_partitions(info->mtd, info->partitions, err);
245 if(err)
246 dev_err(_dev, "Could not parse partitions\n");
247 }
248
249 if (err)
250 goto Error;
251
252 return 0;
253
254Error:
255 ixp2000_flash_remove(_dev);
256 return err;
257}
258
259static struct device_driver ixp2000_flash_driver = {
260 .name = "IXP2000-Flash",
261 .bus = &platform_bus_type,
262 .probe = &ixp2000_flash_probe,
263 .remove = &ixp2000_flash_remove
264};
265
266static int __init ixp2000_flash_init(void)
267{
268 return driver_register(&ixp2000_flash_driver);
269}
270
271static void __exit ixp2000_flash_exit(void)
272{
273 driver_unregister(&ixp2000_flash_driver);
274}
275
276module_init(ixp2000_flash_init);
277module_exit(ixp2000_flash_exit);
278MODULE_LICENSE("GPL");
279MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
280
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
new file mode 100644
index 000000000000..5afe660aa2c4
--- /dev/null
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -0,0 +1,259 @@
1/*
2 * $Id: ixp4xx.c,v 1.7 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * drivers/mtd/maps/ixp4xx.c
5 *
6 * MTD Map file for IXP4XX based systems. Please do not make per-board
7 * changes in here. If your board needs special setup, do it in your
8 * platform level code in arch/arm/mach-ixp4xx/board-setup.c
9 *
10 * Original Author: Intel Corporation
11 * Maintainer: Deepak Saxena <dsaxena@mvista.com>
12 *
13 * Copyright (C) 2002 Intel Corporation
14 * Copyright (C) 2003-2004 MontaVista Software, Inc.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h>
26#include <linux/ioport.h>
27#include <linux/device.h>
28#include <asm/io.h>
29#include <asm/mach-types.h>
30#include <asm/mach/flash.h>
31
32#include <linux/reboot.h>
33
34#ifndef __ARMEB__
35#define BYTE0(h) ((h) & 0xFF)
36#define BYTE1(h) (((h) >> 8) & 0xFF)
37#else
38#define BYTE0(h) (((h) >> 8) & 0xFF)
39#define BYTE1(h) ((h) & 0xFF)
40#endif
41
42static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
43{
44 map_word val;
45 val.x[0] = *(__u16 *) (map->map_priv_1 + ofs);
46 return val;
47}
48
49/*
50 * The IXP4xx expansion bus only allows 16-bit wide accesses
51 * when attached to a 16-bit wide device (such as the 28F128J3A),
52 * so we can't just memcpy_fromio().
53 */
54static void ixp4xx_copy_from(struct map_info *map, void *to,
55 unsigned long from, ssize_t len)
56{
57 int i;
58 u8 *dest = (u8 *) to;
59 u16 *src = (u16 *) (map->map_priv_1 + from);
60 u16 data;
61
62 for (i = 0; i < (len / 2); i++) {
63 data = src[i];
64 dest[i * 2] = BYTE0(data);
65 dest[i * 2 + 1] = BYTE1(data);
66 }
67
68 if (len & 1)
69 dest[len - 1] = BYTE0(src[i]);
70}
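The BYTE0()/BYTE1() macros above select the halves of each 16-bit word in the order the destination buffer expects for the build's endianness. A self-contained host-side illustration (plain C) of how the word 0xBEEF is split under both definitions:

#include <stdio.h>

#define LE_BYTE0(h) ((h) & 0xFF)		/* the !__ARMEB__ case above */
#define LE_BYTE1(h) (((h) >> 8) & 0xFF)
#define BE_BYTE0(h) (((h) >> 8) & 0xFF)		/* the __ARMEB__ case above */
#define BE_BYTE1(h) ((h) & 0xFF)

int main(void)
{
	unsigned short w = 0xBEEF;

	/* Little-endian build: low byte lands first in the destination. */
	printf("LE: dest[0]=%02x dest[1]=%02x\n", LE_BYTE0(w), LE_BYTE1(w));
	/* Big-endian build: high byte lands first instead. */
	printf("BE: dest[0]=%02x dest[1]=%02x\n", BE_BYTE0(w), BE_BYTE1(w));
	return 0;
}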
71
72/*
73 * Unaligned writes are ignored, causing the 8-bit
74 * probe to fail and proceed to the 16-bit probe (which succeeds).
75 */
76static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
77{
78 if (!(adr & 1))
79 *(__u16 *) (map->map_priv_1 + adr) = d.x[0];
80}
81
82/*
83 * Fast write16 function without the probing check above
84 */
85static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
86{
87 *(__u16 *) (map->map_priv_1 + adr) = d.x[0];
88}
89
90struct ixp4xx_flash_info {
91 struct mtd_info *mtd;
92 struct map_info map;
93 struct mtd_partition *partitions;
94 struct resource *res;
95};
96
97static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
98
99static int ixp4xx_flash_remove(struct device *_dev)
100{
101 struct platform_device *dev = to_platform_device(_dev);
102 struct flash_platform_data *plat = dev->dev.platform_data;
103 struct ixp4xx_flash_info *info = dev_get_drvdata(&dev->dev);
104 map_word d;
105
106 dev_set_drvdata(&dev->dev, NULL);
107
108 if(!info)
109 return 0;
110
111 /*
112 * This is required for a soft reboot to work.
113 */
114 d.x[0] = 0xff;
115 ixp4xx_write16(&info->map, d, 0x55 * 0x2);
116
117 if (info->mtd) {
118 del_mtd_partitions(info->mtd);
119 map_destroy(info->mtd);
120 }
121 if (info->map.map_priv_1)
122 iounmap((void *) info->map.map_priv_1);
123
124 if (info->partitions)
125 kfree(info->partitions);
126
127 if (info->res) {
128 release_resource(info->res);
129 kfree(info->res);
130 }
131
132 if (plat->exit)
133 plat->exit();
134
135 /* Disable flash write */
136 *IXP4XX_EXP_CS0 &= ~IXP4XX_FLASH_WRITABLE;
137
138 return 0;
139}
140
141static int ixp4xx_flash_probe(struct device *_dev)
142{
143 struct platform_device *dev = to_platform_device(_dev);
144 struct flash_platform_data *plat = dev->dev.platform_data;
145 struct ixp4xx_flash_info *info;
146 int err = -1;
147
148 if (!plat)
149 return -ENODEV;
150
151 if (plat->init) {
152 err = plat->init();
153 if (err)
154 return err;
155 }
156
157 info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
158 if(!info) {
159 err = -ENOMEM;
160 goto Error;
161 }
162 memzero(info, sizeof(struct ixp4xx_flash_info));
163
164 dev_set_drvdata(&dev->dev, info);
165
166 /*
167 * Enable flash write
168 * TODO: Move this out to board specific code
169 */
170 *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
171
172 /*
173 * Tell the MTD layer we're not 1:1 mapped so that it does
174 * not attempt to do a direct access on us.
175 */
176 info->map.phys = NO_XIP;
177 info->map.size = dev->resource->end - dev->resource->start + 1;
178
179 /*
180 * We only support 16-bit accesses for now. If and when
181 * any board use 8-bit access, we'll fixup the driver to
182 * handle that.
183 */
184 info->map.bankwidth = 2;
185 info->map.name = dev->dev.bus_id;
186	info->map.read = ixp4xx_read16;
187	info->map.write = ixp4xx_probe_write16;
188	info->map.copy_from = ixp4xx_copy_from;
189
190 info->res = request_mem_region(dev->resource->start,
191 dev->resource->end - dev->resource->start + 1,
192 "IXP4XXFlash");
193 if (!info->res) {
194 printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
195 err = -ENOMEM;
196 goto Error;
197 }
198
199 info->map.map_priv_1 = ioremap(dev->resource->start,
200 dev->resource->end - dev->resource->start + 1);
201 if (!info->map.map_priv_1) {
202 printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
203 err = -EIO;
204 goto Error;
205 }
206
207 info->mtd = do_map_probe(plat->map_name, &info->map);
208 if (!info->mtd) {
209 printk(KERN_ERR "IXP4XXFlash: map_probe failed\n");
210 err = -ENXIO;
211 goto Error;
212 }
213 info->mtd->owner = THIS_MODULE;
214
215 /* Use the fast version */
216	info->map.write = ixp4xx_write16;
217
218 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
219 if (err > 0) {
220 err = add_mtd_partitions(info->mtd, info->partitions, err);
221 if(err)
222 printk(KERN_ERR "Could not parse partitions\n");
223 }
224
225 if (err)
226 goto Error;
227
228 return 0;
229
230Error:
231 ixp4xx_flash_remove(_dev);
232 return err;
233}
234
235static struct device_driver ixp4xx_flash_driver = {
236 .name = "IXP4XX-Flash",
237 .bus = &platform_bus_type,
238 .probe = ixp4xx_flash_probe,
239 .remove = ixp4xx_flash_remove,
240};
241
242static int __init ixp4xx_flash_init(void)
243{
244 return driver_register(&ixp4xx_flash_driver);
245}
246
247static void __exit ixp4xx_flash_exit(void)
248{
249 driver_unregister(&ixp4xx_flash_driver);
250}
251
252
253module_init(ixp4xx_flash_init);
254module_exit(ixp4xx_flash_exit);
255
256MODULE_LICENSE("GPL");
257MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
258MODULE_AUTHOR("Deepak Saxena");
259
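For context, a hedged sketch of the board-side registration this driver binds to: a platform device named "IXP4XX-Flash" carrying a flash_platform_data with the map type and bus width that ixp4xx_flash_probe() consumes. The base address, window size and device id below are illustrative assumptions, not values taken from this file:

#include <linux/device.h>
#include <linux/ioport.h>
#include <asm/mach/flash.h>

static struct flash_platform_data example_flash_data = {
	.map_name	= "cfi_probe",	/* passed to do_map_probe() above */
	.width		= 2,		/* 16-bit, the only width supported above */
};

static struct resource example_flash_resource = {
	.start	= 0x50000000,		/* assumption: expansion bus CS0 */
	.end	= 0x50000000 + 0x00800000 - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name		= "IXP4XX-Flash",	/* must match the driver name above */
	.id		= 0,
	.dev		= { .platform_data = &example_flash_data, },
	.num_resources	= 1,
	.resource	= &example_flash_resource,
};

/* Board setup code would then call platform_device_register(&example_flash_device). */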
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
new file mode 100644
index 000000000000..b08668212ab7
--- /dev/null
+++ b/drivers/mtd/maps/l440gx.c
@@ -0,0 +1,157 @@
1/*
2 * $Id: l440gx.c,v 1.17 2004/11/28 09:40:39 dwmw2 Exp $
3 *
4 * BIOS Flash chip on Intel 440GX board.
5 *
6 * Bug: this currently does not work under linuxBIOS.
7 */
8
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <asm/io.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/map.h>
16#include <linux/config.h>
17
18#define PIIXE_IOBASE_RESOURCE 11
19
20#define WINDOW_ADDR 0xfff00000
21#define WINDOW_SIZE 0x00100000
22#define BUSWIDTH 1
23
24static u32 iobase;
25#define IOBASE iobase
26#define TRIBUF_PORT (IOBASE+0x37)
27#define VPP_PORT (IOBASE+0x28)
28
29static struct mtd_info *mymtd;
30
31
32/* Is this really the vpp port? */
33static void l440gx_set_vpp(struct map_info *map, int vpp)
34{
35 unsigned long l;
36
37 l = inl(VPP_PORT);
38 if (vpp) {
39 l |= 1;
40 } else {
41 l &= ~1;
42 }
43 outl(l, VPP_PORT);
44}
45
46static struct map_info l440gx_map = {
47 .name = "L440GX BIOS",
48 .size = WINDOW_SIZE,
49 .bankwidth = BUSWIDTH,
50 .phys = WINDOW_ADDR,
51#if 0
52 /* FIXME verify that this is the
53 * appripriate code for vpp enable/disable
54 */
55 .set_vpp = l440gx_set_vpp
56#endif
57};
58
59static int __init init_l440gx(void)
60{
61 struct pci_dev *dev, *pm_dev;
62 struct resource *pm_iobase;
63 __u16 word;
64
65 dev = pci_find_device(PCI_VENDOR_ID_INTEL,
66 PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
67
68 pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
69 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
70
71 if (!dev || !pm_dev) {
72 printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
73 return -ENODEV;
74 }
75
76 l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
77
78 if (!l440gx_map.virt) {
79 printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
80 return -ENOMEM;
81 }
82 simple_map_init(&l440gx_map);
83 printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
84
85 /* Setup the pm iobase resource
86 * This code should move into some kind of generic bridge
87 * driver but for the moment I'm content with getting the
88 * allocation correct.
89 */
90 pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
91 if (!(pm_iobase->flags & IORESOURCE_IO)) {
92 pm_iobase->name = "pm iobase";
93 pm_iobase->start = 0;
94 pm_iobase->end = 63;
95 pm_iobase->flags = IORESOURCE_IO;
96
97 /* Put the current value in the resource */
98 pci_read_config_dword(pm_dev, 0x40, &iobase);
99 iobase &= ~1;
100 pm_iobase->start += iobase & ~1;
101 pm_iobase->end += iobase & ~1;
102
103 /* Allocate the resource region */
104 if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
105 printk(KERN_WARNING "Could not allocate pm iobase resource\n");
106 iounmap(l440gx_map.virt);
107 return -ENXIO;
108 }
109 }
110 /* Set the iobase */
111 iobase = pm_iobase->start;
112 pci_write_config_dword(pm_dev, 0x40, iobase | 1);
113
114
115 /* Set XBCS# */
116 pci_read_config_word(dev, 0x4e, &word);
117 word |= 0x4;
118 pci_write_config_word(dev, 0x4e, word);
119
120 /* Supply write voltage to the chip */
121 l440gx_set_vpp(&l440gx_map, 1);
122
123 /* Enable the gate on the WE line */
124 outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
125
126 printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
127
128 mymtd = do_map_probe("jedec_probe", &l440gx_map);
129 if (!mymtd) {
130 printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
131 mymtd = do_map_probe("map_rom", &l440gx_map);
132 }
133 if (mymtd) {
134 mymtd->owner = THIS_MODULE;
135
136 add_mtd_device(mymtd);
137 return 0;
138 }
139
140 iounmap(l440gx_map.virt);
141 return -ENXIO;
142}
143
144static void __exit cleanup_l440gx(void)
145{
146 del_mtd_device(mymtd);
147 map_destroy(mymtd);
148
149 iounmap(l440gx_map.virt);
150}
151
152module_init(init_l440gx);
153module_exit(cleanup_l440gx);
154
155MODULE_LICENSE("GPL");
156MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
157MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
diff --git a/drivers/mtd/maps/lasat.c b/drivers/mtd/maps/lasat.c
new file mode 100644
index 000000000000..c658d4045ef2
--- /dev/null
+++ b/drivers/mtd/maps/lasat.c
@@ -0,0 +1,102 @@
1/*
2 * Flash device on Lasat 100 and 200 boards
3 *
4 * (C) 2002 Brian Murphy <brian@murphy.dk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * $Id: lasat.c,v 1.9 2004/11/04 13:24:15 gleixner Exp $
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <asm/io.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22#include <linux/config.h>
23#include <asm/lasat/lasat.h>
24
25static struct mtd_info *lasat_mtd;
26
27static struct mtd_partition partition_info[LASAT_MTD_LAST];
28static char *lasat_mtd_partnames[] = {"Bootloader", "Service", "Normal", "Filesystem", "Config"};
29
30static void lasat_set_vpp(struct map_info *map, int vpp)
31{
32 if (vpp)
33 *lasat_misc->flash_wp_reg |= 1 << lasat_misc->flash_wp_bit;
34 else
35 *lasat_misc->flash_wp_reg &= ~(1 << lasat_misc->flash_wp_bit);
36}
37
38static struct map_info lasat_map = {
39 .name = "LASAT flash",
40 .bankwidth = 4,
41 .set_vpp = lasat_set_vpp
42};
43
44static int __init init_lasat(void)
45{
46 int i;
47	/* since we use AMD chips and set_vpp is not implemented
48	 * for these (yet), we still have to permanently enable flash write */
49 printk(KERN_NOTICE "Unprotecting flash\n");
50 ENABLE_VPP((&lasat_map));
51
52 lasat_map.phys = lasat_flash_partition_start(LASAT_MTD_BOOTLOADER);
53 lasat_map.virt = ioremap_nocache(
54 lasat_map.phys, lasat_board_info.li_flash_size);
55 lasat_map.size = lasat_board_info.li_flash_size;
56
57 simple_map_init(&lasat_map);
58
59 for (i=0; i < LASAT_MTD_LAST; i++)
60 partition_info[i].name = lasat_mtd_partnames[i];
61
62 lasat_mtd = do_map_probe("cfi_probe", &lasat_map);
63
64 if (!lasat_mtd)
65 lasat_mtd = do_map_probe("jedec_probe", &lasat_map);
66
67 if (lasat_mtd) {
68 u32 size, offset = 0;
69
70 lasat_mtd->owner = THIS_MODULE;
71
72 for (i=0; i < LASAT_MTD_LAST; i++) {
73 size = lasat_flash_partition_size(i);
74 partition_info[i].size = size;
75 partition_info[i].offset = offset;
76 offset += size;
77 }
78
79 add_mtd_partitions( lasat_mtd, partition_info, LASAT_MTD_LAST );
80 return 0;
81 }
82
83 return -ENXIO;
84}
85
86static void __exit cleanup_lasat(void)
87{
88 if (lasat_mtd) {
89 del_mtd_partitions(lasat_mtd);
90 map_destroy(lasat_mtd);
91 }
92 if (lasat_map.virt) {
93		iounmap(lasat_map.virt); lasat_map.virt = NULL;
94 }
95}
96
97module_init(init_lasat);
98module_exit(cleanup_lasat);
99
100MODULE_LICENSE("GPL");
101MODULE_AUTHOR("Brian Murphy <brian@murphy.dk>");
102MODULE_DESCRIPTION("Lasat Safepipe/Masquerade MTD map driver");
diff --git a/drivers/mtd/maps/lubbock-flash.c b/drivers/mtd/maps/lubbock-flash.c
new file mode 100644
index 000000000000..1298de475c9a
--- /dev/null
+++ b/drivers/mtd/maps/lubbock-flash.c
@@ -0,0 +1,168 @@
1/*
2 * $Id: lubbock-flash.c,v 1.19 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Map driver for the Lubbock developer platform.
5 *
6 * Author: Nicolas Pitre
7 * Copyright: (C) 2001 MontaVista Software Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/dma-mapping.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22#include <asm/io.h>
23#include <asm/hardware.h>
24#include <asm/arch/pxa-regs.h>
25#include <asm/arch/lubbock.h>
26
27
28#define ROM_ADDR 0x00000000
29#define FLASH_ADDR 0x04000000
30
31#define WINDOW_SIZE 64*1024*1024
32
33static void lubbock_map_inval_cache(struct map_info *map, unsigned long from, ssize_t len)
34{
35 consistent_sync((char *)map->cached + from, len, DMA_FROM_DEVICE);
36}
37
38static struct map_info lubbock_maps[2] = { {
39 .size = WINDOW_SIZE,
40 .phys = 0x00000000,
41 .inval_cache = lubbock_map_inval_cache,
42}, {
43 .size = WINDOW_SIZE,
44 .phys = 0x04000000,
45 .inval_cache = lubbock_map_inval_cache,
46} };
47
48static struct mtd_partition lubbock_partitions[] = {
49 {
50 .name = "Bootloader",
51 .size = 0x00040000,
52 .offset = 0,
53 .mask_flags = MTD_WRITEABLE /* force read-only */
54 },{
55 .name = "Kernel",
56 .size = 0x00100000,
57 .offset = 0x00040000,
58 },{
59 .name = "Filesystem",
60 .size = MTDPART_SIZ_FULL,
61 .offset = 0x00140000
62 }
63};
64
65static struct mtd_info *mymtds[2];
66static struct mtd_partition *parsed_parts[2];
67static int nr_parsed_parts[2];
68
69static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
70
71static int __init init_lubbock(void)
72{
73 int flashboot = (LUB_CONF_SWITCHES & 1);
74 int ret = 0, i;
75
76 lubbock_maps[0].bankwidth = lubbock_maps[1].bankwidth =
77 (BOOT_DEF & 1) ? 2 : 4;
78
79 /* Compensate for the nROMBT switch which swaps the flash banks */
80 printk(KERN_NOTICE "Lubbock configured to boot from %s (bank %d)\n",
81 flashboot?"Flash":"ROM", flashboot);
82
83 lubbock_maps[flashboot^1].name = "Lubbock Application Flash";
84 lubbock_maps[flashboot].name = "Lubbock Boot ROM";
85
86 for (i = 0; i < 2; i++) {
87 lubbock_maps[i].virt = ioremap(lubbock_maps[i].phys, WINDOW_SIZE);
88 if (!lubbock_maps[i].virt) {
89 printk(KERN_WARNING "Failed to ioremap %s\n", lubbock_maps[i].name);
90 if (!ret)
91 ret = -ENOMEM;
92 continue;
93 }
94 lubbock_maps[i].cached = ioremap_cached(lubbock_maps[i].phys, WINDOW_SIZE);
95 if (!lubbock_maps[i].cached)
96 printk(KERN_WARNING "Failed to ioremap cached %s\n", lubbock_maps[i].name);
97 simple_map_init(&lubbock_maps[i]);
98
99 printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit bankwidth)\n",
100 lubbock_maps[i].name, lubbock_maps[i].phys,
101 lubbock_maps[i].bankwidth * 8);
102
103 mymtds[i] = do_map_probe("cfi_probe", &lubbock_maps[i]);
104
105 if (!mymtds[i]) {
106 iounmap((void *)lubbock_maps[i].virt);
107 if (lubbock_maps[i].cached)
108 iounmap(lubbock_maps[i].cached);
109 if (!ret)
110 ret = -EIO;
111 continue;
112 }
113 mymtds[i]->owner = THIS_MODULE;
114
115 ret = parse_mtd_partitions(mymtds[i], probes,
116 &parsed_parts[i], 0);
117
118 if (ret > 0)
119 nr_parsed_parts[i] = ret;
120 }
121
122 if (!mymtds[0] && !mymtds[1])
123 return ret;
124
125 for (i = 0; i < 2; i++) {
126 if (!mymtds[i]) {
127 printk(KERN_WARNING "%s is absent. Skipping\n", lubbock_maps[i].name);
128 } else if (nr_parsed_parts[i]) {
129 add_mtd_partitions(mymtds[i], parsed_parts[i], nr_parsed_parts[i]);
130 } else if (!i) {
131 printk("Using static partitions on %s\n", lubbock_maps[i].name);
132 add_mtd_partitions(mymtds[i], lubbock_partitions, ARRAY_SIZE(lubbock_partitions));
133 } else {
134 printk("Registering %s as whole device\n", lubbock_maps[i].name);
135 add_mtd_device(mymtds[i]);
136 }
137 }
138 return 0;
139}
140
141static void __exit cleanup_lubbock(void)
142{
143 int i;
144 for (i = 0; i < 2; i++) {
145 if (!mymtds[i])
146 continue;
147
148 if (nr_parsed_parts[i] || !i)
149 del_mtd_partitions(mymtds[i]);
150 else
151 del_mtd_device(mymtds[i]);
152
153 map_destroy(mymtds[i]);
154 iounmap((void *)lubbock_maps[i].virt);
155 if (lubbock_maps[i].cached)
156 iounmap(lubbock_maps[i].cached);
157
158 if (parsed_parts[i])
159 kfree(parsed_parts[i]);
160 }
161}
162
163module_init(init_lubbock);
164module_exit(cleanup_lubbock);
165
166MODULE_LICENSE("GPL");
167MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
168MODULE_DESCRIPTION("MTD map driver for Intel Lubbock");
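The flashboot ^ 1 indexing in init_lubbock() above is what swaps the two bank labels when the nROMBT switch changes which device the board boots from. A self-contained host-side illustration (plain C) of the resulting naming for both switch positions:

#include <stdio.h>

int main(void)
{
	const char *name[2];
	int flashboot;

	for (flashboot = 0; flashboot <= 1; flashboot++) {
		/* Same assignments as init_lubbock() above. */
		name[flashboot ^ 1] = "Lubbock Application Flash";
		name[flashboot]     = "Lubbock Boot ROM";
		printf("flashboot=%d: maps[0]=\"%s\", maps[1]=\"%s\"\n",
		       flashboot, name[0], name[1]);
	}
	return 0;
}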
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
new file mode 100644
index 000000000000..38f6a7af53f8
--- /dev/null
+++ b/drivers/mtd/maps/map_funcs.c
@@ -0,0 +1,44 @@
1/*
2 * $Id: map_funcs.c,v 1.9 2004/07/13 22:33:15 dwmw2 Exp $
3 *
4 * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
5 * is enabled.
6 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10
11#include <linux/mtd/map.h>
12
13static map_word simple_map_read(struct map_info *map, unsigned long ofs)
14{
15 return inline_map_read(map, ofs);
16}
17
18static void simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
19{
20 inline_map_write(map, datum, ofs);
21}
22
23static void simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
24{
25 inline_map_copy_from(map, to, from, len);
26}
27
28static void simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
29{
30 inline_map_copy_to(map, to, from, len);
31}
32
33void simple_map_init(struct map_info *map)
34{
35 BUG_ON(!map_bankwidth_supported(map->bankwidth));
36
37 map->read = simple_map_read;
38 map->write = simple_map_write;
39 map->copy_from = simple_map_copy_from;
40 map->copy_to = simple_map_copy_to;
41}
42
43EXPORT_SYMBOL(simple_map_init);
44MODULE_LICENSE("GPL");
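simple_map_init() above is what every map driver in this series calls once its struct map_info has been filled in and ioremapped. A minimal usage sketch of that sequence (the name, address and size are illustrative assumptions, not a real board):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

#define EXAMPLE_ADDR	0x00000000	/* assumption for illustration */
#define EXAMPLE_SIZE	0x00100000

static struct map_info example_map = {
	.name		= "example flash",
	.size		= EXAMPLE_SIZE,
	.bankwidth	= 2,
	.phys		= EXAMPLE_ADDR,
};

static struct mtd_info *example_mtd;

static int __init example_init(void)
{
	example_map.virt = ioremap(EXAMPLE_ADDR, EXAMPLE_SIZE);
	if (!example_map.virt)
		return -EIO;

	/* Install the out-of-line read/write/copy helpers defined above. */
	simple_map_init(&example_map);

	example_mtd = do_map_probe("cfi_probe", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}
	example_mtd->owner = THIS_MODULE;
	add_mtd_device(example_mtd);
	return 0;
}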
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
new file mode 100644
index 000000000000..c5c6901a4763
--- /dev/null
+++ b/drivers/mtd/maps/mbx860.c
@@ -0,0 +1,100 @@
1/*
2 * $Id: mbx860.c,v 1.8 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Handle mapping of the flash on MBX860 boards
5 *
6 * Author: Anton Todorov
7 * Copyright: (C) 2001 Emness Technology
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <asm/io.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23
24
25#define WINDOW_ADDR 0xfe000000
26#define WINDOW_SIZE 0x00200000
27
28/* Flash / Partition sizing */
29#define MAX_SIZE_KiB 8192
30#define BOOT_PARTITION_SIZE_KiB 512
31#define KERNEL_PARTITION_SIZE_KiB 5632
32#define APP_PARTITION_SIZE_KiB 2048
33
34#define NUM_PARTITIONS 3
35
36/* partition_info gives details on the logical partitions that the single
37 * flash device is split into. If the size is zero we use up to the end of
38 * the device. */
39static struct mtd_partition partition_info[]={
40 { .name = "MBX flash BOOT partition",
41 .offset = 0,
42 .size = BOOT_PARTITION_SIZE_KiB*1024 },
43 { .name = "MBX flash DATA partition",
44 .offset = BOOT_PARTITION_SIZE_KiB*1024,
45 .size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
46 { .name = "MBX flash APPLICATION partition",
47 .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
48};
49
50
51static struct mtd_info *mymtd;
52
53struct map_info mbx_map = {
54 .name = "MBX flash",
55 .size = WINDOW_SIZE,
56 .phys = WINDOW_ADDR,
57 .bankwidth = 4,
58};
59
60int __init init_mbx(void)
61{
62 printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
63 mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
64
65 if (!mbx_map.virt) {
66 printk("Failed to ioremap\n");
67 return -EIO;
68 }
69 simple_map_init(&mbx_map);
70
71 mymtd = do_map_probe("jedec_probe", &mbx_map);
72 if (mymtd) {
73 mymtd->owner = THIS_MODULE;
74 add_mtd_device(mymtd);
75 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
76 return 0;
77 }
78
79 iounmap((void *)mbx_map.virt);
80 return -ENXIO;
81}
82
83static void __exit cleanup_mbx(void)
84{
85 if (mymtd) {
86 del_mtd_device(mymtd);
87 map_destroy(mymtd);
88 }
89 if (mbx_map.virt) {
90 iounmap((void *)mbx_map.virt);
91 mbx_map.virt = 0;
92 }
93}
94
95module_init(init_mbx);
96module_exit(cleanup_mbx);
97
98MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
99MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
100MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/mpc1211.c b/drivers/mtd/maps/mpc1211.c
new file mode 100644
index 000000000000..4685e8e13460
--- /dev/null
+++ b/drivers/mtd/maps/mpc1211.c
@@ -0,0 +1,81 @@
1/*
2 * Flash on MPC-1211
3 *
4 * $Id: mpc1211.c,v 1.4 2004/09/16 23:27:13 gleixner Exp $
5 *
6 * (C) 2002 Interface, Saito.K & Jeanne
7 *
8 * GPL'd
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <asm/io.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/partitions.h>
18#include <linux/config.h>
19
20static struct mtd_info *flash_mtd;
21static struct mtd_partition *parsed_parts;
22
23struct map_info mpc1211_flash_map = {
24 .name = "MPC-1211 FLASH",
25 .size = 0x80000,
26 .bankwidth = 1,
27};
28
29static struct mtd_partition mpc1211_partitions[] = {
30 {
31 .name = "IPL & ETH-BOOT",
32 .offset = 0x00000000,
33 .size = 0x10000,
34 },
35 {
36 .name = "Flash FS",
37 .offset = 0x00010000,
38 .size = MTDPART_SIZ_FULL,
39 }
40};
41
42static int __init init_mpc1211_maps(void)
43{
44 int nr_parts;
45
46 mpc1211_flash_map.phys = 0;
47 mpc1211_flash_map.virt = (void __iomem *)P2SEGADDR(0);
48
49 simple_map_init(&mpc1211_flash_map);
50
51 printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
52 flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map);
53 if (!flash_mtd) {
54 printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
55 return -ENXIO;
56 }
57 printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff);
58 flash_mtd->module = THIS_MODULE;
59
60 parsed_parts = mpc1211_partitions;
61 nr_parts = ARRAY_SIZE(mpc1211_partitions);
62
63 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
64 return 0;
65}
66
67static void __exit cleanup_mpc1211_maps(void)
68{
69 if (parsed_parts)
70 del_mtd_partitions(flash_mtd);
71 else
72 del_mtd_device(flash_mtd);
73 map_destroy(flash_mtd);
74}
75
76module_init(init_mpc1211_maps);
77module_exit(cleanup_mpc1211_maps);
78
79MODULE_LICENSE("GPL");
80MODULE_AUTHOR("Saito.K & Jeanne <ksaito@interface.co.jp>");
81MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
new file mode 100644
index 000000000000..ab7e6358d281
--- /dev/null
+++ b/drivers/mtd/maps/netsc520.c
@@ -0,0 +1,140 @@
1/* netsc520.c -- MTD map driver for AMD NetSc520 Demonstration Board
2 *
3 * Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
4 * based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
5 *
6 * $Id: netsc520.c,v 1.13 2004/11/28 09:40:40 dwmw2 Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
21 *
22 * The NetSc520 is a demonstration board for the Elan Sc520 processor available
23 * from AMD. It has a single bank of 16 megs of 32-bit Flash ROM and another
24 * 16 megs of SDRAM.
25 */
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/partitions.h>
35
36
37/*
38** The single, 16 megabyte flash bank is divided into four virtual
39** partitions. The first partition is 768 KiB and is intended to
40** store the kernel image loaded by the bootstrap loader. The second
41** partition is 256 KiB and holds the BIOS image. The third
42** partition is 14.5 MiB and is intended for the flash file system
43** image. The last partition is 512 KiB and contains another copy
44** of the BIOS image and the reset vector.
45**
46** Only the third partition should be mounted. The first partition
47** should not be mounted, but it can be erased and written to using the
48** MTD character routines. The second and fourth partitions should
49** not be touched - it is possible to corrupt the BIOS image by
50** mounting these partitions, and potentially the board will not be
51** recoverable afterwards.
52*/
53
54/* partition_info gives details on the logical partitions that the single
55 * flash device is split into. If the size is zero we use up to the end of
56 * the device. */
57static struct mtd_partition partition_info[]={
58 {
59 .name = "NetSc520 boot kernel",
60 .offset = 0,
61 .size = 0xc0000
62 },
63 {
64 .name = "NetSc520 Low BIOS",
65 .offset = 0xc0000,
66 .size = 0x40000
67 },
68 {
69 .name = "NetSc520 file system",
70 .offset = 0x100000,
71 .size = 0xe80000
72 },
73 {
74 .name = "NetSc520 High BIOS",
75 .offset = 0xf80000,
76 .size = 0x80000
77 },
78};
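A quick self-contained check (plain C, host side) that the four partitions above tile the 16 MiB device exactly as the comment describes, i.e. 768 KiB + 256 KiB + 14.5 MiB + 512 KiB with no gaps:

#include <stdio.h>

int main(void)
{
	unsigned long off[]  = { 0x000000, 0x0c0000, 0x100000, 0xf80000 };
	unsigned long size[] = { 0x0c0000, 0x040000, 0xe80000, 0x080000 };
	unsigned long end = 0;
	int i;

	for (i = 0; i < 4; i++) {
		/* Each partition must start where the previous one ended. */
		if (off[i] != end)
			printf("gap/overlap before partition %d\n", i);
		end = off[i] + size[i];
	}
	/* Prints: total = 0x1000000 (16 MiB) */
	printf("total = 0x%lx (%lu MiB)\n", end, end >> 20);
	return 0;
}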
79#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
80
81#define WINDOW_SIZE 0x00100000
82#define WINDOW_ADDR 0x00200000
83
84static struct map_info netsc520_map = {
85 .name = "netsc520 Flash Bank",
86 .size = WINDOW_SIZE,
87 .bankwidth = 4,
88 .phys = WINDOW_ADDR,
89};
90
91#define NUM_FLASH_BANKS (sizeof(netsc520_map)/sizeof(struct map_info))
92
93static struct mtd_info *mymtd;
94
95static int __init init_netsc520(void)
96{
97 printk(KERN_NOTICE "NetSc520 flash device: 0x%lx at 0x%lx\n", netsc520_map.size, netsc520_map.phys);
98 netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
99
100 if (!netsc520_map.virt) {
101 printk("Failed to ioremap_nocache\n");
102 return -EIO;
103 }
104
105 simple_map_init(&netsc520_map);
106
107 mymtd = do_map_probe("cfi_probe", &netsc520_map);
108 if(!mymtd)
109 mymtd = do_map_probe("map_ram", &netsc520_map);
110 if(!mymtd)
111 mymtd = do_map_probe("map_rom", &netsc520_map);
112
113 if (!mymtd) {
114 iounmap(netsc520_map.virt);
115 return -ENXIO;
116 }
117
118 mymtd->owner = THIS_MODULE;
119 add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS );
120 return 0;
121}
122
123static void __exit cleanup_netsc520(void)
124{
125 if (mymtd) {
126 del_mtd_partitions(mymtd);
127 map_destroy(mymtd);
128 }
129 if (netsc520_map.virt) {
130 iounmap(netsc520_map.virt);
131 netsc520_map.virt = NULL;
132 }
133}
134
135module_init(init_netsc520);
136module_exit(cleanup_netsc520);
137
138MODULE_LICENSE("GPL");
139MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
140MODULE_DESCRIPTION("MTD map driver for AMD NetSc520 Demonstration Board");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
new file mode 100644
index 000000000000..61be5a4148c9
--- /dev/null
+++ b/drivers/mtd/maps/nettel.c
@@ -0,0 +1,496 @@
1/****************************************************************************/
2
3/*
4 * nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards.
5 *
6 * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
7 * (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
8 *
9 * $Id: nettel.c,v 1.10 2005/01/05 17:11:29 dwmw2 Exp $
10 */
11
12/****************************************************************************/
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21#include <linux/mtd/cfi.h>
22#include <linux/reboot.h>
23#include <asm/io.h>
24
25/****************************************************************************/
26
27#define INTEL_BUSWIDTH 1
28#define AMD_WINDOW_MAXSIZE 0x00200000
29#define AMD_BUSWIDTH 1
30
31/*
32 * PAR masks and shifts, assuming 64K pages.
33 */
34#define SC520_PAR_ADDR_MASK 0x00003fff
35#define SC520_PAR_ADDR_SHIFT 16
36#define SC520_PAR_TO_ADDR(par) \
37 (((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)
38
39#define SC520_PAR_SIZE_MASK 0x01ffc000
40#define SC520_PAR_SIZE_SHIFT 2
41#define SC520_PAR_TO_SIZE(par) \
42 ((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))
43
44#define SC520_PAR(cs, addr, size) \
45 ((cs) | \
46 ((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
47 (((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))
48
49#define SC520_PAR_BOOTCS 0x8a000000
50#define SC520_PAR_ROMCS1 0xaa000000
51#define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */
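A self-contained host-side round trip (plain C, with the macros copied from above so it compiles on its own) showing how SC520_PAR() packs a chip select, a 64 KiB-aligned address and a size into one Programmable Address Region value, using the 2 MiB BOOTCS window that nettel_init() programs below:

#include <stdio.h>

#define SC520_PAR_ADDR_MASK	0x00003fff
#define SC520_PAR_ADDR_SHIFT	16
#define SC520_PAR_TO_ADDR(par) \
	(((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)
#define SC520_PAR_SIZE_MASK	0x01ffc000
#define SC520_PAR_SIZE_SHIFT	2
#define SC520_PAR_TO_SIZE(par) \
	((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))
#define SC520_PAR(cs, addr, size) \
	((cs) | \
	((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
	(((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))
#define SC520_PAR_BOOTCS	0x8a000000

int main(void)
{
	unsigned long addr = 0x20000000, size = 0x00200000;	/* 2 MiB AMD window */
	unsigned long par  = SC520_PAR(SC520_PAR_BOOTCS, addr, size);

	/* Expect PAR = 0x8a07e000, then the original address and size back. */
	printf("PAR  = 0x%08lx\n", par);
	printf("addr = 0x%08lx size = 0x%08lx\n",
	       (unsigned long)SC520_PAR_TO_ADDR(par),
	       (unsigned long)SC520_PAR_TO_SIZE(par));
	return 0;
}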
52
53static void *nettel_mmcrp = NULL;
54
55#ifdef CONFIG_MTD_CFI_INTELEXT
56static struct mtd_info *intel_mtd;
57#endif
58static struct mtd_info *amd_mtd;
59
60/****************************************************************************/
61
62/****************************************************************************/
63
64#ifdef CONFIG_MTD_CFI_INTELEXT
65static struct map_info nettel_intel_map = {
66 .name = "SnapGear Intel",
67 .size = 0,
68 .bankwidth = INTEL_BUSWIDTH,
69};
70
71static struct mtd_partition nettel_intel_partitions[] = {
72 {
73 .name = "SnapGear kernel",
74 .offset = 0,
75 .size = 0x000e0000
76 },
77 {
78 .name = "SnapGear filesystem",
79 .offset = 0x00100000,
80 },
81 {
82 .name = "SnapGear config",
83 .offset = 0x000e0000,
84 .size = 0x00020000
85 },
86 {
87 .name = "SnapGear Intel",
88 .offset = 0
89 },
90 {
91 .name = "SnapGear BIOS Config",
92 .offset = 0x007e0000,
93 .size = 0x00020000
94 },
95 {
96 .name = "SnapGear BIOS",
97 .offset = 0x007e0000,
98 .size = 0x00020000
99 },
100};
101#endif
102
103static struct map_info nettel_amd_map = {
104 .name = "SnapGear AMD",
105 .size = AMD_WINDOW_MAXSIZE,
106 .bankwidth = AMD_BUSWIDTH,
107};
108
109static struct mtd_partition nettel_amd_partitions[] = {
110 {
111 .name = "SnapGear BIOS config",
112 .offset = 0x000e0000,
113 .size = 0x00010000
114 },
115 {
116 .name = "SnapGear BIOS",
117 .offset = 0x000f0000,
118 .size = 0x00010000
119 },
120 {
121 .name = "SnapGear AMD",
122 .offset = 0
123 },
124 {
125 .name = "SnapGear high BIOS",
126 .offset = 0x001f0000,
127 .size = 0x00010000
128 }
129};
130
131#define NUM_AMD_PARTITIONS \
132 (sizeof(nettel_amd_partitions)/sizeof(nettel_amd_partitions[0]))
133
134/****************************************************************************/
135
136#ifdef CONFIG_MTD_CFI_INTELEXT
137
138/*
139 * Set the Intel flash back to read mode since some old boot
140 * loaders don't.
141 */
142static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v)
143{
144 struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
145 unsigned long b;
146
147 /* Make sure all FLASH chips are put back into read mode */
148 for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
149 cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
150 cfi->device_type, NULL);
151 }
152 return(NOTIFY_OK);
153}
154
155static struct notifier_block nettel_notifier_block = {
156 nettel_reboot_notifier, NULL, 0
157};
158
159/*
160 * Erase the configuration file system.
161 * Used to support the software reset button.
162 */
163static void nettel_erasecallback(struct erase_info *done)
164{
165 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
166 wake_up(wait_q);
167}
168
169static struct erase_info nettel_erase;
170
171int nettel_eraseconfig(void)
172{
173 struct mtd_info *mtd;
174 DECLARE_WAITQUEUE(wait, current);
175 wait_queue_head_t wait_q;
176 int ret;
177
178 init_waitqueue_head(&wait_q);
179 mtd = get_mtd_device(NULL, 2);
180 if (mtd) {
181 nettel_erase.mtd = mtd;
182 nettel_erase.callback = nettel_erasecallback;
183 nettel_erase.callback = NULL;
184 nettel_erase.addr = 0;
185 nettel_erase.len = mtd->size;
186 nettel_erase.priv = (u_long) &wait_q;
187 nettel_erase.priv = 0;
188
189 set_current_state(TASK_INTERRUPTIBLE);
190 add_wait_queue(&wait_q, &wait);
191
192 ret = MTD_ERASE(mtd, &nettel_erase);
193 if (ret) {
194 set_current_state(TASK_RUNNING);
195 remove_wait_queue(&wait_q, &wait);
196 put_mtd_device(mtd);
197 return(ret);
198 }
199
200 schedule(); /* Wait for erase to finish. */
201 remove_wait_queue(&wait_q, &wait);
202
203 put_mtd_device(mtd);
204 }
205
206 return(0);
207}
208
209#else
210
211int nettel_eraseconfig(void)
212{
213 return(0);
214}
215
216#endif
217
218/****************************************************************************/
219
220int __init nettel_init(void)
221{
222 volatile unsigned long *amdpar;
223 unsigned long amdaddr, maxsize;
224 int num_amd_partitions=0;
225#ifdef CONFIG_MTD_CFI_INTELEXT
226 volatile unsigned long *intel0par, *intel1par;
227 unsigned long orig_bootcspar, orig_romcs1par;
228 unsigned long intel0addr, intel0size;
229 unsigned long intel1addr, intel1size;
230 int intelboot, intel0cs, intel1cs;
231 int num_intel_partitions;
232#endif
233 int rc = 0;
234
235 nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
236 if (nettel_mmcrp == NULL) {
237 printk("SNAPGEAR: failed to disable MMCR cache??\n");
238 return(-EIO);
239 }
240
241 /* Set CPU clock to be 33.000MHz */
242 *((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;
243
244 amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);
245
246#ifdef CONFIG_MTD_CFI_INTELEXT
247 intelboot = 0;
248 intel0cs = SC520_PAR_ROMCS1;
249 intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
250 intel1cs = SC520_PAR_ROMCS2;
251 intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);
252
253 /*
254 * Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
255 * otherwise they might clash with where we try to map BOOTCS.
256 */
257 orig_bootcspar = *amdpar;
258 orig_romcs1par = *intel0par;
259 *intel0par = 0;
260 *intel1par = 0;
261#endif
262
263 /*
264 * The first thing to do is determine if we have a separate
265 * boot FLASH device. Typically this is a small (1 to 2MB)
266 * AMD FLASH part. It seems that device size is about the
267 * only way to tell if this is the case...
268 */
269 amdaddr = 0x20000000;
270 maxsize = AMD_WINDOW_MAXSIZE;
271
272 *amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
273 __asm__ ("wbinvd");
274
275 nettel_amd_map.phys = amdaddr;
276 nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
277 if (!nettel_amd_map.virt) {
278 printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
279 return(-EIO);
280 }
281 simple_map_init(&nettel_amd_map);
282
283 if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
284 printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
285 amd_mtd->size>>10);
286
287 amd_mtd->owner = THIS_MODULE;
288
289 /* The high BIOS partition is only present for 2MB units */
290 num_amd_partitions = NUM_AMD_PARTITIONS;
291 if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
292 num_amd_partitions--;
293 /* Don't add the partition until after the primary INTEL's */
294
295#ifdef CONFIG_MTD_CFI_INTELEXT
296 /*
297 * Map the Intel flash into memory after the AMD
298 * It has to start on a multiple of maxsize.
299 */
300 maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
301 if (maxsize < (32 * 1024 * 1024))
302 maxsize = (32 * 1024 * 1024);
303 intel0addr = amdaddr + maxsize;
304#endif
305 } else {
306#ifdef CONFIG_MTD_CFI_INTELEXT
307 /* INTEL boot FLASH */
308 intelboot++;
309
310 if (!orig_romcs1par) {
311 intel0cs = SC520_PAR_BOOTCS;
312 intel0par = (volatile unsigned long *)
313 (nettel_mmcrp + 0xc4);
314 intel1cs = SC520_PAR_ROMCS1;
315 intel1par = (volatile unsigned long *)
316 (nettel_mmcrp + 0xc0);
317
318 intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar);
319 maxsize = SC520_PAR_TO_SIZE(orig_bootcspar);
320 } else {
321 /* Kernel base is on ROMCS1, not BOOTCS */
322 intel0cs = SC520_PAR_ROMCS1;
323 intel0par = (volatile unsigned long *)
324 (nettel_mmcrp + 0xc0);
325 intel1cs = SC520_PAR_BOOTCS;
326 intel1par = (volatile unsigned long *)
327 (nettel_mmcrp + 0xc4);
328
329 intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par);
330 maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
331 }
332
333 /* Destroy useless AMD MTD mapping */
334 amd_mtd = NULL;
335 iounmap(nettel_amd_map.virt);
336 nettel_amd_map.virt = NULL;
337#else
338 /* Only AMD flash supported */
339 return(-ENXIO);
340#endif
341 }
342
343#ifdef CONFIG_MTD_CFI_INTELEXT
344 /*
345 * We have determined the INTEL FLASH configuration, so lets
346 * go ahead and probe for them now.
347 */
348
349 /* Set PAR to the maximum size */
350 if (maxsize < (32 * 1024 * 1024))
351 maxsize = (32 * 1024 * 1024);
352 *intel0par = SC520_PAR(intel0cs, intel0addr, maxsize);
353
354 /* Turn other PAR off so the first probe doesn't find it */
355 *intel1par = 0;
356
357	/* Probe for the size of the first Intel flash */
358 nettel_intel_map.size = maxsize;
359 nettel_intel_map.phys = intel0addr;
360 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
361 if (!nettel_intel_map.virt) {
362 printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
363 return(-EIO);
364 }
365 simple_map_init(&nettel_intel_map);
366
367 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
368 if (!intel_mtd) {
369 iounmap(nettel_intel_map.virt);
370 return(-ENXIO);
371 }
372
373 /* Set PAR to the detected size */
374 intel0size = intel_mtd->size;
375 *intel0par = SC520_PAR(intel0cs, intel0addr, intel0size);
376
377 /*
378 * Map second Intel FLASH right after first. Set its size to the
379 * same maxsize used for the first Intel FLASH.
380 */
381 intel1addr = intel0addr + intel0size;
382 *intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
383 __asm__ ("wbinvd");
384
385 maxsize += intel0size;
386
387 /* Delete the old map and probe again to do both chips */
388 map_destroy(intel_mtd);
389 intel_mtd = NULL;
390 iounmap(nettel_intel_map.virt);
391
392 nettel_intel_map.size = maxsize;
393 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
394 if (!nettel_intel_map.virt) {
395 printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
396 return(-EIO);
397 }
398
399 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
400 if (! intel_mtd) {
401 iounmap((void *) nettel_intel_map.virt);
402 return(-ENXIO);
403 }
404
405 intel1size = intel_mtd->size - intel0size;
406 if (intel1size > 0) {
407 *intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
408 __asm__ ("wbinvd");
409 } else {
410 *intel1par = 0;
411 }
412
413 printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %dK\n",
414 (intel_mtd->size >> 10));
415
416 intel_mtd->owner = THIS_MODULE;
417
418#ifndef CONFIG_BLK_DEV_INITRD
419 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 1);
420#endif
421
422 num_intel_partitions = sizeof(nettel_intel_partitions) /
423 sizeof(nettel_intel_partitions[0]);
424
425 if (intelboot) {
426 /*
427 * Adjust offset and size of last boot partition.
428 * Must allow for BIOS region at end of FLASH.
429 */
430 nettel_intel_partitions[1].size = (intel0size + intel1size) -
431 (1024*1024 + intel_mtd->erasesize);
432 nettel_intel_partitions[3].size = intel0size + intel1size;
433 nettel_intel_partitions[4].offset =
434 (intel0size + intel1size) - intel_mtd->erasesize;
435 nettel_intel_partitions[4].size = intel_mtd->erasesize;
436 nettel_intel_partitions[5].offset =
437 nettel_intel_partitions[4].offset;
438 nettel_intel_partitions[5].size =
439 nettel_intel_partitions[4].size;
440 } else {
441 /* No BIOS regions when AMD boot */
442 num_intel_partitions -= 2;
443 }
444 rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions,
445 num_intel_partitions);
446#endif
447
448 if (amd_mtd) {
449 rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions,
450 num_amd_partitions);
451 }
452
453#ifdef CONFIG_MTD_CFI_INTELEXT
454 register_reboot_notifier(&nettel_notifier_block);
455#endif
456
457 return(rc);
458}
459
460/****************************************************************************/
461
462void __exit nettel_cleanup(void)
463{
464#ifdef CONFIG_MTD_CFI_INTELEXT
465 unregister_reboot_notifier(&nettel_notifier_block);
466#endif
467 if (amd_mtd) {
468 del_mtd_partitions(amd_mtd);
469 map_destroy(amd_mtd);
470 }
471 if (nettel_amd_map.virt) {
472 iounmap(nettel_amd_map.virt);
473 nettel_amd_map.virt = NULL;
474 }
475#ifdef CONFIG_MTD_CFI_INTELEXT
476 if (intel_mtd) {
477 del_mtd_partitions(intel_mtd);
478 map_destroy(intel_mtd);
479 }
480 if (nettel_intel_map.virt) {
481 iounmap(nettel_intel_map.virt);
482 nettel_intel_map.virt = 0;
483 }
484#endif
485}
486
487/****************************************************************************/
488
489module_init(nettel_init);
490module_exit(nettel_cleanup);
491
492MODULE_LICENSE("GPL");
493MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
494MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support");
495
496/****************************************************************************/
diff --git a/drivers/mtd/maps/ocelot.c b/drivers/mtd/maps/ocelot.c
new file mode 100644
index 000000000000..82c3070678c5
--- /dev/null
+++ b/drivers/mtd/maps/ocelot.c
@@ -0,0 +1,175 @@
1/*
2 * $Id: ocelot.c,v 1.16 2005/01/05 18:05:13 dwmw2 Exp $
3 *
4 * Flash on Momenco Ocelot
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <asm/io.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14#include <linux/mtd/partitions.h>
15
16#define OCELOT_PLD 0x2c000000
17#define FLASH_WINDOW_ADDR 0x2fc00000
18#define FLASH_WINDOW_SIZE 0x00080000
19#define FLASH_BUSWIDTH 1
20#define NVRAM_WINDOW_ADDR 0x2c800000
21#define NVRAM_WINDOW_SIZE 0x00007FF0
22#define NVRAM_BUSWIDTH 1
23
24static unsigned int cacheflush = 0;
25
26static struct mtd_info *flash_mtd;
27static struct mtd_info *nvram_mtd;
28
 29static int ocelot_ram_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
 30{
 31	struct map_info *map = mtd->priv;
 32
 33	/* If we use memcpy, it does word-wide writes. Even though we told the
 34	   GT64120A that it's an 8-bit wide region, word-wide writes don't work.
 35	   We end up just writing the first byte of the four to all four bytes.
 36	   So we have this loop instead */
 37	*retlen = len;
 38	while(len) {
 39		__raw_writeb(*(unsigned char *) buf, map->virt + to);
 40		buf++;
 41		to++;
 42		len--;
 43	}
 44	return 0;
 45}
46
47static struct mtd_partition *parsed_parts;
48
49struct map_info ocelot_flash_map = {
50 .name = "Ocelot boot flash",
51 .size = FLASH_WINDOW_SIZE,
52 .bankwidth = FLASH_BUSWIDTH,
53 .phys = FLASH_WINDOW_ADDR,
54};
55
56struct map_info ocelot_nvram_map = {
57 .name = "Ocelot NVRAM",
58 .size = NVRAM_WINDOW_SIZE,
59 .bankwidth = NVRAM_BUSWIDTH,
60 .phys = NVRAM_WINDOW_ADDR,
61};
62
63static const char *probes[] = { "RedBoot", NULL };
64
65static int __init init_ocelot_maps(void)
66{
67 void *pld;
68 int nr_parts;
69 unsigned char brd_status;
70
71 printk(KERN_INFO "Momenco Ocelot MTD mappings: Flash 0x%x at 0x%x, NVRAM 0x%x at 0x%x\n",
72 FLASH_WINDOW_SIZE, FLASH_WINDOW_ADDR, NVRAM_WINDOW_SIZE, NVRAM_WINDOW_ADDR);
73
74 /* First check whether the flash jumper is present */
75 pld = ioremap(OCELOT_PLD, 0x10);
76 if (!pld) {
77 printk(KERN_NOTICE "Failed to ioremap Ocelot PLD\n");
78 return -EIO;
79 }
80 brd_status = readb(pld+4);
81 iounmap(pld);
82
83 /* Now ioremap the NVRAM space */
84 ocelot_nvram_map.virt = ioremap_nocache(NVRAM_WINDOW_ADDR, NVRAM_WINDOW_SIZE);
85 if (!ocelot_nvram_map.virt) {
86 printk(KERN_NOTICE "Failed to ioremap Ocelot NVRAM space\n");
87 return -EIO;
88 }
89
90 simple_map_init(&ocelot_nvram_map);
91
92 /* And do the RAM probe on it to get an MTD device */
93 nvram_mtd = do_map_probe("map_ram", &ocelot_nvram_map);
94 if (!nvram_mtd) {
95 printk("NVRAM probe failed\n");
96 goto fail_1;
97 }
98 nvram_mtd->owner = THIS_MODULE;
99 nvram_mtd->erasesize = 16;
100 /* Override the write() method */
101 nvram_mtd->write = ocelot_ram_write;
102
103 /* Now map the flash space */
104 ocelot_flash_map.virt = ioremap_nocache(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE);
105 if (!ocelot_flash_map.virt) {
106 printk(KERN_NOTICE "Failed to ioremap Ocelot flash space\n");
107 goto fail_2;
108 }
109 /* Now the cached version */
110 ocelot_flash_map.cached = (unsigned long)__ioremap(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE, 0);
111
112 simple_map_init(&ocelot_flash_map);
113
114 /* Only probe for flash if the write jumper is present */
115 if (brd_status & 0x40) {
116 flash_mtd = do_map_probe("jedec", &ocelot_flash_map);
117 } else {
118 printk(KERN_NOTICE "Ocelot flash write jumper not present. Treating as ROM\n");
119 }
120 /* If that failed or the jumper's absent, pretend it's ROM */
121 if (!flash_mtd) {
122 flash_mtd = do_map_probe("map_rom", &ocelot_flash_map);
123 /* If we're treating it as ROM, set the erase size */
124 if (flash_mtd)
125 flash_mtd->erasesize = 0x10000;
126 }
127 if (!flash_mtd)
128 goto fail3;
129
130 add_mtd_device(nvram_mtd);
131
132 flash_mtd->owner = THIS_MODULE;
133 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
134
135 if (nr_parts > 0)
136 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
137 else
138 add_mtd_device(flash_mtd);
139
140 return 0;
141
142 fail3:
143 iounmap((void *)ocelot_flash_map.virt);
144 if (ocelot_flash_map.cached)
145 iounmap((void *)ocelot_flash_map.cached);
146 fail_2:
147 map_destroy(nvram_mtd);
148 fail_1:
149 iounmap((void *)ocelot_nvram_map.virt);
150
151 return -ENXIO;
152}
153
154static void __exit cleanup_ocelot_maps(void)
155{
156 del_mtd_device(nvram_mtd);
157 map_destroy(nvram_mtd);
158 iounmap((void *)ocelot_nvram_map.virt);
159
160 if (parsed_parts)
161 del_mtd_partitions(flash_mtd);
162 else
163 del_mtd_device(flash_mtd);
164 map_destroy(flash_mtd);
165 iounmap((void *)ocelot_flash_map.virt);
166 if (ocelot_flash_map.cached)
167 iounmap((void *)ocelot_flash_map.cached);
168}
169
170module_init(init_ocelot_maps);
171module_exit(cleanup_ocelot_maps);
172
173MODULE_LICENSE("GPL");
174MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
175MODULE_DESCRIPTION("MTD map driver for Momenco Ocelot board");
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
new file mode 100644
index 000000000000..6e559bc14636
--- /dev/null
+++ b/drivers/mtd/maps/ocotea.c
@@ -0,0 +1,154 @@
1/*
2 * Mapping for Ocotea user flash
3 *
4 * Matt Porter <mporter@kernel.crashing.org>
5 *
6 * Copyright 2002-2004 MontaVista Software Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h>
21#include <linux/config.h>
22#include <linux/version.h>
23#include <asm/io.h>
24#include <asm/ibm44x.h>
25#include <platforms/4xx/ocotea.h>
26
27static struct mtd_info *flash;
28
29static struct map_info ocotea_small_map = {
30 .name = "Ocotea small flash",
31 .size = OCOTEA_SMALL_FLASH_SIZE,
32 .buswidth = 1,
33};
34
35static struct map_info ocotea_large_map = {
36 .name = "Ocotea large flash",
37 .size = OCOTEA_LARGE_FLASH_SIZE,
38 .buswidth = 1,
39};
40
41static struct mtd_partition ocotea_small_partitions[] = {
42 {
43 .name = "pibs",
44 .offset = 0x0,
45 .size = 0x100000,
46 }
47};
48
49static struct mtd_partition ocotea_large_partitions[] = {
50 {
51 .name = "fs",
52 .offset = 0,
53 .size = 0x300000,
54 },
55 {
56 .name = "firmware",
57 .offset = 0x300000,
58 .size = 0x100000,
59 }
60};
61
62#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
63
64int __init init_ocotea(void)
65{
66 u8 fpga0_reg;
67 u8 *fpga0_adr;
68 unsigned long long small_flash_base, large_flash_base;
69
70 fpga0_adr = ioremap64(OCOTEA_FPGA_ADDR, 16);
71 if (!fpga0_adr)
72 return -ENOMEM;
73
74 fpga0_reg = readb((unsigned long)fpga0_adr);
75 iounmap(fpga0_adr);
76
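	/* The FPGA register tells us whether the board boots from the large
	 * flash, which decides where each flash bank appears in the address
	 * map. */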
77 if (OCOTEA_BOOT_LARGE_FLASH(fpga0_reg)) {
78 small_flash_base = OCOTEA_SMALL_FLASH_HIGH;
79 large_flash_base = OCOTEA_LARGE_FLASH_LOW;
80 }
81 else {
82 small_flash_base = OCOTEA_SMALL_FLASH_LOW;
83 large_flash_base = OCOTEA_LARGE_FLASH_HIGH;
84 }
85
86 ocotea_small_map.phys = small_flash_base;
87 ocotea_small_map.virt = ioremap64(small_flash_base,
88 ocotea_small_map.size);
89
90 if (!ocotea_small_map.virt) {
91 printk("Failed to ioremap flash\n");
92 return -EIO;
93 }
94
95 simple_map_init(&ocotea_small_map);
96
97 flash = do_map_probe("map_rom", &ocotea_small_map);
98 if (flash) {
99 flash->owner = THIS_MODULE;
100 add_mtd_partitions(flash, ocotea_small_partitions,
101 NB_OF(ocotea_small_partitions));
102 } else {
103 printk("map probe failed for flash\n");
104 return -ENXIO;
105 }
106
107 ocotea_large_map.phys = large_flash_base;
108 ocotea_large_map.virt = ioremap64(large_flash_base,
109 ocotea_large_map.size);
110
111 if (!ocotea_large_map.virt) {
112 printk("Failed to ioremap flash\n");
113 return -EIO;
114 }
115
116 simple_map_init(&ocotea_large_map);
117
118 flash = do_map_probe("cfi_probe", &ocotea_large_map);
119 if (flash) {
120 flash->owner = THIS_MODULE;
121 add_mtd_partitions(flash, ocotea_large_partitions,
122 NB_OF(ocotea_large_partitions));
123 } else {
124 printk("map probe failed for flash\n");
125 return -ENXIO;
126 }
127
128 return 0;
129}
130
131static void __exit cleanup_ocotea(void)
132{
133 if (flash) {
134 del_mtd_partitions(flash);
135 map_destroy(flash);
136 }
137
138 if (ocotea_small_map.virt) {
139 iounmap((void *)ocotea_small_map.virt);
 140		ocotea_small_map.virt = NULL;
141 }
142
143 if (ocotea_large_map.virt) {
144 iounmap((void *)ocotea_large_map.virt);
 145		ocotea_large_map.virt = NULL;
146 }
147}
148
149module_init(init_ocotea);
150module_exit(cleanup_ocotea);
151
152MODULE_LICENSE("GPL");
153MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
154MODULE_DESCRIPTION("MTD map and partitions for IBM 440GX Ocotea boards");
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
new file mode 100644
index 000000000000..e5ff83de420e
--- /dev/null
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -0,0 +1,248 @@
1// $Id: octagon-5066.c,v 1.26 2004/07/12 22:38:29 dwmw2 Exp $
2/* ######################################################################
3
4 Octagon 5066 MTD Driver.
5
 6   The Octagon 5066 is an SBC based on AMD's 586-WB running at 133 MHz. It
 7   comes with a built-in AMD 29F016 flash chip and a socketed EEPROM that
 8   is replaceable by flash. Both units are mapped through a multiplexer
9 into a 32k memory window at 0xe8000. The control register for the
10 multiplexing unit is located at IO 0x208 with a bit map of
11 0-5 Page Selection in 32k increments
12 6-7 Device selection:
13 00 SSD off
14 01 SSD 0 (Socket)
15 10 SSD 1 (Flash chip)
16 11 undefined
17
 18   On each SSD, the first 128k is reserved for use by the BIOS
 19   (actually it IS the BIOS..) This only matters if you are booting off the
 20   flash: you must not put a file system starting there.
21
 22   The driver runs a simple detection algorithm to guess what sort of devices
 23   are plugged into the sockets.
24
25 ##################################################################### */
26
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/ioport.h>
30#include <linux/init.h>
31#include <asm/io.h>
32
33#include <linux/mtd/map.h>
34#include <linux/mtd/mtd.h>
35
36#define WINDOW_START 0xe8000
37#define WINDOW_LENGTH 0x8000
38#define WINDOW_SHIFT 27
39#define WINDOW_MASK 0x7FFF
40#define PAGE_IO 0x208
41
42static volatile char page_n_dev = 0;
43static unsigned long iomapadr;
44static DEFINE_SPINLOCK(oct5066_spin);
45
46/*
47 * We use map_priv_1 to identify which device we are.
48 */
49
50static void __oct5066_page(struct map_info *map, __u8 byte)
51{
52 outb(byte,PAGE_IO);
53 page_n_dev = byte;
54}
55
56static inline void oct5066_page(struct map_info *map, unsigned long ofs)
57{
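	/* Combine the device-select bits (kept in map_priv_1) with the page
	   derived from the offset; only touch the control port when the value
	   actually changes. */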
58 __u8 byte = map->map_priv_1 | (ofs >> WINDOW_SHIFT);
59
60 if (page_n_dev != byte)
61 __oct5066_page(map, byte);
62}
63
64
65static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
66{
67 map_word ret;
68 spin_lock(&oct5066_spin);
69 oct5066_page(map, ofs);
70 ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
71 spin_unlock(&oct5066_spin);
72 return ret;
73}
74
75static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
76{
77 while(len) {
78 unsigned long thislen = len;
79 if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
80 thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
81
82 spin_lock(&oct5066_spin);
83 oct5066_page(map, from);
84 memcpy_fromio(to, iomapadr + from, thislen);
85 spin_unlock(&oct5066_spin);
86 to += thislen;
87 from += thislen;
88 len -= thislen;
89 }
90}
91
92static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
93{
94 spin_lock(&oct5066_spin);
95 oct5066_page(map, adr);
96 writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
97 spin_unlock(&oct5066_spin);
98}
99
100static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
101{
102 while(len) {
103 unsigned long thislen = len;
104 if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
105 thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
106
107 spin_lock(&oct5066_spin);
108 oct5066_page(map, to);
109 memcpy_toio(iomapadr + to, from, thislen);
110 spin_unlock(&oct5066_spin);
111 to += thislen;
112 from += thislen;
113 len -= thislen;
114 }
115}
116
117static struct map_info oct5066_map[2] = {
118 {
119 .name = "Octagon 5066 Socket",
120 .phys = NO_XIP,
121 .size = 512 * 1024,
122 .bankwidth = 1,
123 .read = oct5066_read8,
124 .copy_from = oct5066_copy_from,
125 .write = oct5066_write8,
126 .copy_to = oct5066_copy_to,
127 .map_priv_1 = 1<<6
128 },
129 {
130 .name = "Octagon 5066 Internal Flash",
131 .phys = NO_XIP,
132 .size = 2 * 1024 * 1024,
133 .bankwidth = 1,
134 .read = oct5066_read8,
135 .copy_from = oct5066_copy_from,
136 .write = oct5066_write8,
137 .copy_to = oct5066_copy_to,
138 .map_priv_1 = 2<<6
139 }
140};
141
142static struct mtd_info *oct5066_mtd[2] = {NULL, NULL};
143
144// OctProbe - Sense if this is an octagon card
145// ---------------------------------------------------------------------
 146/* Perform a simple validity test: we map the window, select SSD0 and
 147   change pages while monitoring the window. A change in the window,
 148   controlled by the PAGE_IO port, indicates a functioning 5066 board. This
 149   will fail if the thing in the socket is set to a uniform value. */
150static int __init OctProbe(void)
151{
152 unsigned int Base = (1 << 6);
153 unsigned long I;
154 unsigned long Values[10];
155 for (I = 0; I != 20; I++)
156 {
157 outb(Base + (I%10),PAGE_IO);
158 if (I < 10)
159 {
160 // Record the value and check for uniqueness
161 Values[I%10] = readl(iomapadr);
162 if (I > 0 && Values[I%10] == Values[0])
163 return -EAGAIN;
164 }
165 else
166 {
167 // Make sure we get the same values on the second pass
168 if (Values[I%10] != readl(iomapadr))
169 return -EAGAIN;
170 }
171 }
172 return 0;
173}
174
175void cleanup_oct5066(void)
176{
177 int i;
178 for (i=0; i<2; i++) {
179 if (oct5066_mtd[i]) {
180 del_mtd_device(oct5066_mtd[i]);
181 map_destroy(oct5066_mtd[i]);
182 }
183 }
184 iounmap((void *)iomapadr);
185 release_region(PAGE_IO, 1);
186}
187
188int __init init_oct5066(void)
189{
190 int i;
191 int ret = 0;
192
193 // Do an autoprobe sequence
194 if (!request_region(PAGE_IO,1,"Octagon SSD")) {
195 printk(KERN_NOTICE "5066: Page Register in Use\n");
196 return -EAGAIN;
197 }
198 iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
199 if (!iomapadr) {
200 printk(KERN_NOTICE "Failed to ioremap memory region\n");
201 ret = -EIO;
202 goto out_rel;
203 }
204 if (OctProbe() != 0) {
205 printk(KERN_NOTICE "5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
206 iounmap((void *)iomapadr);
207 ret = -EAGAIN;
208 goto out_unmap;
209 }
210
211 // Print out our little header..
212 printk("Octagon 5066 SSD IO:0x%x MEM:0x%x-0x%x\n",PAGE_IO,WINDOW_START,
213 WINDOW_START+WINDOW_LENGTH);
214
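	/* Probe each SSD, trying the most specific probes first: CFI, then
	   JEDEC, then plain RAM/ROM fallbacks. */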
215 for (i=0; i<2; i++) {
216 oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
217 if (!oct5066_mtd[i])
218 oct5066_mtd[i] = do_map_probe("jedec", &oct5066_map[i]);
219 if (!oct5066_mtd[i])
220 oct5066_mtd[i] = do_map_probe("map_ram", &oct5066_map[i]);
221 if (!oct5066_mtd[i])
222 oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
223 if (oct5066_mtd[i]) {
224 oct5066_mtd[i]->owner = THIS_MODULE;
225 add_mtd_device(oct5066_mtd[i]);
226 }
227 }
228
229 if (!oct5066_mtd[0] && !oct5066_mtd[1]) {
230 cleanup_oct5066();
231 return -ENXIO;
232 }
233
234 return 0;
235
236 out_unmap:
237 iounmap((void *)iomapadr);
238 out_rel:
239 release_region(PAGE_IO, 1);
240 return ret;
241}
242
243module_init(init_oct5066);
244module_exit(cleanup_oct5066);
245
246MODULE_LICENSE("GPL");
247MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com>, David Woodhouse <dwmw2@infradead.org>");
248MODULE_DESCRIPTION("MTD map driver for Octagon 5066 Single Board Computer");
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
new file mode 100644
index 000000000000..496109071cb1
--- /dev/null
+++ b/drivers/mtd/maps/omap-toto-flash.c
@@ -0,0 +1,137 @@
1/*
2 * NOR Flash memory access on TI Toto board
3 *
4 * jzhang@ti.com (C) 2003 Texas Instruments.
5 *
6 * (C) 2002 MontVista Software, Inc.
7 *
8 * $Id: omap-toto-flash.c,v 1.3 2004/09/16 23:27:13 gleixner Exp $
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15
16#include <linux/errno.h>
17#include <linux/init.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22
23#include <asm/hardware.h>
24#include <asm/io.h>
25
26
27#ifndef CONFIG_ARCH_OMAP
28#error This is for OMAP architecture only
29#endif
30
 31// these lines need to be moved to a hardware header file
32#define OMAP_TOTO_FLASH_BASE 0xd8000000
33#define OMAP_TOTO_FLASH_SIZE 0x80000
34
35static struct map_info omap_toto_map_flash = {
36 .name = "OMAP Toto flash",
37 .bankwidth = 2,
38 .virt = (void __iomem *)OMAP_TOTO_FLASH_BASE,
39};
40
41
42static struct mtd_partition toto_flash_partitions[] = {
43 {
44 .name = "BootLoader",
 45		.size		= 0x00040000,	/* hopefully u-boot will stay 128k + 128k */
46 .offset = 0,
47 .mask_flags = MTD_WRITEABLE, /* force read-only */
48 }, {
49 .name = "ReservedSpace",
50 .size = 0x00030000,
51 .offset = MTDPART_OFS_APPEND,
52 //mask_flags: MTD_WRITEABLE, /* force read-only */
53 }, {
54 .name = "EnvArea", /* bottom 64KiB for env vars */
55 .size = MTDPART_SIZ_FULL,
56 .offset = MTDPART_OFS_APPEND,
57 }
58};
59
60static struct mtd_partition *parsed_parts;
61
62static struct mtd_info *flash_mtd;
63
64static int __init init_flash (void)
65{
66
67 struct mtd_partition *parts;
68 int nb_parts = 0;
69 int parsed_nr_parts = 0;
70 const char *part_type;
71
72 /*
73 * Static partition definition selection
74 */
75 part_type = "static";
76
77 parts = toto_flash_partitions;
78 nb_parts = ARRAY_SIZE(toto_flash_partitions);
79 omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE;
80 omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE);
81
82 simple_map_init(&omap_toto_map_flash);
83 /*
84 * Now let's probe for the actual flash. Do it here since
85 * specific machine settings might have been set above.
86 */
87 printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n",
88 omap_toto_map_flash.bankwidth*8);
89 flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash);
90 if (!flash_mtd)
91 return -ENXIO;
92
93 if (parsed_nr_parts > 0) {
94 parts = parsed_parts;
95 nb_parts = parsed_nr_parts;
96 }
97
98 if (nb_parts == 0) {
 99		printk(KERN_NOTICE "OMAP toto flash: no partition info available, "
 100			"registering whole flash at once\n");
101 if (add_mtd_device(flash_mtd)){
102 return -ENXIO;
103 }
104 } else {
105 printk(KERN_NOTICE "Using %s partition definition\n",
106 part_type);
107 return add_mtd_partitions(flash_mtd, parts, nb_parts);
108 }
109 return 0;
110}
111
112int __init omap_toto_mtd_init(void)
113{
114 int status;
115
 116	if ((status = init_flash()) != 0) {
117 printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n");
118 }
119 return status;
120}
121
122static void __exit omap_toto_mtd_cleanup(void)
123{
124 if (flash_mtd) {
125 del_mtd_partitions(flash_mtd);
126 map_destroy(flash_mtd);
127 if (parsed_parts)
128 kfree(parsed_parts);
129 }
130}
131
132module_init(omap_toto_mtd_init);
133module_exit(omap_toto_mtd_cleanup);
134
135MODULE_AUTHOR("Jian Zhang");
136MODULE_DESCRIPTION("OMAP Toto board map driver");
137MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pb1550-flash.c b/drivers/mtd/maps/pb1550-flash.c
new file mode 100644
index 000000000000..1424726a219e
--- /dev/null
+++ b/drivers/mtd/maps/pb1550-flash.c
@@ -0,0 +1,203 @@
1/*
2 * Flash memory access on Alchemy Pb1550 board
3 *
4 * $Id: pb1550-flash.c,v 1.6 2004/11/04 13:24:15 gleixner Exp $
5 *
6 * (C) 2004 Embedded Edge, LLC, based on pb1550-flash.c:
7 * (C) 2003 Pete Popov <ppopov@pacbell.net>
8 *
9 */
10
11#include <linux/config.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/map.h>
19#include <linux/mtd/partitions.h>
20
21#include <asm/io.h>
22#include <asm/au1000.h>
23#include <asm/pb1550.h>
24
25#ifdef DEBUG_RW
26#define DBG(x...) printk(x)
27#else
28#define DBG(x...)
29#endif
30
31static unsigned long window_addr;
32static unsigned long window_size;
33
34
35static struct map_info pb1550_map = {
36 .name = "Pb1550 flash",
37};
38
39static unsigned char flash_bankwidth = 4;
40
41/*
42 * Support only 64MB NOR Flash parts
43 */
44
45#ifdef PB1550_BOTH_BANKS
46/* both banks will be used. Combine the first bank and the first
47 * part of the second bank together into a single jffs/jffs2
48 * partition.
49 */
50static struct mtd_partition pb1550_partitions[] = {
51 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
52 * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
53 * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
54 */
55 {
56 .name = "User FS",
57 .size = (0x1FC00000 - 0x18000000),
58 .offset = 0x0000000
59 },{
60 .name = "yamon",
61 .size = 0x0100000,
62 .offset = MTDPART_OFS_APPEND,
63 .mask_flags = MTD_WRITEABLE
64 },{
65 .name = "raw kernel",
66 .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
67 .offset = MTDPART_OFS_APPEND,
68 }
69};
70#elif defined(PB1550_BOOT_ONLY)
71static struct mtd_partition pb1550_partitions[] = {
72 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
73 * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
74 */
75 {
76 .name = "User FS",
77 .size = 0x03c00000,
78 .offset = 0x0000000
79 },{
80 .name = "yamon",
81 .size = 0x0100000,
82 .offset = MTDPART_OFS_APPEND,
83 .mask_flags = MTD_WRITEABLE
84 },{
85 .name = "raw kernel",
86 .size = (0x300000-0x40000), /* last 256KB is yamon env */
87 .offset = MTDPART_OFS_APPEND,
88 }
89};
90#elif defined(PB1550_USER_ONLY)
91static struct mtd_partition pb1550_partitions[] = {
92 /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
93 * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
94 */
95 {
96 .name = "User FS",
97 .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */
98 .offset = 0x0000000
99 },{
100 .name = "raw kernel",
101 .size = MTDPART_SIZ_FULL,
102 .offset = MTDPART_OFS_APPEND,
103 }
104};
105#else
106#error MTD_PB1550 define combo error /* should never happen */
107#endif
108
109#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
110
111static struct mtd_info *mymtd;
112
113/*
114 * Probe the flash density and setup window address and size
115 * based on user CONFIG options. There are times when we don't
116 * want the MTD driver to be probing the boot or user flash,
117 * so having the option to enable only one bank is important.
118 */
119int setup_flash_params(void)
120{
121 u16 boot_swapboot;
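	/* Derive the boot:swap setting from the CPU's MEM_STSTAT register and
	   the board CSR status bit; the partition tables above document the
	   expected boot[2:0]:swap encodings. */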
122 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) |
123 ((bcsr->status >> 6) & 0x1);
124 printk("Pb1550 MTD: boot:swap %d\n", boot_swapboot);
125
126 switch (boot_swapboot) {
127 case 0: /* 512Mbit devices, both enabled */
128 case 1:
129 case 8:
130 case 9:
131#if defined(PB1550_BOTH_BANKS)
132 window_addr = 0x18000000;
133 window_size = 0x8000000;
134#elif defined(PB1550_BOOT_ONLY)
135 window_addr = 0x1C000000;
136 window_size = 0x4000000;
137#else /* USER ONLY */
138 window_addr = 0x1E000000;
139 window_size = 0x4000000;
140#endif
141 break;
142 case 0xC:
143 case 0xD:
144 case 0xE:
145 case 0xF:
 146		/* 64 MB Boot NOR Flash is disabled */
 147		/* and the start address is moved to 0x0C000000 */
 148		window_addr = 0x0C000000;
 149		window_size = 0x4000000;
		break;
150 default:
151 printk("Pb1550 MTD: unsupported boot:swap setting\n");
152 return 1;
153 }
154 return 0;
155}
156
157int __init pb1550_mtd_init(void)
158{
159 struct mtd_partition *parts;
160 int nb_parts = 0;
161
162 /* Default flash bankwidth */
163 pb1550_map.bankwidth = flash_bankwidth;
164
165 if (setup_flash_params())
166 return -ENXIO;
167
168 /*
169 * Static partition definition selection
170 */
171 parts = pb1550_partitions;
172 nb_parts = NB_OF(pb1550_partitions);
173 pb1550_map.size = window_size;
174
175 /*
176 * Now let's probe for the actual flash. Do it here since
177 * specific machine settings might have been set above.
178 */
179 printk(KERN_NOTICE "Pb1550 flash: probing %d-bit flash bus\n",
180 pb1550_map.bankwidth*8);
181 pb1550_map.virt = ioremap(window_addr, window_size);
182 mymtd = do_map_probe("cfi_probe", &pb1550_map);
183 if (!mymtd) return -ENXIO;
184 mymtd->owner = THIS_MODULE;
185
186 add_mtd_partitions(mymtd, parts, nb_parts);
187 return 0;
188}
189
190static void __exit pb1550_mtd_cleanup(void)
191{
192 if (mymtd) {
193 del_mtd_partitions(mymtd);
194 map_destroy(mymtd);
195 }
196}
197
198module_init(pb1550_mtd_init);
199module_exit(pb1550_mtd_cleanup);
200
201MODULE_AUTHOR("Embedded Edge, LLC");
202MODULE_DESCRIPTION("Pb1550 mtd map driver");
203MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pb1xxx-flash.c b/drivers/mtd/maps/pb1xxx-flash.c
new file mode 100644
index 000000000000..06e731540552
--- /dev/null
+++ b/drivers/mtd/maps/pb1xxx-flash.c
@@ -0,0 +1,178 @@
1/*
2 * Flash memory access on Alchemy Pb1xxx boards
3 *
4 * (C) 2001 Pete Popov <ppopov@mvista.com>
5 *
6 * $Id: pb1xxx-flash.c,v 1.14 2004/11/04 13:24:15 gleixner Exp $
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/map.h>
17#include <linux/mtd/partitions.h>
18
19#include <asm/io.h>
20
21#ifdef DEBUG_RW
22#define DBG(x...) printk(x)
23#else
24#define DBG(x...)
25#endif
26
27#ifdef CONFIG_MIPS_PB1000
28
29#define WINDOW_ADDR 0x1F800000
30#define WINDOW_SIZE 0x800000
31
32static struct mtd_partition pb1xxx_partitions[] = {
33 {
34 .name = "yamon env",
35 .size = 0x00020000,
36 .offset = 0,
37 .mask_flags = MTD_WRITEABLE},
38 {
39 .name = "User FS",
40 .size = 0x003e0000,
41 .offset = 0x20000,},
42 {
43 .name = "boot code",
44 .size = 0x100000,
45 .offset = 0x400000,
46 .mask_flags = MTD_WRITEABLE},
47 {
48 .name = "raw/kernel",
49 .size = 0x300000,
50 .offset = 0x500000}
51};
52
53#elif defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1100)
54
55#if defined(CONFIG_MTD_PB1500_BOOT) && defined(CONFIG_MTD_PB1500_USER)
56/* both 32MB banks will be used. Combine the first 32MB bank and the
57 * first 28MB of the second bank together into a single jffs/jffs2
58 * partition.
59 */
60#define WINDOW_ADDR 0x1C000000
61#define WINDOW_SIZE 0x4000000
62static struct mtd_partition pb1xxx_partitions[] = {
63 {
64 .name = "User FS",
65 .size = 0x3c00000,
66 .offset = 0x0000000
67 },{
68 .name = "yamon",
69 .size = 0x0100000,
70 .offset = 0x3c00000,
71 .mask_flags = MTD_WRITEABLE
72 },{
73 .name = "raw kernel",
74 .size = 0x02c0000,
75 .offset = 0x3d00000
76 }
77};
78#elif defined(CONFIG_MTD_PB1500_BOOT) && !defined(CONFIG_MTD_PB1500_USER)
79#define WINDOW_ADDR 0x1E000000
80#define WINDOW_SIZE 0x2000000
81static struct mtd_partition pb1xxx_partitions[] = {
82 {
83 .name = "User FS",
84 .size = 0x1c00000,
85 .offset = 0x0000000
86 },{
87 .name = "yamon",
88 .size = 0x0100000,
89 .offset = 0x1c00000,
90 .mask_flags = MTD_WRITEABLE
91 },{
92 .name = "raw kernel",
93 .size = 0x02c0000,
94 .offset = 0x1d00000
95 }
96};
97#elif !defined(CONFIG_MTD_PB1500_BOOT) && defined(CONFIG_MTD_PB1500_USER)
98#define WINDOW_ADDR 0x1C000000
99#define WINDOW_SIZE 0x2000000
100static struct mtd_partition pb1xxx_partitions[] = {
101 {
102 .name = "User FS",
103 .size = 0x1e00000,
104 .offset = 0x0000000
105 },{
106 .name = "raw kernel",
107 .size = 0x0200000,
108 .offset = 0x1e00000,
109 }
110};
111#else
112#error MTD_PB1500 define combo error /* should never happen */
113#endif
114#else
115#error Unsupported board
116#endif
117
118#define NAME "Pb1x00 Linux Flash"
119#define PADDR WINDOW_ADDR
120#define BUSWIDTH 4
121#define SIZE WINDOW_SIZE
122#define PARTITIONS 4
123
124static struct map_info pb1xxx_mtd_map = {
125 .name = NAME,
126 .size = SIZE,
127 .bankwidth = BUSWIDTH,
128 .phys = PADDR,
129};
130
131static struct mtd_info *pb1xxx_mtd;
132
133int __init pb1xxx_mtd_init(void)
134{
135 struct mtd_partition *parts;
136 int nb_parts = 0;
137 char *part_type;
138
139 /*
140 * Static partition definition selection
141 */
142 part_type = "static";
143 parts = pb1xxx_partitions;
144 nb_parts = ARRAY_SIZE(pb1xxx_partitions);
145
146 /*
147 * Now let's probe for the actual flash. Do it here since
148 * specific machine settings might have been set above.
149 */
150 printk(KERN_NOTICE "Pb1xxx flash: probing %d-bit flash bus\n",
151 BUSWIDTH*8);
152 pb1xxx_mtd_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
153
154 simple_map_init(&pb1xxx_mtd_map);
155
156 pb1xxx_mtd = do_map_probe("cfi_probe", &pb1xxx_mtd_map);
157 if (!pb1xxx_mtd) return -ENXIO;
158 pb1xxx_mtd->owner = THIS_MODULE;
159
160 add_mtd_partitions(pb1xxx_mtd, parts, nb_parts);
161 return 0;
162}
163
164static void __exit pb1xxx_mtd_cleanup(void)
165{
166 if (pb1xxx_mtd) {
167 del_mtd_partitions(pb1xxx_mtd);
168 map_destroy(pb1xxx_mtd);
169 iounmap((void *) pb1xxx_mtd_map.virt);
170 }
171}
172
173module_init(pb1xxx_mtd_init);
174module_exit(pb1xxx_mtd_cleanup);
175
176MODULE_AUTHOR("Pete Popov");
177MODULE_DESCRIPTION("Pb1xxx CFI map driver");
178MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
new file mode 100644
index 000000000000..08b60bdc5381
--- /dev/null
+++ b/drivers/mtd/maps/pci.c
@@ -0,0 +1,388 @@
1/*
2 * linux/drivers/mtd/maps/pci.c
3 *
4 * Copyright (C) 2001 Russell King, All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * $Id: pci.c,v 1.9 2004/11/28 09:40:40 dwmw2 Exp $
11 *
12 * Generic PCI memory map driver. We support the following boards:
13 * - Intel IQ80310 ATU.
14 * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
15 */
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/pci.h>
19#include <linux/init.h>
20
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/map.h>
23#include <linux/mtd/partitions.h>
24
25struct map_pci_info;
26
27struct mtd_pci_info {
28 int (*init)(struct pci_dev *dev, struct map_pci_info *map);
29 void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
30 unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
31 const char *map_name;
32};
33
34struct map_pci_info {
35 struct map_info map;
36 void __iomem *base;
37 void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
38 unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
39 struct pci_dev *dev;
40};
41
42static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
43{
44 struct map_pci_info *map = (struct map_pci_info *)_map;
45 map_word val;
46 val.x[0]= readb(map->base + map->translate(map, ofs));
47// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
48 return val;
49}
50
51#if 0
52static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
53{
54 struct map_pci_info *map = (struct map_pci_info *)_map;
55 map_word val;
56 val.x[0] = readw(map->base + map->translate(map, ofs));
57// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
58 return val;
59}
60#endif
61static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
62{
63 struct map_pci_info *map = (struct map_pci_info *)_map;
64 map_word val;
65 val.x[0] = readl(map->base + map->translate(map, ofs));
66// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
67 return val;
68}
69
70static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
71{
72 struct map_pci_info *map = (struct map_pci_info *)_map;
73 memcpy_fromio(to, map->base + map->translate(map, from), len);
74}
75
76static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
77{
78 struct map_pci_info *map = (struct map_pci_info *)_map;
79// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
80 writeb(val.x[0], map->base + map->translate(map, ofs));
81}
82
83#if 0
84static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
85{
86 struct map_pci_info *map = (struct map_pci_info *)_map;
87// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
88 writew(val.x[0], map->base + map->translate(map, ofs));
89}
90#endif
91static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
92{
93 struct map_pci_info *map = (struct map_pci_info *)_map;
94// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
95 writel(val.x[0], map->base + map->translate(map, ofs));
96}
97
98static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
99{
100 struct map_pci_info *map = (struct map_pci_info *)_map;
101 memcpy_toio(map->base + map->translate(map, to), from, len);
102}
103
104static struct map_info mtd_pci_map = {
105 .phys = NO_XIP,
106 .copy_from = mtd_pci_copyfrom,
107 .copy_to = mtd_pci_copyto,
108};
109
110/*
111 * Intel IOP80310 Flash driver
112 */
113
114static int
115intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
116{
117 u32 win_base;
118
119 map->map.bankwidth = 1;
 120	map->map.read = mtd_pci_read8;
 121	map->map.write = mtd_pci_write8;
122
123 map->map.size = 0x00800000;
124 map->base = ioremap_nocache(pci_resource_start(dev, 0),
125 pci_resource_len(dev, 0));
126
127 if (!map->base)
128 return -ENOMEM;
129
130 /*
131 * We want to base the memory window at Xscale
132 * bus address 0, not 0x1000.
133 */
134 pci_read_config_dword(dev, 0x44, &win_base);
135 pci_write_config_dword(dev, 0x44, 0);
136
137 map->map.map_priv_2 = win_base;
138
139 return 0;
140}
141
142static void
143intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
144{
145 if (map->base)
146 iounmap(map->base);
147 pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
148}
149
150static unsigned long
151intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
152{
153 unsigned long page_addr = ofs & 0x00400000;
154
155 /*
 156	 * This munges the flash location so we avoid
157 * the first 80 bytes (they appear to read nonsense).
158 */
159 if (page_addr) {
160 writel(0x00000008, map->base + 0x1558);
161 writel(0x00000000, map->base + 0x1550);
162 } else {
163 writel(0x00000007, map->base + 0x1558);
164 writel(0x00800000, map->base + 0x1550);
165 ofs += 0x00800000;
166 }
167
168 return ofs;
169}
170
171static struct mtd_pci_info intel_iq80310_info = {
172 .init = intel_iq80310_init,
173 .exit = intel_iq80310_exit,
174 .translate = intel_iq80310_translate,
175 .map_name = "cfi_probe",
176};
177
178/*
179 * Intel DC21285 driver
180 */
181
182static int
183intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
184{
185 unsigned long base, len;
186
187 base = pci_resource_start(dev, PCI_ROM_RESOURCE);
188 len = pci_resource_len(dev, PCI_ROM_RESOURCE);
189
190 if (!len || !base) {
191 /*
192 * No ROM resource
193 */
194 base = pci_resource_start(dev, 2);
195 len = pci_resource_len(dev, 2);
196
197 /*
198 * We need to re-allocate PCI BAR2 address range to the
199 * PCI ROM BAR, and disable PCI BAR2.
200 */
201 } else {
202 /*
203 * Hmm, if an address was allocated to the ROM resource, but
204 * not enabled, should we be allocating a new resource for it
205 * or simply enabling it?
206 */
207 if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) &
208 IORESOURCE_ROM_ENABLE)) {
209 u32 val;
210 pci_resource_flags(dev, PCI_ROM_RESOURCE) |= IORESOURCE_ROM_ENABLE;
211 pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
212 val |= PCI_ROM_ADDRESS_ENABLE;
213 pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
214 printk("%s: enabling expansion ROM\n", pci_name(dev));
215 }
216 }
217
218 if (!len || !base)
219 return -ENXIO;
220
221 map->map.bankwidth = 4;
 222	map->map.read = mtd_pci_read32;
 223	map->map.write = mtd_pci_write32;
224 map->map.size = len;
225 map->base = ioremap_nocache(base, len);
226
227 if (!map->base)
228 return -ENOMEM;
229
230 return 0;
231}
232
233static void
234intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
235{
236 u32 val;
237
238 if (map->base)
239 iounmap(map->base);
240
241 /*
242 * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
243 */
244 pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE;
245 pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
246 val &= ~PCI_ROM_ADDRESS_ENABLE;
247 pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
248}
249
250static unsigned long
251intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
252{
253 return ofs & 0x00ffffc0 ? ofs : (ofs ^ (1 << 5));
254}
255
256static struct mtd_pci_info intel_dc21285_info = {
257 .init = intel_dc21285_init,
258 .exit = intel_dc21285_exit,
259 .translate = intel_dc21285_translate,
260 .map_name = "jedec_probe",
261};
262
263/*
264 * PCI device ID table
265 */
266
267static struct pci_device_id mtd_pci_ids[] = {
268 {
269 .vendor = PCI_VENDOR_ID_INTEL,
270 .device = 0x530d,
271 .subvendor = PCI_ANY_ID,
272 .subdevice = PCI_ANY_ID,
273 .class = PCI_CLASS_MEMORY_OTHER << 8,
274 .class_mask = 0xffff00,
275 .driver_data = (unsigned long)&intel_iq80310_info,
276 },
277 {
278 .vendor = PCI_VENDOR_ID_DEC,
279 .device = PCI_DEVICE_ID_DEC_21285,
280 .subvendor = 0, /* DC21285 defaults to 0 on reset */
281 .subdevice = 0, /* DC21285 defaults to 0 on reset */
282 .driver_data = (unsigned long)&intel_dc21285_info,
283 },
284 { 0, }
285};
286
287/*
288 * Generic code follows.
289 */
290
291static int __devinit
292mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
293{
294 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
295 struct map_pci_info *map = NULL;
296 struct mtd_info *mtd = NULL;
297 int err;
298
299 err = pci_enable_device(dev);
300 if (err)
301 goto out;
302
303 err = pci_request_regions(dev, "pci mtd");
304 if (err)
305 goto out;
306
307 map = kmalloc(sizeof(*map), GFP_KERNEL);
308 err = -ENOMEM;
309 if (!map)
310 goto release;
311
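	/* Start from the generic template map; the board-specific init hook
	 * below fills in the bankwidth, size and accessors. */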
312 map->map = mtd_pci_map;
313 map->map.name = pci_name(dev);
314 map->dev = dev;
315 map->exit = info->exit;
316 map->translate = info->translate;
317
318 err = info->init(dev, map);
319 if (err)
320 goto release;
321
322 /* tsk - do_map_probe should take const char * */
323 mtd = do_map_probe((char *)info->map_name, &map->map);
324 err = -ENODEV;
325 if (!mtd)
326 goto release;
327
328 mtd->owner = THIS_MODULE;
329 add_mtd_device(mtd);
330
331 pci_set_drvdata(dev, mtd);
332
333 return 0;
334
335release:
336 if (mtd)
337 map_destroy(mtd);
338
339 if (map) {
340 map->exit(dev, map);
341 kfree(map);
342 }
343
344 pci_release_regions(dev);
345out:
346 return err;
347}
348
349static void __devexit
350mtd_pci_remove(struct pci_dev *dev)
351{
352 struct mtd_info *mtd = pci_get_drvdata(dev);
353 struct map_pci_info *map = mtd->priv;
354
355 del_mtd_device(mtd);
356 map_destroy(mtd);
357 map->exit(dev, map);
358 kfree(map);
359
360 pci_set_drvdata(dev, NULL);
361 pci_release_regions(dev);
362}
363
364static struct pci_driver mtd_pci_driver = {
365 .name = "MTD PCI",
366 .probe = mtd_pci_probe,
367 .remove = __devexit_p(mtd_pci_remove),
368 .id_table = mtd_pci_ids,
369};
370
371static int __init mtd_pci_maps_init(void)
372{
373 return pci_module_init(&mtd_pci_driver);
374}
375
376static void __exit mtd_pci_maps_exit(void)
377{
378 pci_unregister_driver(&mtd_pci_driver);
379}
380
381module_init(mtd_pci_maps_init);
382module_exit(mtd_pci_maps_exit);
383
384MODULE_LICENSE("GPL");
385MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
386MODULE_DESCRIPTION("Generic PCI map driver");
387MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
388
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
new file mode 100644
index 000000000000..e37b4c1976e5
--- /dev/null
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -0,0 +1,860 @@
1/*
2 * $Id: pcmciamtd.c,v 1.51 2004/07/12 22:38:29 dwmw2 Exp $
3 *
4 * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
5 *
6 * Author: Simon Evans <spse@secret.org.uk>
7 *
8 * Copyright (C) 2002 Simon Evans
9 *
10 * Licence: GPL
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/timer.h>
17#include <linux/init.h>
18#include <asm/io.h>
19#include <asm/system.h>
20
21#include <pcmcia/version.h>
22#include <pcmcia/cs_types.h>
23#include <pcmcia/cs.h>
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26
27#include <linux/mtd/map.h>
28#include <linux/mtd/mtd.h>
29
30#ifdef CONFIG_MTD_DEBUG
31static int debug = CONFIG_MTD_DEBUG_VERBOSE;
32MODULE_PARM(debug, "i");
33MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
34#undef DEBUG
35#define DEBUG(n, format, arg...) \
36 if (n <= debug) { \
37 printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
38 }
39
40#else
41#undef DEBUG
42#define DEBUG(n, arg...)
43static const int debug = 0;
44#endif
45
46#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
47#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
48#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
49
50
51#define DRIVER_DESC "PCMCIA Flash memory card driver"
52#define DRIVER_VERSION "$Revision: 1.51 $"
53
54/* Size of the PCMCIA address space: 26 bits = 64 MB */
55#define MAX_PCMCIA_ADDR 0x4000000
56
57struct pcmciamtd_dev {
58 dev_link_t link; /* PCMCIA link */
59 dev_node_t node; /* device node */
60 caddr_t win_base; /* ioremapped address of PCMCIA window */
61 unsigned int win_size; /* size of window */
62 unsigned int offset; /* offset into card the window currently points at */
63 struct map_info pcmcia_map;
64 struct mtd_info *mtd_info;
65 int vpp;
66 char mtd_name[sizeof(struct cistpl_vers_1_t)];
67};
68
69
70static dev_info_t dev_info = "pcmciamtd";
71static dev_link_t *dev_list;
72
73/* Module parameters */
74
75/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
76static int bankwidth = 2;
77
78/* Speed of memory accesses, in ns */
79static int mem_speed;
80
81/* Force the size of an SRAM card */
82static int force_size;
83
84/* Force Vpp */
85static int vpp;
86
87/* Set Vpp */
88static int setvpp;
89
90/* Force card to be treated as FLASH, ROM or RAM */
91static int mem_type;
92
93MODULE_LICENSE("GPL");
94MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
95MODULE_DESCRIPTION(DRIVER_DESC);
96MODULE_PARM(bankwidth, "i");
97MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
98MODULE_PARM(mem_speed, "i");
99MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
100MODULE_PARM(force_size, "i");
101MODULE_PARM_DESC(force_size, "Force size of card in MiB (1-64)");
102MODULE_PARM(setvpp, "i");
103MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
104MODULE_PARM(vpp, "i");
105MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
106MODULE_PARM(mem_type, "i");
107MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
108
109
110/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
111static caddr_t remap_window(struct map_info *map, unsigned long to)
112{
113 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
114 window_handle_t win = (window_handle_t)map->map_priv_2;
115 memreq_t mrq;
116 int ret;
117
118 if(!(dev->link.state & DEV_PRESENT)) {
119 DEBUG(1, "device removed state = 0x%4.4X", dev->link.state);
120 return 0;
121 }
122
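	/* Align the requested offset to the start of a window-sized page and
	   remap the PCMCIA window only if it is not already pointing there. */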
123 mrq.CardOffset = to & ~(dev->win_size-1);
124 if(mrq.CardOffset != dev->offset) {
125 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
126 dev->offset, mrq.CardOffset);
127 mrq.Page = 0;
128 if( (ret = pcmcia_map_mem_page(win, &mrq)) != CS_SUCCESS) {
129 cs_error(dev->link.handle, MapMemPage, ret);
130 return NULL;
131 }
132 dev->offset = mrq.CardOffset;
133 }
134 return dev->win_base + (to & (dev->win_size-1));
135}
136
137
138static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
139{
140 caddr_t addr;
141 map_word d = {{0}};
142
143 addr = remap_window(map, ofs);
144 if(!addr)
145 return d;
146
147 d.x[0] = readb(addr);
148 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
149 return d;
150}
151
152
153static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
154{
155 caddr_t addr;
156 map_word d = {{0}};
157
158 addr = remap_window(map, ofs);
159 if(!addr)
160 return d;
161
162 d.x[0] = readw(addr);
163 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
164 return d;
165}
166
167
168static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
169{
170 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
171 unsigned long win_size = dev->win_size;
172
173 DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
174 while(len) {
175 int toread = win_size - (from & (win_size-1));
176 caddr_t addr;
177
178 if(toread > len)
179 toread = len;
180
181 addr = remap_window(map, from);
182 if(!addr)
183 return;
184
185 DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
186 memcpy_fromio(to, addr, toread);
187 len -= toread;
188 to += toread;
189 from += toread;
190 }
191}
192
193
194static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
195{
196 caddr_t addr = remap_window(map, adr);
197
198 if(!addr)
199 return;
200
201 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
202 writeb(d.x[0], addr);
203}
204
205
206static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
207{
208 caddr_t addr = remap_window(map, adr);
209 if(!addr)
210 return;
211
212 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
213 writew(d.x[0], addr);
214}
215
216
217static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
218{
219 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
220 unsigned long win_size = dev->win_size;
221
222 DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
223 while(len) {
224 int towrite = win_size - (to & (win_size-1));
225 caddr_t addr;
226
227 if(towrite > len)
228 towrite = len;
229
230 addr = remap_window(map, to);
231 if(!addr)
232 return;
233
234 DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
235 memcpy_toio(addr, from, towrite);
236 len -= towrite;
237 to += towrite;
238 from += towrite;
239 }
240}
241
242
243/* read/write{8,16} copy_{from,to} routines with direct access */
244
245#define DEV_REMOVED(x) (!(*(u_int *)x->map_priv_1 & DEV_PRESENT))
246
247static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
248{
249 caddr_t win_base = (caddr_t)map->map_priv_2;
250 map_word d = {{0}};
251
252 if(DEV_REMOVED(map))
253 return d;
254
255 d.x[0] = readb(win_base + ofs);
256 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
257 return d;
258}
259
260
261static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
262{
263 caddr_t win_base = (caddr_t)map->map_priv_2;
264 map_word d = {{0}};
265
266 if(DEV_REMOVED(map))
267 return d;
268
269 d.x[0] = readw(win_base + ofs);
270 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
271 return d;
272}
273
274
275static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
276{
277 caddr_t win_base = (caddr_t)map->map_priv_2;
278
279 if(DEV_REMOVED(map))
280 return;
281
282 DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
283 memcpy_fromio(to, win_base + from, len);
284}
285
286
287static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
288{
289 caddr_t win_base = (caddr_t)map->map_priv_2;
290
291 if(DEV_REMOVED(map))
292 return;
293
294 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
295 writeb(d, win_base + adr);
296}
297
298
299static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
300{
301 caddr_t win_base = (caddr_t)map->map_priv_2;
302
303 if(DEV_REMOVED(map))
304 return;
305
306 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
307 writew(d, win_base + adr);
308}
309
310
311static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
312{
313 caddr_t win_base = (caddr_t)map->map_priv_2;
314
315 if(DEV_REMOVED(map))
316 return;
317
318 DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
319 memcpy_toio(win_base + to, from, len);
320}
321
322
323static void pcmciamtd_set_vpp(struct map_info *map, int on)
324{
325 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
326 dev_link_t *link = &dev->link;
327 modconf_t mod;
328 int ret;
329
330 mod.Attributes = CONF_VPP1_CHANGE_VALID | CONF_VPP2_CHANGE_VALID;
331 mod.Vcc = 0;
332 mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0;
333
 334	DEBUG(2, "dev = %p on = %d vpp = %d", dev, on, dev->vpp);
335 ret = pcmcia_modify_configuration(link->handle, &mod);
336 if(ret != CS_SUCCESS) {
337 cs_error(link->handle, ModifyConfiguration, ret);
338 }
339}
340
341
342/* After a card is removed, pcmciamtd_release() will unregister the
343 * device, and release the PCMCIA configuration. If the device is
344 * still open, this will be postponed until it is closed.
345 */
346
347static void pcmciamtd_release(dev_link_t *link)
348{
349 struct pcmciamtd_dev *dev = link->priv;
350
351 DEBUG(3, "link = 0x%p", link);
352
353 if (link->win) {
354 if(dev->win_base) {
355 iounmap(dev->win_base);
356 dev->win_base = NULL;
357 }
358 pcmcia_release_window(link->win);
359 }
360 pcmcia_release_configuration(link->handle);
361 link->state &= ~DEV_CONFIG;
362}
363
364
365static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_name)
366{
367 int rc;
368 tuple_t tuple;
369 cisparse_t parse;
370 u_char buf[64];
371
372 tuple.Attributes = 0;
373 tuple.TupleData = (cisdata_t *)buf;
374 tuple.TupleDataMax = sizeof(buf);
375 tuple.TupleOffset = 0;
376 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
377
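	/* Walk the card's CIS tuple chain, pulling out the size, bus width,
	   geometry and name hints used to set up the map. */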
378 rc = pcmcia_get_first_tuple(link->handle, &tuple);
379 while(rc == CS_SUCCESS) {
380 rc = pcmcia_get_tuple_data(link->handle, &tuple);
381 if(rc != CS_SUCCESS) {
382 cs_error(link->handle, GetTupleData, rc);
383 break;
384 }
385 rc = pcmcia_parse_tuple(link->handle, &tuple, &parse);
386 if(rc != CS_SUCCESS) {
387 cs_error(link->handle, ParseTuple, rc);
388 break;
389 }
390
391 switch(tuple.TupleCode) {
392 case CISTPL_FORMAT: {
393 cistpl_format_t *t = &parse.format;
394 (void)t; /* Shut up, gcc */
395 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
396 t->type, t->edc, t->offset, t->length);
397 break;
398
399 }
400
401 case CISTPL_DEVICE: {
402 cistpl_device_t *t = &parse.device;
403 int i;
404 DEBUG(2, "Common memory:");
405 dev->pcmcia_map.size = t->dev[0].size;
406 for(i = 0; i < t->ndev; i++) {
407 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
408 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
409 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
410 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
411 }
412 break;
413 }
414
415 case CISTPL_VERS_1: {
416 cistpl_vers_1_t *t = &parse.version_1;
417 int i;
418 if(t->ns) {
419 dev->mtd_name[0] = '\0';
420 for(i = 0; i < t->ns; i++) {
421 if(i)
422 strcat(dev->mtd_name, " ");
423 strcat(dev->mtd_name, t->str+t->ofs[i]);
424 }
425 }
426 DEBUG(2, "Found name: %s", dev->mtd_name);
427 break;
428 }
429
430 case CISTPL_JEDEC_C: {
431 cistpl_jedec_t *t = &parse.jedec;
432 int i;
433 for(i = 0; i < t->nid; i++) {
434 DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
435 }
436 break;
437 }
438
439 case CISTPL_DEVICE_GEO: {
440 cistpl_device_geo_t *t = &parse.device_geo;
441 int i;
442 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
443 for(i = 0; i < t->ngeo; i++) {
444 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
445 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
446 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
447 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
448 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
449 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
450 }
451 break;
452 }
453
454 default:
455 DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
456 }
457
458 rc = pcmcia_get_next_tuple(link->handle, &tuple);
459 }
460 if(!dev->pcmcia_map.size)
461 dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
462
463 if(!dev->pcmcia_map.bankwidth)
464 dev->pcmcia_map.bankwidth = 2;
465
466 if(force_size) {
467 dev->pcmcia_map.size = force_size << 20;
468 DEBUG(2, "size forced to %dM", force_size);
469 }
470
471 if(bankwidth) {
472 dev->pcmcia_map.bankwidth = bankwidth;
473 DEBUG(2, "bankwidth forced to %d", bankwidth);
474 }
475
476 dev->pcmcia_map.name = dev->mtd_name;
477 if(!dev->mtd_name[0]) {
478 strcpy(dev->mtd_name, "PCMCIA Memory card");
479 *new_name = 1;
480 }
481
482 DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
483 dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
484}
485
486
487/* pcmciamtd_config() is scheduled to run after a CARD_INSERTION event
488 * is received, to configure the PCMCIA socket, and to make the
489 * MTD device available to the system.
490 */
491
492#define CS_CHECK(fn, ret) \
493do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
494
495static void pcmciamtd_config(dev_link_t *link)
496{
497 struct pcmciamtd_dev *dev = link->priv;
498 struct mtd_info *mtd = NULL;
499 cs_status_t status;
500 win_req_t req;
501 int last_ret = 0, last_fn = 0;
502 int ret;
503 int i;
504 config_info_t t;
505 static char *probes[] = { "jedec_probe", "cfi_probe" };
506 cisinfo_t cisinfo;
507 int new_name = 0;
508
509 DEBUG(3, "link=0x%p", link);
510
511 /* Configure card */
512 link->state |= DEV_CONFIG;
513
514 DEBUG(2, "Validating CIS");
515 ret = pcmcia_validate_cis(link->handle, &cisinfo);
516 if(ret != CS_SUCCESS) {
517 cs_error(link->handle, GetTupleData, ret);
518 } else {
519 DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
520 }
521
522 card_settings(dev, link, &new_name);
523
524 dev->pcmcia_map.phys = NO_XIP;
525 dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
526 dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
527 if (dev->pcmcia_map.bankwidth == 1) {
528 dev->pcmcia_map.read = pcmcia_read8_remap;
529 dev->pcmcia_map.write = pcmcia_write8_remap;
530 } else {
531 dev->pcmcia_map.read = pcmcia_read16_remap;
532 dev->pcmcia_map.write = pcmcia_write16_remap;
533 }
534 if(setvpp == 1)
535 dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
536
 537	/* Request a memory window for PCMCIA. Some architectures can map windows up to the maximum
 538	   that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
 539	   whole card - otherwise we try smaller windows until we succeed */
540
541 req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
542 req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
543 req.Base = 0;
544 req.AccessSpeed = mem_speed;
545 link->win = (window_handle_t)link->handle;
546 req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR;
547 dev->win_size = 0;
548
549 do {
550 int ret;
551 DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
552 req.Size >> 10, req.AccessSpeed);
553 ret = pcmcia_request_window(&link->handle, &req, &link->win);
554 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
555 if(ret) {
556 req.Size >>= 1;
557 } else {
558 DEBUG(2, "Got window of size %dKiB", req.Size >> 10);
559 dev->win_size = req.Size;
560 break;
561 }
562 } while(req.Size >= 0x1000);
563
564 DEBUG(2, "dev->win_size = %d", dev->win_size);
565
566 if(!dev->win_size) {
 567		err("Can't allocate memory window");
568 pcmciamtd_release(link);
569 return;
570 }
571 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
572
573 /* Get write protect status */
574 CS_CHECK(GetStatus, pcmcia_get_status(link->handle, &status));
575 DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
576 status.CardState, (unsigned long)link->win);
577 dev->win_base = ioremap(req.Base, req.Size);
578 if(!dev->win_base) {
579 err("ioremap(%lu, %u) failed", req.Base, req.Size);
580 pcmciamtd_release(link);
581 return;
582 }
583 DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x",
584 dev, req.Base, dev->win_base, req.Size);
585
586 dev->offset = 0;
587 dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
588 dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
589
590 DEBUG(2, "Getting configuration");
591 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link->handle, &t));
592 DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2);
593 dev->vpp = (vpp) ? vpp : t.Vpp1;
594 link->conf.Attributes = 0;
595 link->conf.Vcc = t.Vcc;
596 if(setvpp == 2) {
597 link->conf.Vpp1 = dev->vpp;
598 link->conf.Vpp2 = dev->vpp;
599 } else {
600 link->conf.Vpp1 = 0;
601 link->conf.Vpp2 = 0;
602 }
603
604 link->conf.IntType = INT_MEMORY;
605 link->conf.ConfigBase = t.ConfigBase;
606 link->conf.Status = t.Status;
607 link->conf.Pin = t.Pin;
608 link->conf.Copy = t.Copy;
609 link->conf.ExtStatus = t.ExtStatus;
610 link->conf.ConfigIndex = 0;
611 link->conf.Present = t.Present;
612 DEBUG(2, "Setting Configuration");
613 ret = pcmcia_request_configuration(link->handle, &link->conf);
614 if(ret != CS_SUCCESS) {
615 cs_error(link->handle, RequestConfiguration, ret);
616 }
617
618 if(mem_type == 1) {
619 mtd = do_map_probe("map_ram", &dev->pcmcia_map);
620 } else if(mem_type == 2) {
621 mtd = do_map_probe("map_rom", &dev->pcmcia_map);
622 } else {
623 for(i = 0; i < sizeof(probes) / sizeof(char *); i++) {
624 DEBUG(1, "Trying %s", probes[i]);
625 mtd = do_map_probe(probes[i], &dev->pcmcia_map);
626 if(mtd)
627 break;
628
629 DEBUG(1, "FAILED: %s", probes[i]);
630 }
631 }
632
633 if(!mtd) {
634 DEBUG(1, "Cant find an MTD");
635 pcmciamtd_release(link);
636 return;
637 }
638
639 dev->mtd_info = mtd;
640 mtd->owner = THIS_MODULE;
641
642 if(new_name) {
643 int size = 0;
644 char unit = ' ';
645 /* Since we are using a default name, make it better by adding in the
646 size */
647 if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
648 size = mtd->size >> 10;
649 unit = 'K';
650 } else {
651 size = mtd->size >> 20;
652 unit = 'M';
653 }
654 snprintf(dev->mtd_name, sizeof(dev->mtd_name), "%d%ciB %s", size, unit, "PCMCIA Memory card");
655 }
656
657 /* If the memory found fits completely into the mapped PCMCIA window,
658 use the faster non-remapping read/write functions */
659 if(mtd->size <= dev->win_size) {
660 DEBUG(1, "Using non remapping memory functions");
661 dev->pcmcia_map.map_priv_1 = (unsigned long)&(dev->link.state);
662 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
663 if (dev->pcmcia_map.bankwidth == 1) {
664 dev->pcmcia_map.read = pcmcia_read8;
665 dev->pcmcia_map.write = pcmcia_write8;
666 } else {
667 dev->pcmcia_map.read = pcmcia_read16;
668 dev->pcmcia_map.write = pcmcia_write16;
669 }
670 dev->pcmcia_map.copy_from = pcmcia_copy_from;
671 dev->pcmcia_map.copy_to = pcmcia_copy_to;
672 }
673
674 if(add_mtd_device(mtd)) {
675 map_destroy(mtd);
676 dev->mtd_info = NULL;
677 err("Couldnt register MTD device");
678 pcmciamtd_release(link);
679 return;
680 }
681 snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
682 info("mtd%d: %s", mtd->index, mtd->name);
683 link->state &= ~DEV_CONFIG_PENDING;
684 link->dev = &dev->node;
685 return;
686
687 cs_failed:
688 cs_error(link->handle, last_fn, last_ret);
689 err("CS Error, exiting");
690 pcmciamtd_release(link);
691 return;
692}
693
694
695/* The card status event handler. Mostly, this schedules other
696 * stuff to run after an event is received. A CARD_REMOVAL event
697 * also sets some flags to discourage the driver from trying
698 * to talk to the card any more.
699 */
700
701static int pcmciamtd_event(event_t event, int priority,
702 event_callback_args_t *args)
703{
704 dev_link_t *link = args->client_data;
705
706 DEBUG(1, "event=0x%06x", event);
707 switch (event) {
708 case CS_EVENT_CARD_REMOVAL:
709 DEBUG(2, "EVENT_CARD_REMOVAL");
710 link->state &= ~DEV_PRESENT;
711 if (link->state & DEV_CONFIG) {
712 struct pcmciamtd_dev *dev = link->priv;
713 if(dev->mtd_info) {
714 del_mtd_device(dev->mtd_info);
715 info("mtd%d: Removed", dev->mtd_info->index);
716 }
717 pcmciamtd_release(link);
718 }
719 break;
720 case CS_EVENT_CARD_INSERTION:
721 DEBUG(2, "EVENT_CARD_INSERTION");
722 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
723 pcmciamtd_config(link);
724 break;
725 case CS_EVENT_PM_SUSPEND:
726 DEBUG(2, "EVENT_PM_SUSPEND");
727 link->state |= DEV_SUSPEND;
728 /* Fall through... */
729 case CS_EVENT_RESET_PHYSICAL:
730 DEBUG(2, "EVENT_RESET_PHYSICAL");
731 /* get_lock(link); */
732 break;
733 case CS_EVENT_PM_RESUME:
734 DEBUG(2, "EVENT_PM_RESUME");
735 link->state &= ~DEV_SUSPEND;
736 /* Fall through... */
737 case CS_EVENT_CARD_RESET:
738 DEBUG(2, "EVENT_CARD_RESET");
739 /* free_lock(link); */
740 break;
741 default:
742 DEBUG(2, "Unknown event %d", event);
743 }
744 return 0;
745}
746
747
748/* This deletes a driver "instance". The device is de-registered
749 * with Card Services. If it has been released, all local data
750 * structures are freed. Otherwise, the structures will be freed
751 * when the device is released.
752 */
753
754static void pcmciamtd_detach(dev_link_t *link)
755{
756 DEBUG(3, "link=0x%p", link);
757
758 if(link->state & DEV_CONFIG) {
759 pcmciamtd_release(link);
760 }
761
762 if (link->handle) {
763 int ret;
764 DEBUG(2, "Deregistering with card services");
765 ret = pcmcia_deregister_client(link->handle);
766 if (ret != CS_SUCCESS)
767 cs_error(link->handle, DeregisterClient, ret);
768 }
769
770 link->state |= DEV_STALE_LINK;
771}
772
773
774/* pcmciamtd_attach() creates an "instance" of the driver, allocating
775 * local data structures for one device. The device is registered
776 * with Card Services.
777 */
778
779static dev_link_t *pcmciamtd_attach(void)
780{
781 struct pcmciamtd_dev *dev;
782 dev_link_t *link;
783 client_reg_t client_reg;
784 int ret;
785
786 /* Create new memory card device */
787 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
788 if (!dev) return NULL;
789 DEBUG(1, "dev=0x%p", dev);
790
791 memset(dev, 0, sizeof(*dev));
792 link = &dev->link;
793 link->priv = dev;
794
795 link->conf.Attributes = 0;
796 link->conf.IntType = INT_MEMORY;
797
798 link->next = dev_list;
799 dev_list = link;
800
801 /* Register with Card Services */
802 client_reg.dev_info = &dev_info;
803 client_reg.EventMask =
804 CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
805 CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
806 CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
807 client_reg.event_handler = &pcmciamtd_event;
808 client_reg.Version = 0x0210;
809 client_reg.event_callback_args.client_data = link;
810 DEBUG(2, "Calling RegisterClient");
811 ret = pcmcia_register_client(&link->handle, &client_reg);
812 if (ret != 0) {
813 cs_error(link->handle, RegisterClient, ret);
814 pcmciamtd_detach(link);
815 return NULL;
816 }
817 DEBUG(2, "link = %p", link);
818 return link;
819}
820
821
822static struct pcmcia_driver pcmciamtd_driver = {
823 .drv = {
824 .name = "pcmciamtd"
825 },
826 .attach = pcmciamtd_attach,
827 .detach = pcmciamtd_detach,
828 .owner = THIS_MODULE
829};
830
831
832static int __init init_pcmciamtd(void)
833{
834 info(DRIVER_DESC " " DRIVER_VERSION);
835
836 if(bankwidth && bankwidth != 1 && bankwidth != 2) {
837 info("bad bankwidth (%d), using default", bankwidth);
838 bankwidth = 2;
839 }
840 if(force_size && (force_size < 1 || force_size > 64)) {
841 info("bad force_size (%d), using default", force_size);
842 force_size = 0;
843 }
844 if(mem_type && mem_type != 1 && mem_type != 2) {
845 info("bad mem_type (%d), using default", mem_type);
846 mem_type = 0;
847 }
848 return pcmcia_register_driver(&pcmciamtd_driver);
849}
850
851
852static void __exit exit_pcmciamtd(void)
853{
854 DEBUG(1, DRIVER_DESC " unloading");
855 pcmcia_unregister_driver(&pcmciamtd_driver);
856 BUG_ON(dev_list != NULL);
857}
858
859module_init(init_pcmciamtd);
860module_exit(exit_pcmciamtd);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
new file mode 100644
index 000000000000..b853670bfb81
--- /dev/null
+++ b/drivers/mtd/maps/physmap.c
@@ -0,0 +1,125 @@
1/*
2 * $Id: physmap.c,v 1.37 2004/11/28 09:40:40 dwmw2 Exp $
3 *
4 * Normal mappings of chips in physical memory
5 *
6 * Copyright (C) 2003 MontaVista Software Inc.
7 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
8 *
9 * 031022 - [jsun] add run-time configure and partition setup
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <asm/io.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/config.h>
21#include <linux/mtd/partitions.h>
22
23static struct mtd_info *mymtd;
24
25struct map_info physmap_map = {
26 .name = "phys_mapped_flash",
27 .phys = CONFIG_MTD_PHYSMAP_START,
28 .size = CONFIG_MTD_PHYSMAP_LEN,
29 .bankwidth = CONFIG_MTD_PHYSMAP_BANKWIDTH,
30};
31
32#ifdef CONFIG_MTD_PARTITIONS
33static struct mtd_partition *mtd_parts;
34static int mtd_parts_nb;
35
36static int num_physmap_partitions;
37static struct mtd_partition *physmap_partitions;
38
39static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
40
41void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
42{
43 physmap_partitions=parts;
44 num_physmap_partitions=num_parts;
45}
46#endif /* CONFIG_MTD_PARTITIONS */
47
48static int __init init_physmap(void)
49{
50 static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
51 const char **type;
52
53 printk(KERN_NOTICE "physmap flash device: %lx at %lx\n", physmap_map.size, physmap_map.phys);
54 physmap_map.virt = ioremap(physmap_map.phys, physmap_map.size);
55
56 if (!physmap_map.virt) {
57 printk("Failed to ioremap\n");
58 return -EIO;
59 }
60
61 simple_map_init(&physmap_map);
62
63 mymtd = NULL;
64 type = rom_probe_types;
65 for(; !mymtd && *type; type++) {
66 mymtd = do_map_probe(*type, &physmap_map);
67 }
68 if (mymtd) {
69 mymtd->owner = THIS_MODULE;
70
71#ifdef CONFIG_MTD_PARTITIONS
72 mtd_parts_nb = parse_mtd_partitions(mymtd, part_probes,
73 &mtd_parts, 0);
74
75 if (mtd_parts_nb > 0)
76 {
77 add_mtd_partitions (mymtd, mtd_parts, mtd_parts_nb);
78 return 0;
79 }
80
81 if (num_physmap_partitions != 0)
82 {
83 printk(KERN_NOTICE
84 "Using physmap partition definition\n");
85 add_mtd_partitions (mymtd, physmap_partitions, num_physmap_partitions);
86 return 0;
87 }
88
89#endif
90 add_mtd_device(mymtd);
91
92 return 0;
93 }
94
95 iounmap(physmap_map.virt);
96 return -ENXIO;
97}
98
99static void __exit cleanup_physmap(void)
100{
101#ifdef CONFIG_MTD_PARTITIONS
102 if (mtd_parts_nb) {
103 del_mtd_partitions(mymtd);
104 kfree(mtd_parts);
105 } else if (num_physmap_partitions) {
106 del_mtd_partitions(mymtd);
107 } else {
108 del_mtd_device(mymtd);
109 }
110#else
111 del_mtd_device(mymtd);
112#endif
113 map_destroy(mymtd);
114
115 iounmap(physmap_map.virt);
116 physmap_map.virt = NULL;
117}
118
119module_init(init_physmap);
120module_exit(cleanup_physmap);
121
122
123MODULE_LICENSE("GPL");
124MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
125MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
new file mode 100644
index 000000000000..a0f43dad8985
--- /dev/null
+++ b/drivers/mtd/maps/pnc2000.c
@@ -0,0 +1,93 @@
1/*
2 * pnc2000.c - mapper for Photron PNC-2000 board.
3 *
4 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
5 *
6 * This code is GPL
7 *
8 * $Id: pnc2000.c,v 1.17 2004/11/16 18:29:02 dwmw2 Exp $
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20
21#define WINDOW_ADDR 0xbf000000
22#define WINDOW_SIZE 0x00400000
23
24/*
25 * MAP DRIVER STUFF
26 */
27
28
29static struct map_info pnc_map = {
30 .name = "PNC-2000",
31 .size = WINDOW_SIZE,
32 .bankwidth = 4,
33 .phys = 0xFFFFFFFF,
34 .virt = (void __iomem *)WINDOW_ADDR,
35};
36
37
38/*
39 * MTD 'PARTITIONING' STUFF
40 */
41static struct mtd_partition pnc_partitions[3] = {
42 {
43 .name = "PNC-2000 boot firmware",
44 .size = 0x20000,
45 .offset = 0
46 },
47 {
48 .name = "PNC-2000 kernel",
49 .size = 0x1a0000,
50 .offset = 0x20000
51 },
52 {
53 .name = "PNC-2000 filesystem",
54 .size = 0x240000,
55 .offset = 0x1c0000
56 }
57};
58
59/*
60 * This is the master MTD device for which all the others are just
61 * auto-relocating aliases.
62 */
63static struct mtd_info *mymtd;
64
65static int __init init_pnc2000(void)
66{
67 printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
68
69 simple_map_init(&pnc_map);
70
71 mymtd = do_map_probe("cfi_probe", &pnc_map);
72 if (mymtd) {
73 mymtd->owner = THIS_MODULE;
74 return add_mtd_partitions(mymtd, pnc_partitions, 3);
75 }
76
77 return -ENXIO;
78}
79
80static void __exit cleanup_pnc2000(void)
81{
82 if (mymtd) {
83 del_mtd_partitions(mymtd);
84 map_destroy(mymtd);
85 }
86}
87
88module_init(init_pnc2000);
89module_exit(cleanup_pnc2000);
90
91MODULE_LICENSE("GPL");
92MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
93MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
new file mode 100644
index 000000000000..edd01ee4f90b
--- /dev/null
+++ b/drivers/mtd/maps/redwood.c
@@ -0,0 +1,169 @@
1/*
2 * $Id: redwood.c,v 1.10 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * drivers/mtd/maps/redwood.c
5 *
6 * FLASH map for the IBM Redwood 4/5/6 boards.
7 *
8 * Author: MontaVista Software, Inc. <source@mvista.com>
9 *
10 * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under
11 * the terms of the GNU General Public License version 2. This program
12 * is licensed "as is" without any warranty of any kind, whether express
13 * or implied.
14 */
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/partitions.h>
25
26#include <asm/io.h>
27
28#if !defined (CONFIG_REDWOOD_6)
29
30#define WINDOW_ADDR 0xffc00000
31#define WINDOW_SIZE 0x00400000
32
33#define RW_PART0_OF 0
34#define RW_PART0_SZ 0x10000
35#define RW_PART1_OF RW_PART0_SZ
36 #define RW_PART1_SZ (0x200000 - 0x10000)
37#define RW_PART2_OF 0x200000
38#define RW_PART2_SZ 0x10000
39#define RW_PART3_OF 0x210000
40 #define RW_PART3_SZ (0x200000 - (0x10000 + 0x20000))
41#define RW_PART4_OF 0x3e0000
42#define RW_PART4_SZ 0x20000
43
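/* With the sizes above, the 4MiB window lays out as:
 *   0x000000-0x00ffff  OpenBIOS Vital Product Data
 *   0x010000-0x1fffff  kernel
 *   0x200000-0x20ffff  OpenBIOS non-volatile storage
 *   0x210000-0x3dffff  filesystem
 *   0x3e0000-0x3fffff  OpenBIOS
 */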
44static struct mtd_partition redwood_flash_partitions[] = {
45 {
46 .name = "Redwood OpenBIOS Vital Product Data",
47 .offset = RW_PART0_OF,
48 .size = RW_PART0_SZ,
49 .mask_flags = MTD_WRITEABLE /* force read-only */
50 },
51 {
52 .name = "Redwood kernel",
53 .offset = RW_PART1_OF,
54 .size = RW_PART1_SZ
55 },
56 {
57 .name = "Redwood OpenBIOS non-volatile storage",
58 .offset = RW_PART2_OF,
59 .size = RW_PART2_SZ,
60 .mask_flags = MTD_WRITEABLE /* force read-only */
61 },
62 {
63 .name = "Redwood filesystem",
64 .offset = RW_PART3_OF,
65 .size = RW_PART3_SZ
66 },
67 {
68 .name = "Redwood OpenBIOS",
69 .offset = RW_PART4_OF,
70 .size = RW_PART4_SZ,
71 .mask_flags = MTD_WRITEABLE /* force read-only */
72 }
73};
74
75#else /* CONFIG_REDWOOD_6 */
76/* FIXME: the window is bigger - armin */
77#define WINDOW_ADDR 0xff800000
78#define WINDOW_SIZE 0x00800000
79
80#define RW_PART0_OF 0
81#define RW_PART0_SZ 0x400000 /* 4 MiB data */
82 #define RW_PART1_OF (RW_PART0_OF + RW_PART0_SZ)
83 #define RW_PART1_SZ 0x10000 /* 64K VPD */
84 #define RW_PART2_OF (RW_PART1_OF + RW_PART1_SZ)
85 #define RW_PART2_SZ (0x400000 - (0x10000 + 0x20000))
86 #define RW_PART3_OF (RW_PART2_OF + RW_PART2_SZ)
87#define RW_PART3_SZ 0x20000
88
89static struct mtd_partition redwood_flash_partitions[] = {
90 {
91 .name = "Redwood filesystem",
92 .offset = RW_PART0_OF,
93 .size = RW_PART0_SZ
94 },
95 {
96 .name = "Redwood OpenBIOS Vital Product Data",
97 .offset = RW_PART1_OF,
98 .size = RW_PART1_SZ,
99 .mask_flags = MTD_WRITEABLE /* force read-only */
100 },
101 {
102 .name = "Redwood kernel",
103 .offset = RW_PART2_OF,
104 .size = RW_PART2_SZ
105 },
106 {
107 .name = "Redwood OpenBIOS",
108 .offset = RW_PART3_OF,
109 .size = RW_PART3_SZ,
110 .mask_flags = MTD_WRITEABLE /* force read-only */
111 }
112};
113
114#endif /* CONFIG_REDWOOD_6 */
115
116struct map_info redwood_flash_map = {
117 .name = "IBM Redwood",
118 .size = WINDOW_SIZE,
119 .bankwidth = 2,
120 .phys = WINDOW_ADDR,
121};
122
123
124#define NUM_REDWOOD_FLASH_PARTITIONS \
125 (sizeof(redwood_flash_partitions)/sizeof(redwood_flash_partitions[0]))
126
127static struct mtd_info *redwood_mtd;
128
129int __init init_redwood_flash(void)
130{
131 printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n",
132 WINDOW_SIZE, WINDOW_ADDR);
133
134 redwood_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
135
136 if (!redwood_flash_map.virt) {
137 printk("init_redwood_flash: failed to ioremap\n");
138 return -EIO;
139 }
140 simple_map_init(&redwood_flash_map);
141
142 redwood_mtd = do_map_probe("cfi_probe",&redwood_flash_map);
143
144 if (redwood_mtd) {
145 redwood_mtd->owner = THIS_MODULE;
146 return add_mtd_partitions(redwood_mtd,
147 redwood_flash_partitions,
148 NUM_REDWOOD_FLASH_PARTITIONS);
149 }
150
151 return -ENXIO;
152}
153
154static void __exit cleanup_redwood_flash(void)
155{
156 if (redwood_mtd) {
157 del_mtd_partitions(redwood_mtd);
158 /* moved iounmap after map_destroy - armin */
159 map_destroy(redwood_mtd);
160 iounmap((void *)redwood_flash_map.virt);
161 }
162}
163
164module_init(init_redwood_flash);
165module_exit(cleanup_redwood_flash);
166
167MODULE_LICENSE("GPL");
168MODULE_AUTHOR("MontaVista Software <source@mvista.com>");
169MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards");
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
new file mode 100644
index 000000000000..809a0c8e7aaf
--- /dev/null
+++ b/drivers/mtd/maps/rpxlite.c
@@ -0,0 +1,66 @@
1/*
2 * $Id: rpxlite.c,v 1.22 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Handle mapping of the flash on the RPX Lite and CLLF boards
5 */
6
7#include <linux/module.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <asm/io.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14
15
16#define WINDOW_ADDR 0xfe000000
17#define WINDOW_SIZE 0x800000
18
19static struct mtd_info *mymtd;
20
21static struct map_info rpxlite_map = {
22 .name = "RPX",
23 .size = WINDOW_SIZE,
24 .bankwidth = 4,
25 .phys = WINDOW_ADDR,
26};
27
28int __init init_rpxlite(void)
29{
30 printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
31 rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
32
33 if (!rpxlite_map.virt) {
34 printk("Failed to ioremap\n");
35 return -EIO;
36 }
37 simple_map_init(&rpxlite_map);
38 mymtd = do_map_probe("cfi_probe", &rpxlite_map);
39 if (mymtd) {
40 mymtd->owner = THIS_MODULE;
41 add_mtd_device(mymtd);
42 return 0;
43 }
44
45 iounmap((void *)rpxlite_map.virt);
46 return -ENXIO;
47}
48
49static void __exit cleanup_rpxlite(void)
50{
51 if (mymtd) {
52 del_mtd_device(mymtd);
53 map_destroy(mymtd);
54 }
55 if (rpxlite_map.virt) {
56 iounmap((void *)rpxlite_map.virt);
57 rpxlite_map.virt = 0;
58 }
59}
60
61module_init(init_rpxlite);
62module_exit(cleanup_rpxlite);
63
64MODULE_LICENSE("GPL");
65MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
66MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
new file mode 100644
index 000000000000..0a6f861c4cd6
--- /dev/null
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -0,0 +1,453 @@
1/*
2 * Flash memory access on SA11x0 based devices
3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 *
6 * $Id: sa1100-flash.c,v 1.47 2004/11/01 13:44:36 rmk Exp $
7 */
8#include <linux/config.h>
9#include <linux/module.h>
10#include <linux/types.h>
11#include <linux/ioport.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/device.h>
17#include <linux/err.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h>
22#include <linux/mtd/concat.h>
23
24#include <asm/mach-types.h>
25#include <asm/io.h>
26#include <asm/sizes.h>
27#include <asm/mach/flash.h>
28
29#if 0
30/*
31 * This is here for documentation purposes only - until these people
32 * submit their machine types. It will be gone January 2005.
33 */
34static struct mtd_partition consus_partitions[] = {
35 {
36 .name = "Consus boot firmware",
37 .offset = 0,
38 .size = 0x00040000,
39 .mask_flags = MTD_WRITEABLE, /* force read-only */
40 }, {
41 .name = "Consus kernel",
42 .offset = 0x00040000,
43 .size = 0x00100000,
44 .mask_flags = 0,
45 }, {
46 .name = "Consus disk",
47 .offset = 0x00140000,
48 /* The rest (up to 16M) for jffs. We could put 0 and
49 make it find the size automatically, but right now
50 I have 32 megs. jffs will use all 32 megs if given
51 the chance, and this leads to horrible problems
52 when you try to re-flash the image because blob
53 won't erase the whole partition. */
54 .size = 0x01000000 - 0x00140000,
55 .mask_flags = 0,
56 }, {
57 /* this disk is a secondary disk, which can be used as
58 needed, for simplicity, make it the size of the other
59 consus partition, although realistically it could be
60 the remainder of the disk (depending on the file
61 system used) */
62 .name = "Consus disk2",
63 .offset = 0x01000000,
64 .size = 0x01000000 - 0x00140000,
65 .mask_flags = 0,
66 }
67};
68
69/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
70static struct mtd_partition frodo_partitions[] =
71{
72 {
73 .name = "bootloader",
74 .size = 0x00040000,
75 .offset = 0x00000000,
76 .mask_flags = MTD_WRITEABLE
77 }, {
78 .name = "bootloader params",
79 .size = 0x00040000,
80 .offset = MTDPART_OFS_APPEND,
81 .mask_flags = MTD_WRITEABLE
82 }, {
83 .name = "kernel",
84 .size = 0x00100000,
85 .offset = MTDPART_OFS_APPEND,
86 .mask_flags = MTD_WRITEABLE
87 }, {
88 .name = "ramdisk",
89 .size = 0x00400000,
90 .offset = MTDPART_OFS_APPEND,
91 .mask_flags = MTD_WRITEABLE
92 }, {
93 .name = "file system",
94 .size = MTDPART_SIZ_FULL,
95 .offset = MTDPART_OFS_APPEND
96 }
97};
98
99static struct mtd_partition jornada56x_partitions[] = {
100 {
101 .name = "bootldr",
102 .size = 0x00040000,
103 .offset = 0,
104 .mask_flags = MTD_WRITEABLE,
105 }, {
106 .name = "rootfs",
107 .size = MTDPART_SIZ_FULL,
108 .offset = MTDPART_OFS_APPEND,
109 }
110};
111
112static void jornada56x_set_vpp(int vpp)
113{
114 if (vpp)
115 GPSR = GPIO_GPIO26;
116 else
117 GPCR = GPIO_GPIO26;
118 GPDR |= GPIO_GPIO26;
119}
120
121/*
122 * Machine Phys Size set_vpp
123 * Consus : SA1100_CS0_PHYS SZ_32M
124 * Frodo : SA1100_CS0_PHYS SZ_32M
125 * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
126 */
127#endif
128
129struct sa_subdev_info {
130 char name[16];
131 struct map_info map;
132 struct mtd_info *mtd;
133 struct flash_platform_data *data;
134};
135
136struct sa_info {
137 struct mtd_partition *parts;
138 struct mtd_info *mtd;
139 int num_subdev;
140 struct sa_subdev_info subdev[0];
141};
142
143static void sa1100_set_vpp(struct map_info *map, int on)
144{
145 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
146 subdev->data->set_vpp(on);
147}
148
149static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
150{
151 if (subdev->mtd)
152 map_destroy(subdev->mtd);
153 if (subdev->map.virt)
154 iounmap(subdev->map.virt);
155 release_mem_region(subdev->map.phys, subdev->map.size);
156}
157
158static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res)
159{
160 unsigned long phys;
161 unsigned int size;
162 int ret;
163
164 phys = res->start;
165 size = res->end - phys + 1;
166
167 /*
168 * Retrieve the bankwidth from the MSC registers.
169 * We currently only implement CS0 and CS1 here.
170 */
171 switch (phys) {
172 default:
173 printk(KERN_WARNING "SA1100 flash: unknown base address "
174 "0x%08lx, assuming CS0\n", phys);
175
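		/* fall through - treat an unknown base like CS0 */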
176 case SA1100_CS0_PHYS:
177 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
178 break;
179
180 case SA1100_CS1_PHYS:
181 subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
182 break;
183 }
184
185 if (!request_mem_region(phys, size, subdev->name)) {
186 ret = -EBUSY;
187 goto out;
188 }
189
190 if (subdev->data->set_vpp)
191 subdev->map.set_vpp = sa1100_set_vpp;
192
193 subdev->map.phys = phys;
194 subdev->map.size = size;
195 subdev->map.virt = ioremap(phys, size);
196 if (!subdev->map.virt) {
197 ret = -ENOMEM;
198 goto err;
199 }
200
201 simple_map_init(&subdev->map);
202
203 /*
204 * Now let's probe for the actual flash. Do it here since
205 * specific machine settings might have been set above.
206 */
207 subdev->mtd = do_map_probe(subdev->data->map_name, &subdev->map);
208 if (subdev->mtd == NULL) {
209 ret = -ENXIO;
210 goto err;
211 }
212 subdev->mtd->owner = THIS_MODULE;
213
214 printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %dMiB, "
215 "%d-bit\n", phys, subdev->mtd->size >> 20,
216 subdev->map.bankwidth * 8);
217
218 return 0;
219
220 err:
221 sa1100_destroy_subdev(subdev);
222 out:
223 return ret;
224}
225
226static void sa1100_destroy(struct sa_info *info)
227{
228 int i;
229
230 if (info->mtd) {
231 del_mtd_partitions(info->mtd);
232
233#ifdef CONFIG_MTD_CONCAT
234 if (info->mtd != info->subdev[0].mtd)
235 mtd_concat_destroy(info->mtd);
236#endif
237 }
238
239 if (info->parts)
240 kfree(info->parts);
241
242 for (i = info->num_subdev - 1; i >= 0; i--)
243 sa1100_destroy_subdev(&info->subdev[i]);
244 kfree(info);
245}
246
247static struct sa_info *__init
248sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *flash)
249{
250 struct sa_info *info;
251 int nr, size, i, ret = 0;
252
253 /*
254 * Count number of devices.
255 */
256 for (nr = 0; ; nr++)
257 if (!platform_get_resource(pdev, IORESOURCE_MEM, nr))
258 break;
259
260 if (nr == 0) {
261 ret = -ENODEV;
262 goto out;
263 }
264
265 size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
266
267 /*
268 * Allocate the map_info structs in one go.
269 */
270 info = kmalloc(size, GFP_KERNEL);
271 if (!info) {
272 ret = -ENOMEM;
273 goto out;
274 }
275
276 memset(info, 0, size);
277
278 /*
279 * Claim and then map the memory regions.
280 */
281 for (i = 0; i < nr; i++) {
282 struct sa_subdev_info *subdev = &info->subdev[i];
283 struct resource *res;
284
285 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
286 if (!res)
287 break;
288
289 subdev->map.name = subdev->name;
290 sprintf(subdev->name, "sa1100-%d", i);
291 subdev->data = flash;
292
293 ret = sa1100_probe_subdev(subdev, res);
294 if (ret)
295 break;
296 }
297
298 info->num_subdev = i;
299
300 /*
301 * ENXIO is special. It means we didn't find a chip when we probed.
302 */
303 if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0))
304 goto err;
305
306 /*
307 * If we found one device, don't bother with concat support. If
308 * we found multiple devices, use concat if we have it available,
309 * otherwise fail. Either way, it'll be called "sa1100".
310 */
311 if (info->num_subdev == 1) {
312 strcpy(info->subdev[0].name, "sa1100");
313 info->mtd = info->subdev[0].mtd;
314 ret = 0;
315 } else if (info->num_subdev > 1) {
316#ifdef CONFIG_MTD_CONCAT
317 struct mtd_info *cdev[nr];
318 /*
319 * We detected multiple devices. Concatenate them together.
320 */
321 for (i = 0; i < info->num_subdev; i++)
322 cdev[i] = info->subdev[i].mtd;
323
324 info->mtd = mtd_concat_create(cdev, info->num_subdev,
325 "sa1100");
326 if (info->mtd == NULL)
327 ret = -ENXIO;
328#else
329 printk(KERN_ERR "SA1100 flash: multiple devices "
330 "found but MTD concat support disabled.\n");
331 ret = -ENXIO;
332#endif
333 }
334
335 if (ret == 0)
336 return info;
337
338 err:
339 sa1100_destroy(info);
340 out:
341 return ERR_PTR(ret);
342}
343
344static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
345
346static int __init sa1100_mtd_probe(struct device *dev)
347{
348 struct platform_device *pdev = to_platform_device(dev);
349 struct flash_platform_data *flash = pdev->dev.platform_data;
350 struct mtd_partition *parts;
351 const char *part_type = NULL;
352 struct sa_info *info;
353 int err, nr_parts = 0;
354
355 if (!flash)
356 return -ENODEV;
357
358 info = sa1100_setup_mtd(pdev, flash);
359 if (IS_ERR(info)) {
360 err = PTR_ERR(info);
361 goto out;
362 }
363
364 /*
365 * Partition selection stuff.
366 */
367#ifdef CONFIG_MTD_PARTITIONS
368 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
369 if (nr_parts > 0) {
370 info->parts = parts;
371 part_type = "dynamic";
372 } else
373#endif
374 {
375 parts = flash->parts;
376 nr_parts = flash->nr_parts;
377 part_type = "static";
378 }
379
380 if (nr_parts == 0) {
381 printk(KERN_NOTICE "SA1100 flash: no partition info "
382 "available, registering whole flash\n");
383 add_mtd_device(info->mtd);
384 } else {
385 printk(KERN_NOTICE "SA1100 flash: using %s partition "
386 "definition\n", part_type);
387 add_mtd_partitions(info->mtd, parts, nr_parts);
388 }
389
390 dev_set_drvdata(dev, info);
391 err = 0;
392
393 out:
394 return err;
395}
396
397static int __exit sa1100_mtd_remove(struct device *dev)
398{
399 struct sa_info *info = dev_get_drvdata(dev);
400 dev_set_drvdata(dev, NULL);
401 sa1100_destroy(info);
402 return 0;
403}
404
405#ifdef CONFIG_PM
406static int sa1100_mtd_suspend(struct device *dev, u32 state, u32 level)
407{
408 struct sa_info *info = dev_get_drvdata(dev);
409 int ret = 0;
410
411 if (info && level == SUSPEND_SAVE_STATE)
412 ret = info->mtd->suspend(info->mtd);
413
414 return ret;
415}
416
417static int sa1100_mtd_resume(struct device *dev, u32 level)
418{
419 struct sa_info *info = dev_get_drvdata(dev);
420 if (info && level == RESUME_RESTORE_STATE)
421 info->mtd->resume(info->mtd);
422 return 0;
423}
424#else
425#define sa1100_mtd_suspend NULL
426#define sa1100_mtd_resume NULL
427#endif
428
429static struct device_driver sa1100_mtd_driver = {
430 .name = "flash",
431 .bus = &platform_bus_type,
432 .probe = sa1100_mtd_probe,
433 .remove = __exit_p(sa1100_mtd_remove),
434 .suspend = sa1100_mtd_suspend,
435 .resume = sa1100_mtd_resume,
436};
437
438static int __init sa1100_mtd_init(void)
439{
440 return driver_register(&sa1100_mtd_driver);
441}
442
443static void __exit sa1100_mtd_exit(void)
444{
445 driver_unregister(&sa1100_mtd_driver);
446}
447
448module_init(sa1100_mtd_init);
449module_exit(sa1100_mtd_exit);
450
451MODULE_AUTHOR("Nicolas Pitre");
452MODULE_DESCRIPTION("SA1100 CFI map driver");
453MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
new file mode 100644
index 000000000000..da684d3384e9
--- /dev/null
+++ b/drivers/mtd/maps/sbc8240.c
@@ -0,0 +1,247 @@
1/*
2 * Handle mapping of the flash memory access routines on the SBC8240 board.
3 *
4 * Carolyn Smith, Tektronix, Inc.
5 *
6 * This code is GPLed
7 *
8 * $Id: sbc8240.c,v 1.4 2004/07/12 22:38:29 dwmw2 Exp $
9 *
10 */
11
12/*
13 * The SBC8240 has 2 flash banks.
14 * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors.
15 * It contains the U-Boot code (7 sectors) and the environment (1 sector).
16 * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector,
17 * 2 x 8 KiB sectors, 1 x 16 KiB sector.
18 * Both parts are JEDEC compatible.
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <asm/io.h>
26
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/map.h>
29#include <linux/mtd/cfi.h>
30
31#ifdef CONFIG_MTD_PARTITIONS
32#include <linux/mtd/partitions.h>
33#endif
34
35#define DEBUG
36
37#ifdef DEBUG
38# define debugk(fmt,args...) printk(fmt ,##args)
39#else
40# define debugk(fmt,args...)
41#endif
42
43
44#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */
45#define WINDOW_SIZE0 0x00080000
46#define BUSWIDTH0 1
47
48#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */
49#define WINDOW_SIZE1 0x00400000
50#define BUSWIDTH1 8
51
52#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */
53#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */
54
55
56static struct map_info sbc8240_map[2] = {
57 {
58 .name = "sbc8240 Flash Bank #0",
59 .size = WINDOW_SIZE0,
60 .bankwidth = BUSWIDTH0,
61 },
62 {
63 .name = "sbc8240 Flash Bank #1",
64 .size = WINDOW_SIZE1,
65 .bankwidth = BUSWIDTH1,
66 }
67};
68
69#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info))
70
71/*
72 * The following defines the partition layout of SBC8240 boards.
73 *
74 * See include/linux/mtd/partitions.h for definition of the
75 * mtd_partition structure.
76 *
77 * The *_max_flash_size is the maximum possible mapped flash size
78 * which is not necessarily the actual flash size. It must correspond
79 * to the value specified in the mapping definition defined by the
80 * "struct map_desc *_io_desc" for the corresponding machine.
81 */
82
83#ifdef CONFIG_MTD_PARTITIONS
84
85static struct mtd_partition sbc8240_uboot_partitions [] = {
86 /* Bank 0 */
87 {
88 .name = "U-boot", /* U-Boot Firmware */
89 .offset = 0,
90 .size = 0x00070000, /* 7 x 64 KiB sectors */
91 .mask_flags = MTD_WRITEABLE, /* force read-only */
92 },
93 {
94 .name = "environment", /* U-Boot environment */
95 .offset = 0x00070000,
96 .size = 0x00010000, /* 1 x 64 KiB sector */
97 },
98};
99
100static struct mtd_partition sbc8240_fs_partitions [] = {
101 {
102 .name = "jffs", /* JFFS filesystem */
103 .offset = 0,
104 .size = 0x003C0000, /* 4 * 15 * 64KiB */
105 },
106 {
107 .name = "tmp32",
108 .offset = 0x003C0000,
109 .size = 0x00020000, /* 4 * 32KiB */
110 },
111 {
112 .name = "tmp8a",
113 .offset = 0x003E0000,
114 .size = 0x00008000, /* 4 * 8KiB */
115 },
116 {
117 .name = "tmp8b",
118 .offset = 0x003E8000,
119 .size = 0x00008000, /* 4 * 8KiB */
120 },
121 {
122 .name = "tmp16",
123 .offset = 0x003F0000,
124 .size = 0x00010000, /* 4 * 16KiB */
125 }
126};
127
128#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
129
130/* trivial struct to describe partition information */
131struct mtd_part_def
132{
133 int nums;
134 unsigned char *type;
135 struct mtd_partition* mtd_part;
136};
137
138static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS];
139static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
140
141
142#endif /* CONFIG_MTD_PARTITIONS */
143
144
145int __init init_sbc8240_mtd (void)
146{
147 static struct _cjs {
148 u_long addr;
149 u_long size;
150 } pt[NUM_FLASH_BANKS] = {
151 {
152 .addr = WINDOW_ADDR0,
153 .size = WINDOW_SIZE0
154 },
155 {
156 .addr = WINDOW_ADDR1,
157 .size = WINDOW_SIZE1
158 },
159 };
160
161 int devicesfound = 0;
162 int i;
163
164 for (i = 0; i < NUM_FLASH_BANKS; i++) {
165 printk (KERN_NOTICE MSG_PREFIX
166 "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr);
167
168 sbc8240_map[i].virt =
169 ioremap (pt[i].addr, pt[i].size);
170 if (!sbc8240_map[i].virt) {
171 printk (MSG_PREFIX "failed to ioremap\n");
172 return -EIO;
173 }
174 simple_map_init(&sbc8240_mtd[i]);
175
176 sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]);
177
178 if (sbc8240_mtd[i]) {
179 sbc8240_mtd[i]->owner = THIS_MODULE;
180 devicesfound++;
181 }
182 }
183
184 if (!devicesfound) {
185 printk(KERN_NOTICE MSG_PREFIX
186 "No suppported flash chips found!\n");
187 return -ENXIO;
188 }
189
190#ifdef CONFIG_MTD_PARTITIONS
191 sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
192 sbc8240_part_banks[0].type = "static image";
193 sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions);
194 sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
195 sbc8240_part_banks[1].type = "static file system";
196 sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions);
197
198 for (i = 0; i < NUM_FLASH_BANKS; i++) {
199
200 if (!sbc8240_mtd[i]) continue;
201 if (sbc8240_part_banks[i].nums == 0) {
202 printk (KERN_NOTICE MSG_PREFIX
203 "No partition info available, registering whole device\n");
204 add_mtd_device(sbc8240_mtd[i]);
205 } else {
206 printk (KERN_NOTICE MSG_PREFIX
207 "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name);
208 add_mtd_partitions (sbc8240_mtd[i],
209 sbc8240_part_banks[i].mtd_part,
210 sbc8240_part_banks[i].nums);
211 }
212 }
213#else
214 printk(KERN_NOTICE MSG_PREFIX
215 "Registering %d flash banks at once\n", devicesfound);
216
217 for (i = 0; i < devicesfound; i++) {
218 add_mtd_device(sbc8240_mtd[i]);
219 }
220#endif /* CONFIG_MTD_PARTITIONS */
221
222 return devicesfound == 0 ? -ENXIO : 0;
223}
224
225static void __exit cleanup_sbc8240_mtd (void)
226{
227 int i;
228
229 for (i = 0; i < NUM_FLASH_BANKS; i++) {
230 if (sbc8240_mtd[i]) {
231 del_mtd_device (sbc8240_mtd[i]);
232 map_destroy (sbc8240_mtd[i]);
233 }
234 if (sbc8240_map[i].virt) {
235 iounmap (sbc8240_map[i].virt);
236 sbc8240_map[i].virt = NULL;
237 }
238 }
239}
240
241module_init (init_sbc8240_mtd);
242module_exit (cleanup_sbc8240_mtd);
243
244MODULE_LICENSE ("GPL");
245MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>");
246MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards");
247
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
new file mode 100644
index 000000000000..65add28bde14
--- /dev/null
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -0,0 +1,239 @@
1/* sbc_gxx.c -- MTD map driver for Arcom Control Systems SBC-MediaGX,
2 SBC-GXm and SBC-GX1 series boards.
3
4 Copyright (C) 2001 Arcom Control System Ltd
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19
20 $Id: sbc_gxx.c,v 1.33 2004/11/28 09:40:40 dwmw2 Exp $
21
22The SBC-MediaGX / SBC-GXx has up to 16 MiB of
23Intel StrataFlash (28F320/28F640) in x8 mode.
24
25This driver uses the CFI probe and Intel Extended Command Set drivers.
26
27The flash is accessed as follows:
28
29 16 KiB memory window at 0xdc000-0xdffff
30
31 Two IO address locations for paging
32
33 0x258
34 bit 0-7: address bit 14-21
35 0x259
36 bit 0-1: address bit 22-23
37 bit 7: 0 - reset/powered down
38 1 - device enabled
39
40 The single flash device is divided into 3 partitions which appear as
41separate MTD devices.
42
4325/04/2001 AJL (Arcom) Modified signon strings and partition sizes
44 (to support bzImages up to 638KiB-ish)
45*/
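/* Worked example of the paging scheme above: flash offset 0x123456 sits in
   16KiB page 0x48 (0x123456 >> 14); writing 0x48 to port 0x258 and 0x80 to
   port 0x259 (device-enable bit set) maps that page at 0xdc000, and the byte
   is then read at window offset 0x3456 (0x123456 & 0x3fff). */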
46
47// Includes
48
49#include <linux/module.h>
50#include <linux/slab.h>
51#include <linux/ioport.h>
52#include <linux/init.h>
53#include <asm/io.h>
54
55#include <linux/mtd/mtd.h>
56#include <linux/mtd/map.h>
57#include <linux/mtd/partitions.h>
58
59// Defines
60
61// - Hardware specific
62
63#define WINDOW_START 0xdc000
64
65/* Number of bits in offset. */
66#define WINDOW_SHIFT 14
67#define WINDOW_LENGTH (1 << WINDOW_SHIFT)
68
69/* The bits for the offset into the window. */
70#define WINDOW_MASK (WINDOW_LENGTH-1)
71#define PAGE_IO 0x258
72#define PAGE_IO_SIZE 2
73
74/* bit 7 of 0x259 must be 1 to enable device. */
75#define DEVICE_ENABLE 0x8000
76
77// - Flash / Partition sizing
78
79#define MAX_SIZE_KiB 16384
80#define BOOT_PARTITION_SIZE_KiB 768
81#define DATA_PARTITION_SIZE_KiB 1280
82#define APP_PARTITION_SIZE_KiB 6144
83
84// Globals
85
86static volatile int page_in_window = -1; // Current page in window.
87static void __iomem *iomapadr;
88static DEFINE_SPINLOCK(sbc_gxx_spin);
89
90 /* partition_info gives details on the logical partitions that split the
91 * single flash device into. If the size is zero we use up to the end of the
92 * device. */
93static struct mtd_partition partition_info[]={
94 { .name = "SBC-GXx flash boot partition",
95 .offset = 0,
96 .size = BOOT_PARTITION_SIZE_KiB*1024 },
97 { .name = "SBC-GXx flash data partition",
98 .offset = BOOT_PARTITION_SIZE_KiB*1024,
99 .size = (DATA_PARTITION_SIZE_KiB)*1024 },
100 { .name = "SBC-GXx flash application partition",
101 .offset = (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
102};
103
104#define NUM_PARTITIONS 3
105
106static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
107{
108 unsigned long page = ofs >> WINDOW_SHIFT;
109
110 if( page!=page_in_window ) {
111 outw( page | DEVICE_ENABLE, PAGE_IO );
112 page_in_window = page;
113 }
114}
115
116
117static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
118{
119 map_word ret;
120 spin_lock(&sbc_gxx_spin);
121 sbc_gxx_page(map, ofs);
122 ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
123 spin_unlock(&sbc_gxx_spin);
124 return ret;
125}
126
127static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
128{
129 while(len) {
130 unsigned long thislen = len;
131 if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
132 thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
133
134 spin_lock(&sbc_gxx_spin);
135 sbc_gxx_page(map, from);
136 memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
137 spin_unlock(&sbc_gxx_spin);
138 to += thislen;
139 from += thislen;
140 len -= thislen;
141 }
142}
143
144static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
145{
146 spin_lock(&sbc_gxx_spin);
147 sbc_gxx_page(map, adr);
148 writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
149 spin_unlock(&sbc_gxx_spin);
150}
151
152static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
153{
154 while(len) {
155 unsigned long thislen = len;
156 if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
157 thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
158
159 spin_lock(&sbc_gxx_spin);
160 sbc_gxx_page(map, to);
161 memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
162 spin_unlock(&sbc_gxx_spin);
163 to += thislen;
164 from += thislen;
165 len -= thislen;
166 }
167}
168
169static struct map_info sbc_gxx_map = {
170 .name = "SBC-GXx flash",
171 .phys = NO_XIP,
172 .size = MAX_SIZE_KiB*1024, /* this must be set to the maximum possible amount
173 of flash so the CFI probe routines find all
174 the chips */
175 .bankwidth = 1,
176 .read = sbc_gxx_read8,
177 .copy_from = sbc_gxx_copy_from,
178 .write = sbc_gxx_write8,
179 .copy_to = sbc_gxx_copy_to
180};
181
182/* MTD device for all of the flash. */
183static struct mtd_info *all_mtd;
184
185static void cleanup_sbc_gxx(void)
186{
187 if( all_mtd ) {
188 del_mtd_partitions( all_mtd );
189 map_destroy( all_mtd );
190 }
191
192 iounmap(iomapadr);
193 release_region(PAGE_IO,PAGE_IO_SIZE);
194}
195
196static int __init init_sbc_gxx(void)
197{
198 iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
199 if (!iomapadr) {
200 printk( KERN_ERR"%s: failed to ioremap memory region\n",
201 sbc_gxx_map.name );
202 return -EIO;
203 }
204
205 if (!request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash")) {
206 printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
207 sbc_gxx_map.name,
208 PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
209 iounmap(iomapadr);
210 return -EAGAIN;
211 }
212
213
214 printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
215 sbc_gxx_map.name,
216 PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
217 WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
218
219 /* Probe for chip. */
220 all_mtd = do_map_probe( "cfi_probe", &sbc_gxx_map );
221 if( !all_mtd ) {
222 cleanup_sbc_gxx();
223 return -ENXIO;
224 }
225
226 all_mtd->owner = THIS_MODULE;
227
228 /* Create MTD devices for each partition. */
229 add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS );
230
231 return 0;
232}
233
234module_init(init_sbc_gxx);
235module_exit(cleanup_sbc_gxx);
236
237MODULE_LICENSE("GPL");
238MODULE_AUTHOR("Arcom Control Systems Ltd.");
239MODULE_DESCRIPTION("MTD map driver for SBC-GXm and SBC-GX1 series boards");
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
new file mode 100644
index 000000000000..a06ed21e7ed1
--- /dev/null
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -0,0 +1,304 @@
1/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform
2 *
3 * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
18 *
19 * $Id: sc520cdp.c,v 1.21 2004/12/13 10:27:08 dedekind Exp $
20 *
21 *
22 * The SC520CDP is an evaluation board for the Elan SC520 processor available
23 * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
24 * and up to 512 KiB of 8-bit DIL Flash ROM.
25 * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
26 */
27
28#include <linux/config.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <asm/io.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h>
36#include <linux/mtd/concat.h>
37
38/*
39** The Embedded Systems BIOS decodes the first FLASH starting at
40** 0x8400000. This is a *terrible* place for it because accessing
41** the flash at this location causes the A22 address line to be high
42 ** (that's what 0x8400000 works out to in binary). But this is the highest
43** order address line on the raw flash devices themselves!!
44** This causes the top HALF of the flash to be accessed first. Beyond
45 ** the physical limits of the flash, the flash chip aliases over (to
46 ** 0x8800000) which causes the bottom half to be accessed. This splits the
47** flash into two and inverts it! If you then try to access this from another
48** program that does NOT do this insanity, then you *will* access the
49** first half of the flash, but not find what you expect there. That
50** stuff is in the *second* half! Similarly, the address used by the
51** BIOS for the second FLASH bank is also quite a bad choice.
52** If REPROGRAM_PAR is defined below (the default), then this driver will
53** choose more useful addresses for the FLASH banks by reprogramming the
54** responsible PARxx registers in the SC520's MMCR region. This will
55** cause the settings to be incompatible with the BIOS's settings, which
56** shouldn't be a problem since you are running Linux, (i.e. the BIOS is
57** not much use anyway). However, if you need to be compatible with
58** the BIOS for some reason, just undefine REPROGRAM_PAR.
59*/
60#define REPROGRAM_PAR
61
62
63
64#ifdef REPROGRAM_PAR
65
66/* These are the addresses we want.. */
67#define WINDOW_ADDR_0 0x08800000
68#define WINDOW_ADDR_1 0x09000000
69#define WINDOW_ADDR_2 0x09800000
70
71/* .. and these are the addresses the BIOS gives us */
72#define WINDOW_ADDR_0_BIOS 0x08400000
73#define WINDOW_ADDR_1_BIOS 0x08c00000
74#define WINDOW_ADDR_2_BIOS 0x09400000
75
76#else
77
78#define WINDOW_ADDR_0 0x08400000
79#define WINDOW_ADDR_1 0x08C00000
80#define WINDOW_ADDR_2 0x09400000
81
82#endif
83
84#define WINDOW_SIZE_0 0x00800000
85#define WINDOW_SIZE_1 0x00800000
86#define WINDOW_SIZE_2 0x00080000
87
88
89static struct map_info sc520cdp_map[] = {
90 {
91 .name = "SC520CDP Flash Bank #0",
92 .size = WINDOW_SIZE_0,
93 .bankwidth = 4,
94 .phys = WINDOW_ADDR_0
95 },
96 {
97 .name = "SC520CDP Flash Bank #1",
98 .size = WINDOW_SIZE_1,
99 .bankwidth = 4,
100 .phys = WINDOW_ADDR_1
101 },
102 {
103 .name = "SC520CDP DIL Flash",
104 .size = WINDOW_SIZE_2,
105 .bankwidth = 1,
106 .phys = WINDOW_ADDR_2
107 },
108};
109
110#define NUM_FLASH_BANKS (sizeof(sc520cdp_map)/sizeof(struct map_info))
111
112static struct mtd_info *mymtd[NUM_FLASH_BANKS];
113static struct mtd_info *merged_mtd;
114
115#ifdef REPROGRAM_PAR
116
117/*
118** The SC520 MMCR (memory mapped control register) region resides
119** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers
120** are at offset 0x88 in the MMCR:
121*/
122#define SC520_MMCR_BASE 0xFFFEF000
123#define SC520_MMCR_EXTENT 0x1000
124#define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x))
125#define NUM_SC520_PAR 16 /* total number of PAR registers */
126
127/*
128** The highest three bits in a PAR register determine what target
129** device is controlled by this PAR. Here, only ROMCS? and BOOTCS
130** devices are of interest.
131*/
132#define SC520_PAR_BOOTCS (0x4<<29)
133#define SC520_PAR_ROMCS0 (0x5<<29)
134#define SC520_PAR_ROMCS1 (0x6<<29)
135#define SC520_PAR_TRGDEV (0x7<<29)
136
137/*
138** Bits 28 thru 26 determine some attributes for the
139** region controlled by the PAR. (We only use non-cacheable)
140*/
141#define SC520_PAR_WRPROT (1<<26) /* write protected */
142#define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */
143#define SC520_PAR_NOEXEC (1<<28) /* code execution denied */
144
145
146/*
147** Bit 25 determines the granularity: 4K or 64K
148*/
149#define SC520_PAR_PG_SIZ4 (0<<25)
150#define SC520_PAR_PG_SIZ64 (1<<25)
151
152/*
153** Build a value to be written into a PAR register.
154** We only need ROM entries, 64K page size:
155*/
156#define SC520_PAR_ENTRY(trgdev, address, size) \
157 ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \
158 (address) >> 16 | (((size) >> 16) - 1) << 14)
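/*
** For example, the ROMCS0 entry built below from WINDOW_ADDR_0/WINDOW_SIZE_0
** (0x08800000, 8MiB) works out to
** 0xA0000000 | 0x08000000 | 0x02000000 | (0x7f << 14) | 0x0880 = 0xAA1FC880,
** i.e. target ROMCS0, non-cacheable, 64K pages, start page 0x0880, 128 pages.
*/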
159
160struct sc520_par_table
161{
162 unsigned long trgdev;
163 unsigned long new_par;
164 unsigned long default_address;
165};
166
167static struct sc520_par_table par_table[NUM_FLASH_BANKS] =
168{
169 { /* Flash Bank #0: selected by ROMCS0 */
170 SC520_PAR_ROMCS0,
171 SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0),
172 WINDOW_ADDR_0_BIOS
173 },
174 { /* Flash Bank #1: selected by ROMCS1 */
175 SC520_PAR_ROMCS1,
176 SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1),
177 WINDOW_ADDR_1_BIOS
178 },
179 { /* DIL (BIOS) Flash: selected by BOOTCS */
180 SC520_PAR_BOOTCS,
181 SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2),
182 WINDOW_ADDR_2_BIOS
183 }
184};
185
186
187static void sc520cdp_setup_par(void)
188{
189 volatile unsigned long __iomem *mmcr;
190 unsigned long mmcr_val;
191 int i, j;
192
193 /* map in SC520's MMCR area */
194 mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
195 if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
196 /* force physical address fields to BIOS defaults: */
197 for(i = 0; i < NUM_FLASH_BANKS; i++)
198 sc520cdp_map[i].phys = par_table[i].default_address;
199 return;
200 }
201
202 /*
203 ** Find the PARxx registers that are responsible for activating
204 ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a
205 ** new value from the table.
206 */
207 for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
208 for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
209 mmcr_val = mmcr[SC520_PAR(j)];
210 /* if target device field matches, reprogram the PAR */
211 if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
212 {
213 mmcr[SC520_PAR(j)] = par_table[i].new_par;
214 break;
215 }
216 }
217 if(j == NUM_SC520_PAR)
218 { /* no matching PAR found: try default BIOS address */
219 printk(KERN_NOTICE "Could not find PAR responsible for %s\n",
220 sc520cdp_map[i].name);
221 printk(KERN_NOTICE "Trying default address 0x%lx\n",
222 par_table[i].default_address);
223 sc520cdp_map[i].phys = par_table[i].default_address;
224 }
225 }
226 iounmap(mmcr);
227}
228#endif
229
230
231static int __init init_sc520cdp(void)
232{
233 int i, devices_found = 0;
234
235#ifdef REPROGRAM_PAR
236 /* reprogram PAR registers so flash appears at the desired addresses */
237 sc520cdp_setup_par();
238#endif
239
240 for (i = 0; i < NUM_FLASH_BANKS; i++) {
241 printk(KERN_NOTICE "SC520 CDP flash device: 0x%lx at 0x%lx\n",
242 sc520cdp_map[i].size, sc520cdp_map[i].phys);
243
244 sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
245
246 if (!sc520cdp_map[i].virt) {
247 printk("Failed to ioremap_nocache\n");
248 return -EIO;
249 }
250
251 simple_map_init(&sc520cdp_map[i]);
252
253 mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
254 if(!mymtd[i])
255 mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]);
256 if(!mymtd[i])
257 mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
258
259 if (mymtd[i]) {
260 mymtd[i]->owner = THIS_MODULE;
261 ++devices_found;
262 }
263 else {
264 iounmap(sc520cdp_map[i].virt);
265 }
266 }
267 if(devices_found >= 2) {
268 /* Combine the two flash banks into a single MTD device & register it: */
269 merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
270 if(merged_mtd)
271 add_mtd_device(merged_mtd);
272 }
273 if(devices_found == 3) /* register the third (DIL-Flash) device */
274 add_mtd_device(mymtd[2]);
275 return(devices_found ? 0 : -ENXIO);
276}
277
278static void __exit cleanup_sc520cdp(void)
279{
280 int i;
281
282 if (merged_mtd) {
283 del_mtd_device(merged_mtd);
284 mtd_concat_destroy(merged_mtd);
285 }
286 if (mymtd[2])
287 del_mtd_device(mymtd[2]);
288
289 for (i = 0; i < NUM_FLASH_BANKS; i++) {
290 if (mymtd[i])
291 map_destroy(mymtd[i]);
292 if (sc520cdp_map[i].virt) {
293 iounmap(sc520cdp_map[i].virt);
294 sc520cdp_map[i].virt = NULL;
295 }
296 }
297}
298
299module_init(init_sc520cdp);
300module_exit(cleanup_sc520cdp);
301
302MODULE_LICENSE("GPL");
303MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
304MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
new file mode 100644
index 000000000000..5bb3b600e5d0
--- /dev/null
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -0,0 +1,256 @@
1/*
2 * MTD map driver for BIOS Flash on Intel SCB2 boards
3 * $Id: scb2_flash.c,v 1.11 2004/11/28 09:40:40 dwmw2 Exp $
4 * Copyright (C) 2002 Sun Microsystems, Inc.
5 * Tim Hockin <thockin@sun.com>
6 *
7 * A few notes on this MTD map:
8 *
9 * This was developed with a small number of SCB2 boards to test on.
10 * Hopefully, Intel has not introduced too many unaccounted variables in the
11 * making of this board.
12 *
13 * The BIOS marks its own memory region as 'reserved' in the e820 map. We
14 * try to request it here, but if it fails, we carry on anyway.
15 *
16 * This is how the chip is attached, so said the schematic:
17 * * a 4 MiB (32 Mib) 16 bit chip
18 * * a 1 MiB memory region
19 * * A20 and A21 pulled up
20 * * D8-D15 ignored
21 * What this means is that, while we are addressing bytes linearly, we are
22 * really addressing words, and discarding the other byte. This means that
23 * the chip MUST BE at least 2 MiB. This also means that every block is
24 * actually half as big as the chip reports. It also means that accesses of
25 * logical address 0 hit higher-address sections of the chip, not physical 0.
26 * One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
27 * chips.
28 *
29 * This driver assumes the chip is not write-protected by an external signal.
30 * As of this writing, that is true, but may change, just to spite me.
31 *
32 * The actual BIOS layout has been mostly reverse engineered. Intel BIOS
33 * updates for this board include 10 related (*.bio - &.bi9) binary files and
34 * another separate (*.bbo) binary file. The 10 files are 64k of data + a
35 * small header. If the headers are stripped off, the 10 64k files can be
36 * concatenated into a 640k image. This is your BIOS image, proper. The
37 * separate .bbo file also has a small header. It is the 'Boot Block'
38 * recovery BIOS. Once the header is stripped, no further prep is needed.
39 * As best I can tell, the BIOS is arranged as such:
40 * offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
41 * offset 0x50000 to 0xeffff (640k): BIOS proper
42 * offset 0xf0000 to 0xfffff (64k): Boot Block region
43 *
44 * Intel's BIOS update program flashes the BIOS and Boot Block in separate
45 * steps. Probably a wise thing to do.
46 */
47
48#include <linux/module.h>
49#include <linux/types.h>
50#include <linux/kernel.h>
51#include <linux/init.h>
52#include <asm/io.h>
53#include <linux/mtd/mtd.h>
54#include <linux/mtd/map.h>
55#include <linux/mtd/cfi.h>
56#include <linux/config.h>
57#include <linux/pci.h>
58#include <linux/pci_ids.h>
59
60#define MODNAME "scb2_flash"
61#define SCB2_ADDR 0xfff00000
62#define SCB2_WINDOW 0x00100000
63
64
65static void __iomem *scb2_ioaddr;
66static struct mtd_info *scb2_mtd;
67static struct map_info scb2_map = {
68 .name = "SCB2 BIOS Flash",
69 .size = 0,
70 .bankwidth = 1,
71};
72static int region_fail;
73
74static int __devinit
75scb2_fixup_mtd(struct mtd_info *mtd)
76{
77 int i;
78 int done = 0;
79 struct map_info *map = mtd->priv;
80 struct cfi_private *cfi = map->fldrv_priv;
81
82 /* barf if this doesn't look right */
83 if (cfi->cfiq->InterfaceDesc != 1) {
84 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
85 cfi->cfiq->InterfaceDesc);
86 return -1;
87 }
88
89 /* I wasn't here. I didn't see. dwmw2. */
90
91 /* the chip is sometimes bigger than the map - what a waste */
92 mtd->size = map->size;
93
94 /*
95 * We only REALLY get half the chip, due to the way it is
96 * wired up - D8-D15 are tossed away. We read linear bytes,
97 * but in reality we are getting 1/2 of each 16-bit read,
98 * which LOOKS linear to us. Because CFI code accounts for
99 * things like lock/unlock/erase by eraseregions, we need to
100 * fudge them to reflect this. Erases go like this:
101 * * send an erase to an address
102 * * the chip samples the address and erases the block
103 * * add the block erasesize to the address and repeat
104 * -- the problem is that the chip takes 16-bit word addresses
105 * -- without the adjustment we would erase only every other block
106 */
107 mtd->erasesize /= 2;
108 for (i = 0; i < mtd->numeraseregions; i++) {
109 struct mtd_erase_region_info *region = &mtd->eraseregions[i];
110 region->erasesize /= 2;
111 }
112
113 /*
114 * If the chip is bigger than the map, it is wired with the high
115 * address lines pulled up. This makes us access the top portion of
116 * the chip, so all our erase-region info is wrong. Start cutting from
117 * the bottom.
118 */
119 for (i = 0; !done && i < mtd->numeraseregions; i++) {
120 struct mtd_erase_region_info *region = &mtd->eraseregions[i];
121
122 if (region->numblocks * region->erasesize > mtd->size) {
123 region->numblocks = (mtd->size / region->erasesize);
124 done = 1;
125 } else {
126 region->numblocks = 0;
127 }
128 region->offset = 0;
129 }
130
131 return 0;
132}
133
134/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
135#define CSB5_FCR 0x41
136#define CSB5_FCR_DECODE_ALL 0x0e
137static int __devinit
138scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
139{
140 u8 reg;
141
142 /* enable decoding of the flash region in the south bridge */
143 pci_read_config_byte(dev, CSB5_FCR, &reg);
144 pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);
145
146 if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
147 /*
148 * The BIOS seems to mark the flash region as 'reserved'
149 * in the e820 map. Warn and go about our business.
150 */
151 printk(KERN_WARNING MODNAME
152 ": warning - can't reserve rom window, continuing\n");
153 region_fail = 1;
154 }
155
156 /* remap the IO window (w/o caching) */
157 scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
158 if (!scb2_ioaddr) {
159 printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
160 if (!region_fail)
161 release_mem_region(SCB2_ADDR, SCB2_WINDOW);
162 return -ENOMEM;
163 }
164
165 scb2_map.phys = SCB2_ADDR;
166 scb2_map.virt = scb2_ioaddr;
167 scb2_map.size = SCB2_WINDOW;
168
169 simple_map_init(&scb2_map);
170
171 /* try to find a chip */
172 scb2_mtd = do_map_probe("cfi_probe", &scb2_map);
173
174 if (!scb2_mtd) {
175 printk(KERN_ERR MODNAME ": flash probe failed!\n");
176 iounmap(scb2_ioaddr);
177 if (!region_fail)
178 release_mem_region(SCB2_ADDR, SCB2_WINDOW);
179 return -ENODEV;
180 }
181
182 scb2_mtd->owner = THIS_MODULE;
183 if (scb2_fixup_mtd(scb2_mtd) < 0) {
184 del_mtd_device(scb2_mtd);
185 map_destroy(scb2_mtd);
186 iounmap(scb2_ioaddr);
187 if (!region_fail)
188 release_mem_region(SCB2_ADDR, SCB2_WINDOW);
189 return -ENODEV;
190 }
191
192 printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
193 scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);
194
195 add_mtd_device(scb2_mtd);
196
197 return 0;
198}
199
200static void __devexit
201scb2_flash_remove(struct pci_dev *dev)
202{
203 if (!scb2_mtd)
204 return;
205
206 /* disable flash writes */
207 if (scb2_mtd->lock)
208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
209
210 del_mtd_device(scb2_mtd);
211 map_destroy(scb2_mtd);
212
213 iounmap(scb2_ioaddr);
214 scb2_ioaddr = NULL;
215
216 if (!region_fail)
217 release_mem_region(SCB2_ADDR, SCB2_WINDOW);
218 pci_set_drvdata(dev, NULL);
219}
220
221static struct pci_device_id scb2_flash_pci_ids[] = {
222 {
223 .vendor = PCI_VENDOR_ID_SERVERWORKS,
224 .device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
225 .subvendor = PCI_ANY_ID,
226 .subdevice = PCI_ANY_ID
227 },
228 { 0, }
229};
230
231static struct pci_driver scb2_flash_driver = {
232 .name = "Intel SCB2 BIOS Flash",
233 .id_table = scb2_flash_pci_ids,
234 .probe = scb2_flash_probe,
235 .remove = __devexit_p(scb2_flash_remove),
236};
237
238static int __init
239scb2_flash_init(void)
240{
241 return pci_module_init(&scb2_flash_driver);
242}
243
244static void __exit
245scb2_flash_exit(void)
246{
247 pci_unregister_driver(&scb2_flash_driver);
248}
249
250module_init(scb2_flash_init);
251module_exit(scb2_flash_exit);
252
253MODULE_LICENSE("GPL");
254MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
255MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
256MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
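As a quick illustration of the geometry fudging scb2_fixup_mtd() performs, here is a small stand-alone C sketch (not driver code; the region table and sizes are invented for the example): only one byte of each 16-bit word is usable, so erase sizes are halved and block counts are then trimmed to fit the 1 MiB window.

#include <stdio.h>

struct region { unsigned erasesize, numblocks; };

int main(void)
{
        struct region regions[] = { { 0x20000, 32 }, { 0x8000, 8 } };
        unsigned map_size = 0x100000;   /* SCB2_WINDOW: 1 MiB */
        unsigned i, done = 0;

        for (i = 0; i < 2; i++)
                regions[i].erasesize /= 2;      /* only one byte of each word is usable */

        /* trim block counts from the bottom until the regions fit the window */
        for (i = 0; !done && i < 2; i++) {
                if (regions[i].numblocks * regions[i].erasesize > map_size) {
                        regions[i].numblocks = map_size / regions[i].erasesize;
                        done = 1;
                } else {
                        regions[i].numblocks = 0;
                }
        }

        for (i = 0; i < 2; i++)
                printf("region %u: erasesize 0x%x, numblocks %u\n",
                       i, regions[i].erasesize, regions[i].numblocks);
        return 0;
}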
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
new file mode 100644
index 000000000000..0ece3786d6ea
--- /dev/null
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -0,0 +1,233 @@
1/* linux/drivers/mtd/maps/scx200_docflash.c
2
3 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
4
5 $Id: scx200_docflash.c,v 1.10 2004/11/28 09:40:40 dwmw2 Exp $
6
7 National Semiconductor SCx200 flash mapped with DOCCS
8*/
9
10#include <linux/module.h>
11#include <linux/config.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <linux/pci.h>
21#include <linux/scx200.h>
22
23#define NAME "scx200_docflash"
24
25MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>");
26MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
27MODULE_LICENSE("GPL");
28
29static int probe = 0; /* Don't autoprobe */
30static unsigned size = 0x1000000; /* 16 MiB the whole ISA address space */
31static unsigned width = 8; /* Default to 8 bits wide */
32static char *flashtype = "cfi_probe";
33
34module_param(probe, int, 0);
35MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
36module_param(size, int, 0);
37MODULE_PARM_DESC(size, "Size of the flash mapping");
38module_param(width, int, 0);
39MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
40module_param(flashtype, charp, 0);
41MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
42
43static struct resource docmem = {
44 .flags = IORESOURCE_MEM,
45 .name = "NatSemi SCx200 DOCCS Flash",
46};
47
48static struct mtd_info *mymtd;
49
50#ifdef CONFIG_MTD_PARTITIONS
51static struct mtd_partition partition_info[] = {
52 {
53 .name = "DOCCS Boot kernel",
54 .offset = 0,
55 .size = 0xc0000
56 },
57 {
58 .name = "DOCCS Low BIOS",
59 .offset = 0xc0000,
60 .size = 0x40000
61 },
62 {
63 .name = "DOCCS File system",
64 .offset = 0x100000,
65 .size = ~0 /* calculate from flash size */
66 },
67 {
68 .name = "DOCCS High BIOS",
69 .offset = ~0, /* calculate from flash size */
70 .size = 0x80000
71 },
72};
73#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
74#endif
75
76
77static struct map_info scx200_docflash_map = {
78 .name = "NatSemi SCx200 DOCCS Flash",
79};
80
81static int __init init_scx200_docflash(void)
82{
83 unsigned u;
84 unsigned base;
85 unsigned ctrl;
86 unsigned pmr;
87 struct pci_dev *bridge;
88
89 printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
90
91 if ((bridge = pci_find_device(PCI_VENDOR_ID_NS,
92 PCI_DEVICE_ID_NS_SCx200_BRIDGE,
93 NULL)) == NULL)
94 return -ENODEV;
95
96 /* check that we have found the configuration block */
97 if (!scx200_cb_present())
98 return -ENODEV;
99
100 if (probe) {
101 /* Try to use the present flash mapping if any */
102 pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
103 pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
104 pmr = inl(scx200_cb_base + SCx200_PMR);
105
106 if (base == 0
107 || (ctrl & 0x07000000) != 0x07000000
108 || (ctrl & 0x0007ffff) == 0)
109 return -ENODEV;
110
111 size = ((ctrl&0x1fff)<<13) + (1<<13);
112
113 for (u = size; u > 1; u >>= 1)
114 ;
115 if (u != 1)
116 return -ENODEV;
117
118 if (pmr & (1<<6))
119 width = 16;
120 else
121 width = 8;
122
123 docmem.start = base;
124 docmem.end = base + size;
125
126 if (request_resource(&iomem_resource, &docmem)) {
127 printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
128 return -ENOMEM;
129 }
130 } else {
131 for (u = size; u > 1; u >>= 1)
132 ;
133 if (u != 1) {
134 printk(KERN_ERR NAME ": invalid size for flash mapping\n");
135 return -EINVAL;
136 }
137
138 if (width != 8 && width != 16) {
139 printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
140 return -EINVAL;
141 }
142
143 if (allocate_resource(&iomem_resource, &docmem,
144 size,
145 0xc0000000, 0xffffffff,
146 size, NULL, NULL)) {
147 printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
148 return -ENOMEM;
149 }
150
151 ctrl = 0x07000000 | ((size-1) >> 13);
152
153 printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);
154
155 pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
156 pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);
157 pmr = inl(scx200_cb_base + SCx200_PMR);
158
159 if (width == 8) {
160 pmr &= ~(1<<6);
161 } else {
162 pmr |= (1<<6);
163 }
164 outl(pmr, scx200_cb_base + SCx200_PMR);
165 }
166
167 printk(KERN_INFO NAME ": DOCCS mapped at 0x%lx-0x%lx, width %d\n",
168 docmem.start, docmem.end, width);
169
170 scx200_docflash_map.size = size;
171 if (width == 8)
172 scx200_docflash_map.bankwidth = 1;
173 else
174 scx200_docflash_map.bankwidth = 2;
175
176 simple_map_init(&scx200_docflash_map);
177
178 scx200_docflash_map.phys = docmem.start;
179 scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
180 if (!scx200_docflash_map.virt) {
181 printk(KERN_ERR NAME ": failed to ioremap the flash\n");
182 release_resource(&docmem);
183 return -EIO;
184 }
185
186 mymtd = do_map_probe(flashtype, &scx200_docflash_map);
187 if (!mymtd) {
188 printk(KERN_ERR NAME ": unable to detect flash\n");
189 iounmap(scx200_docflash_map.virt);
190 release_resource(&docmem);
191 return -ENXIO;
192 }
193
194 if (size < mymtd->size)
195 printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");
196
197 mymtd->owner = THIS_MODULE;
198
199#ifdef CONFIG_MTD_PARTITIONS
200 partition_info[3].offset = mymtd->size-partition_info[3].size;
201 partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
202 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
203#else
204 add_mtd_device(mymtd);
205#endif
206 return 0;
207}
208
209static void __exit cleanup_scx200_docflash(void)
210{
211 if (mymtd) {
212#ifdef CONFIG_MTD_PARTITIONS
213 del_mtd_partitions(mymtd);
214#else
215 del_mtd_device(mymtd);
216#endif
217 map_destroy(mymtd);
218 }
219 if (scx200_docflash_map.virt) {
220 iounmap(scx200_docflash_map.virt);
221 release_resource(&docmem);
222 }
223}
224
225module_init(init_scx200_docflash);
226module_exit(cleanup_scx200_docflash);
227
228/*
229 Local variables:
230 compile-command: "make -k -C ../../.. SUBDIRS=drivers/mtd/maps modules"
231 c-basic-offset: 8
232 End:
233*/
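The DOCCS size handling above is easy to check in isolation. Here is a minimal stand-alone sketch of the CTRL size-field round trip, using only the encoding visible in the driver (not part of the driver itself):

#include <stdio.h>

static unsigned encode_ctrl(unsigned size)
{
        /* mapping size is stored in 8 KiB units, minus one, in the low 13 bits */
        return 0x07000000 | ((size - 1) >> 13);
}

static unsigned decode_size(unsigned ctrl)
{
        return ((ctrl & 0x1fff) << 13) + (1 << 13);
}

int main(void)
{
        unsigned size = 0x1000000;      /* the 16 MiB default mapping */
        unsigned ctrl = encode_ctrl(size);

        printf("size 0x%x -> ctrl 0x%08x -> size 0x%x\n",
               size, ctrl, decode_size(ctrl));
        return 0;
}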
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
new file mode 100644
index 000000000000..b3b39cb7c608
--- /dev/null
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -0,0 +1,101 @@
1/*
2 * sharpsl-flash.c
3 *
4 * Copyright (C) 2001 Lineo Japan, Inc.
5 * Copyright (C) 2002 SHARP
6 *
7 * $Id: sharpsl-flash.c,v 1.2 2004/11/24 20:38:06 rpurdie Exp $
8 *
9 * based on rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp
10 * Handle mapping of the flash on the RPX Lite and CLLF boards
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/kernel.h>
27#include <asm/io.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/map.h>
30#include <linux/mtd/partitions.h>
31
32#define WINDOW_ADDR 0x00000000
33#define WINDOW_SIZE 0x01000000
34#define BANK_WIDTH 2
35
36static struct mtd_info *mymtd;
37
38struct map_info sharpsl_map = {
39 .name = "sharpsl-flash",
40 .size = WINDOW_SIZE,
41 .bankwidth = BANK_WIDTH,
42 .phys = WINDOW_ADDR
43};
44
45static struct mtd_partition sharpsl_partitions[1] = {
46 {
47		.name = "Filesystem",
48		.size = 0x006d0000,
49		.offset = 0x00120000
50 }
51};
52
53#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
54
55int __init init_sharpsl(void)
56{
57 struct mtd_partition *parts;
58 int nb_parts = 0;
59 char *part_type = "static";
60
61 printk(KERN_NOTICE "Sharp SL series flash device: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
62 sharpsl_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
63 if (!sharpsl_map.virt) {
64 printk("Failed to ioremap\n");
65 return -EIO;
66 }
67 mymtd = do_map_probe("map_rom", &sharpsl_map);
68 if (!mymtd) {
69 iounmap(sharpsl_map.virt);
70 return -ENXIO;
71 }
72
73 mymtd->owner = THIS_MODULE;
74
75 parts = sharpsl_partitions;
76 nb_parts = NB_OF(sharpsl_partitions);
77
78	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
79 add_mtd_partitions(mymtd, parts, nb_parts);
80
81 return 0;
82}
83
84static void __exit cleanup_sharpsl(void)
85{
86 if (mymtd) {
87 del_mtd_partitions(mymtd);
88 map_destroy(mymtd);
89 }
90 if (sharpsl_map.virt) {
91 iounmap(sharpsl_map.virt);
92 sharpsl_map.virt = 0;
93 }
94}
95
96module_init(init_sharpsl);
97module_exit(cleanup_sharpsl);
98
99MODULE_LICENSE("GPL");
100MODULE_AUTHOR("SHARP (Original: Arnold Christensen <AKC@pel.dk>)");
101MODULE_DESCRIPTION("MTD map driver for SHARP SL series");
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
new file mode 100644
index 000000000000..8ce5d897645c
--- /dev/null
+++ b/drivers/mtd/maps/solutionengine.c
@@ -0,0 +1,137 @@
1/*
2 * $Id: solutionengine.c,v 1.14 2004/09/16 23:27:14 gleixner Exp $
3 *
4 * Flash and EPROM on Hitachi Solution Engine and similar boards.
5 *
6 * (C) 2001 Red Hat, Inc.
7 *
8 * GPL'd
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19#include <linux/config.h>
20#include <linux/errno.h>
21
22static struct mtd_info *flash_mtd;
23static struct mtd_info *eprom_mtd;
24
25static struct mtd_partition *parsed_parts;
26
27struct map_info soleng_eprom_map = {
28 .name = "Solution Engine EPROM",
29 .size = 0x400000,
30 .bankwidth = 4,
31};
32
33struct map_info soleng_flash_map = {
34 .name = "Solution Engine FLASH",
35 .size = 0x400000,
36 .bankwidth = 4,
37};
38
39static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
40
41#ifdef CONFIG_MTD_SUPERH_RESERVE
42static struct mtd_partition superh_se_partitions[] = {
43 /* Reserved for boot code, read-only */
44 {
45 .name = "flash_boot",
46 .offset = 0x00000000,
47 .size = CONFIG_MTD_SUPERH_RESERVE,
48 .mask_flags = MTD_WRITEABLE,
49 },
50 /* All else is writable (e.g. JFFS) */
51 {
52 .name = "Flash FS",
53 .offset = MTDPART_OFS_NXTBLK,
54 .size = MTDPART_SIZ_FULL,
55 }
56};
57#endif /* CONFIG_MTD_SUPERH_RESERVE */
58
59static int __init init_soleng_maps(void)
60{
61 int nr_parts = 0;
62
63 /* First probe at offset 0 */
64 soleng_flash_map.phys = 0;
65 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
66 soleng_eprom_map.phys = 0x01000000;
67 soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
68 simple_map_init(&soleng_eprom_map);
69 simple_map_init(&soleng_flash_map);
70
71 printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
72 flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
73 if (!flash_mtd) {
74 /* Not there. Try swapping */
75 printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
76 soleng_flash_map.phys = 0x01000000;
77		soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0x01000000);
78		soleng_eprom_map.phys = 0;
79		soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0);
80 flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
81 if (!flash_mtd) {
82 /* Eep. */
83 printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
84 return -ENXIO;
85 }
86 }
87 printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
88 soleng_flash_map.phys & 0x1fffffff,
89 soleng_eprom_map.phys & 0x1fffffff);
90 flash_mtd->owner = THIS_MODULE;
91
92 eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
93 if (eprom_mtd) {
94 eprom_mtd->owner = THIS_MODULE;
95 add_mtd_device(eprom_mtd);
96 }
97
98 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
99
100#ifdef CONFIG_MTD_SUPERH_RESERVE
101 if (nr_parts <= 0) {
102 printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
103 CONFIG_MTD_SUPERH_RESERVE);
104 parsed_parts = superh_se_partitions;
105 nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
106 }
107#endif /* CONFIG_MTD_SUPERH_RESERVE */
108
109 if (nr_parts > 0)
110 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
111 else
112 add_mtd_device(flash_mtd);
113
114 return 0;
115}
116
117static void __exit cleanup_soleng_maps(void)
118{
119 if (eprom_mtd) {
120 del_mtd_device(eprom_mtd);
121 map_destroy(eprom_mtd);
122 }
123
124 if (parsed_parts)
125 del_mtd_partitions(flash_mtd);
126 else
127 del_mtd_device(flash_mtd);
128 map_destroy(flash_mtd);
129}
130
131module_init(init_soleng_maps);
132module_exit(cleanup_soleng_maps);
133
134MODULE_LICENSE("GPL");
135MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
136MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
137
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
new file mode 100644
index 000000000000..29091d10030a
--- /dev/null
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -0,0 +1,177 @@
1/* $Id: sun_uflash.c,v 1.11 2004/11/04 13:24:15 gleixner Exp $
2 *
3 * sun_uflash - Driver implementation for user-programmable flash
4 * present on many Sun Microsystems SME boardsets.
5 *
6 * This driver does NOT provide access to the OBP-flash for
7 * safety reasons-- use <linux>/drivers/sbus/char/flash.c instead.
8 *
9 * Copyright (c) 2001 Eric Brower (ebrower@usa.net)
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/fs.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/ioport.h>
19#include <asm/ebus.h>
20#include <asm/oplib.h>
21#include <asm/uaccess.h>
22#include <asm/io.h>
23
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/map.h>
26
27#define UFLASH_OBPNAME "flashprom"
28#define UFLASH_DEVNAME "userflash"
29
30#define UFLASH_WINDOW_SIZE 0x200000
31#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
32
33MODULE_AUTHOR
34 ("Eric Brower <ebrower@usa.net>");
35MODULE_DESCRIPTION
36 ("User-programmable flash device on Sun Microsystems boardsets");
37MODULE_SUPPORTED_DEVICE
38 ("userflash");
39MODULE_LICENSE
40 ("GPL");
41
42static LIST_HEAD(device_list);
43struct uflash_dev {
44 char * name; /* device name */
45 struct map_info map; /* mtd map info */
46 struct mtd_info * mtd; /* mtd info */
47 struct list_head list;
48};
49
50
51struct map_info uflash_map_templ = {
52 .name = "SUNW,???-????",
53 .size = UFLASH_WINDOW_SIZE,
54 .bankwidth = UFLASH_BUSWIDTH,
55};
56
57int uflash_devinit(struct linux_ebus_device* edev)
58{
59 int iTmp, nregs;
60 struct linux_prom_registers regs[2];
61 struct uflash_dev *pdev;
62
63 iTmp = prom_getproperty(
64 edev->prom_node, "reg", (void *)regs, sizeof(regs));
65 if ((iTmp % sizeof(regs[0])) != 0) {
66 printk("%s: Strange reg property size %d\n",
67 UFLASH_DEVNAME, iTmp);
68 return -ENODEV;
69 }
70
71 nregs = iTmp / sizeof(regs[0]);
72
73 if (nregs != 1) {
74 /* Non-CFI userflash device-- once I find one we
75 * can work on supporting it.
76 */
77 printk("%s: unsupported device at 0x%lx (%d regs): " \
78 "email ebrower@usa.net\n",
79 UFLASH_DEVNAME, edev->resource[0].start, nregs);
80 return -ENODEV;
81 }
82
83 if(0 == (pdev = kmalloc(sizeof(struct uflash_dev), GFP_KERNEL))) {
84 printk("%s: unable to kmalloc new device\n", UFLASH_DEVNAME);
85 return(-ENOMEM);
86 }
87
88 /* copy defaults and tweak parameters */
89 memcpy(&pdev->map, &uflash_map_templ, sizeof(uflash_map_templ));
90 pdev->map.size = regs[0].reg_size;
91
92 iTmp = prom_getproplen(edev->prom_node, "model");
93 pdev->name = kmalloc(iTmp, GFP_KERNEL);
94 prom_getstring(edev->prom_node, "model", pdev->name, iTmp);
95 if(0 != pdev->name && 0 < strlen(pdev->name)) {
96 pdev->map.name = pdev->name;
97 }
98 pdev->map.phys = edev->resource[0].start;
99 pdev->map.virt = ioremap_nocache(edev->resource[0].start, pdev->map.size);
100 if(0 == pdev->map.virt) {
101 printk("%s: failed to map device\n", __FUNCTION__);
102 kfree(pdev->name);
103 kfree(pdev);
104 return(-1);
105 }
106
107 simple_map_init(&pdev->map);
108
109 /* MTD registration */
110 pdev->mtd = do_map_probe("cfi_probe", &pdev->map);
111 if(0 == pdev->mtd) {
112 iounmap(pdev->map.virt);
113 kfree(pdev->name);
114 kfree(pdev);
115 return(-ENXIO);
116 }
117
118 list_add(&pdev->list, &device_list);
119
120 pdev->mtd->owner = THIS_MODULE;
121
122 add_mtd_device(pdev->mtd);
123 return(0);
124}
125
126static int __init uflash_init(void)
127{
128 struct linux_ebus *ebus = NULL;
129 struct linux_ebus_device *edev = NULL;
130
131 for_each_ebus(ebus) {
132 for_each_ebusdev(edev, ebus) {
133 if (!strcmp(edev->prom_name, UFLASH_OBPNAME)) {
134 if(0 > prom_getproplen(edev->prom_node, "user")) {
135 DEBUG(2, "%s: ignoring device at 0x%lx\n",
136 UFLASH_DEVNAME, edev->resource[0].start);
137 } else {
138 uflash_devinit(edev);
139 }
140 }
141 }
142 }
143
144 if(list_empty(&device_list)) {
145 printk("%s: unable to locate device\n", UFLASH_DEVNAME);
146 return -ENODEV;
147 }
148 return(0);
149}
150
151static void __exit uflash_cleanup(void)
152{
153 struct list_head *udevlist;
154 struct uflash_dev *udev;
155
156 list_for_each(udevlist, &device_list) {
157 udev = list_entry(udevlist, struct uflash_dev, list);
158 DEBUG(2, "%s: removing device %s\n",
159 UFLASH_DEVNAME, udev->name);
160
161 if(0 != udev->mtd) {
162 del_mtd_device(udev->mtd);
163 map_destroy(udev->mtd);
164 }
165 if(0 != udev->map.virt) {
166 iounmap(udev->map.virt);
167 udev->map.virt = NULL;
168 }
169 if(0 != udev->name) {
170 kfree(udev->name);
171 }
172 kfree(udev);
173 }
174}
175
176module_init(uflash_init);
177module_exit(uflash_cleanup);
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
new file mode 100644
index 000000000000..995e9991cb8d
--- /dev/null
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -0,0 +1,263 @@
1/*
2 * Handle mapping of the flash memory access routines
3 * on TQM8xxL based devices.
4 *
5 * $Id: tqm8xxl.c,v 1.13 2004/10/20 22:21:53 dwmw2 Exp $
6 *
7 * based on rpxlite.c
8 *
9 * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
10 *
11 * This code is GPLed
12 *
13 */
14
15/*
16 * According to TQM8xxL hardware manual, TQM8xxL series have
17 * following flash memory organisations:
18 * | capacity | | chip type | | bank0 | | bank1 |
19 * 2MiB 512Kx16 2MiB 0
20 * 4MiB 1Mx16 4MiB 0
21 * 8MiB 1Mx16 4MiB 4MiB
22 * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
23 * kernel configuration.
24 */
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <asm/io.h>
31
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/partitions.h>
35
36#define FLASH_ADDR 0x40000000
37#define FLASH_SIZE 0x00800000
38#define FLASH_BANK_MAX 4
39
40// trivial struct to describe partition information
41struct mtd_part_def
42{
43 int nums;
44 unsigned char *type;
45 struct mtd_partition* mtd_part;
46};
47
48//static struct mtd_info *mymtd;
49static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
50static struct map_info* map_banks[FLASH_BANK_MAX];
51static struct mtd_part_def part_banks[FLASH_BANK_MAX];
52static unsigned long num_banks;
53static void __iomem *start_scan_addr;
54
55/*
56 * Here is the partition information for all known TQM8xxL series devices.
57 * See include/linux/mtd/partitions.h for definition of the mtd_partition
58 * structure.
59 *
60 * The *_max_flash_size is the maximum possible mapped flash size which
61 * is not necessarily the actual flash size. It must correspond to the
62 * value specified in the mapping definition defined by the
63 * "struct map_desc *_io_desc" for the corresponding machine.
64 */
65
66#ifdef CONFIG_MTD_PARTITIONS
67/* Currently, TQM8xxL has up to 8MiB of flash */
68static unsigned long tqm8xxl_max_flash_size = 0x00800000;
69
70/* partition definition for first flash bank
71 * (cf. "drivers/char/flash_config.c")
72 */
73static struct mtd_partition tqm8xxl_partitions[] = {
74 {
75 .name = "ppcboot",
76 .offset = 0x00000000,
77 .size = 0x00020000, /* 128KB */
78 .mask_flags = MTD_WRITEABLE, /* force read-only */
79 },
80 {
81 .name = "kernel", /* default kernel image */
82 .offset = 0x00020000,
83 .size = 0x000e0000,
84 .mask_flags = MTD_WRITEABLE, /* force read-only */
85 },
86 {
87 .name = "user",
88 .offset = 0x00100000,
89 .size = 0x00100000,
90 },
91 {
92 .name = "initrd",
93 .offset = 0x00200000,
94 .size = 0x00200000,
95 }
96};
97/* partition definition for second flash bank */
98static struct mtd_partition tqm8xxl_fs_partitions[] = {
99 {
100 .name = "cramfs",
101 .offset = 0x00000000,
102 .size = 0x00200000,
103 },
104 {
105 .name = "jffs",
106 .offset = 0x00200000,
107 .size = 0x00200000,
108 //.size = MTDPART_SIZ_FULL,
109 }
110};
111#endif
112
113int __init init_tqm_mtd(void)
114{
115 int idx = 0, ret = 0;
116 unsigned long flash_addr, flash_size, mtd_size = 0;
117 /* pointer to TQM8xxL board info data */
118 bd_t *bd = (bd_t *)__res;
119
120 flash_addr = bd->bi_flashstart;
121 flash_size = bd->bi_flashsize;
122
123 //request maximum flash size address space
124 start_scan_addr = ioremap(flash_addr, flash_size);
125 if (!start_scan_addr) {
126		printk(KERN_WARNING "%s: failed to ioremap address 0x%lx\n", __FUNCTION__, flash_addr);
127 return -EIO;
128 }
129
130 for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
131 if(mtd_size >= flash_size)
132 break;
133
134 printk(KERN_INFO "%s: chip probing count %d\n", __FUNCTION__, idx);
135
136 map_banks[idx] = (struct map_info *)kmalloc(sizeof(struct map_info), GFP_KERNEL);
137 if(map_banks[idx] == NULL) {
138 ret = -ENOMEM;
139 /* FIXME: What if some MTD devices were probed already? */
140 goto error_mem;
141 }
142
143 memset((void *)map_banks[idx], 0, sizeof(struct map_info));
144 map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
145
146 if (!map_banks[idx]->name) {
147 ret = -ENOMEM;
148 /* FIXME: What if some MTD devices were probed already? */
149 goto error_mem;
150 }
151 sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
152
153 map_banks[idx]->size = flash_size;
154 map_banks[idx]->bankwidth = 4;
155
156 simple_map_init(map_banks[idx]);
157
158 map_banks[idx]->virt = start_scan_addr;
159 map_banks[idx]->phys = flash_addr;
160 /* FIXME: This looks utterly bogus, but I'm trying to
161 preserve the behaviour of the original (shown here)...
162
163 map_banks[idx]->map_priv_1 =
164 start_scan_addr + ((idx > 0) ?
165 (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
166 */
167
168 if (idx && mtd_banks[idx-1]) {
169 map_banks[idx]->virt += mtd_banks[idx-1]->size;
170 map_banks[idx]->phys += mtd_banks[idx-1]->size;
171 }
172
173 //start to probe flash chips
174 mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
175
176 if (mtd_banks[idx]) {
177 mtd_banks[idx]->owner = THIS_MODULE;
178 mtd_size += mtd_banks[idx]->size;
179 num_banks++;
180
181			printk(KERN_INFO "%s: bank%lu, name:%s, size:%u bytes\n", __FUNCTION__, num_banks,
182			       mtd_banks[idx]->name, mtd_banks[idx]->size);
183 }
184 }
185
186 /* no supported flash chips found */
187 if (!num_banks) {
188		printk(KERN_NOTICE "TQM8xxL: No supported flash chips found!\n");
189 ret = -ENXIO;
190 goto error_mem;
191 }
192
193#ifdef CONFIG_MTD_PARTITIONS
194 /*
195 * Select Static partition definitions
196 */
197 part_banks[0].mtd_part = tqm8xxl_partitions;
198 part_banks[0].type = "Static image";
199 part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
200
201 part_banks[1].mtd_part = tqm8xxl_fs_partitions;
202 part_banks[1].type = "Static file system";
203 part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
204
205 for(idx = 0; idx < num_banks ; idx++) {
206 if (part_banks[idx].nums == 0) {
207 printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
208 add_mtd_device(mtd_banks[idx]);
209 } else {
210 printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
211 idx, part_banks[idx].type);
212 add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
213 part_banks[idx].nums);
214 }
215 }
216#else
217 printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
218 for(idx = 0 ; idx < num_banks ; idx++)
219 add_mtd_device(mtd_banks[idx]);
220#endif
221 return 0;
222error_mem:
223 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
224 if(map_banks[idx] != NULL) {
225 if(map_banks[idx]->name != NULL) {
226 kfree(map_banks[idx]->name);
227 map_banks[idx]->name = NULL;
228 }
229 kfree(map_banks[idx]);
230 map_banks[idx] = NULL;
231 }
232 }
233error:
234 iounmap(start_scan_addr);
235 return ret;
236}
237
238static void __exit cleanup_tqm_mtd(void)
239{
240 unsigned int idx = 0;
241 for(idx = 0 ; idx < num_banks ; idx++) {
242 /* destroy mtd_info previously allocated */
243 if (mtd_banks[idx]) {
244 del_mtd_partitions(mtd_banks[idx]);
245 map_destroy(mtd_banks[idx]);
246 }
247 /* release map_info not used anymore */
248 kfree(map_banks[idx]->name);
249 kfree(map_banks[idx]);
250 }
251
252 if (start_scan_addr) {
253 iounmap(start_scan_addr);
254 start_scan_addr = 0;
255 }
256}
257
258module_init(init_tqm_mtd);
259module_exit(cleanup_tqm_mtd);
260
261MODULE_LICENSE("GPL");
262MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
263MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
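A stand-alone sketch of how the bank loop above advances the mapping base by the size of each bank it actually probed, so that bank N starts right after bank N-1 (the bank sizes below are invented):

#include <stdio.h>

int main(void)
{
        unsigned long flash_addr = 0x40000000;              /* FLASH_ADDR */
        unsigned long bank_size[] = { 0x400000, 0x400000 }; /* two invented 4 MiB banks */
        unsigned long phys = flash_addr;
        int i;

        for (i = 0; i < 2; i++) {
                printf("bank%d: phys 0x%08lx, size 0x%lx\n", i, phys, bank_size[i]);
                phys += bank_size[i];   /* the next bank starts right after this one */
        }
        return 0;
}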
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
new file mode 100644
index 000000000000..3ebd90f56503
--- /dev/null
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -0,0 +1,141 @@
1/*
2 * ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
3 *
4 * Copyright (C) 2004 Sean Young <sean@mess.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19 *
20 * Note:
21 * - In order for detection to work, jumper 3 must be set.
22 * - Drive A and B use a proprietary FTL from General Software which isn't
23 * supported as of yet so standard drives can't be mounted; you can create
24 * your own (e.g. jffs) file system.
25 * - If you have created your own jffs file system and the bios overwrites
26 * it during boot, try disabling Drive A: and B: in the boot order.
27 *
28 * $Id: ts5500_flash.c,v 1.2 2004/11/28 09:40:40 dwmw2 Exp $
29 */
30
31#include <linux/config.h>
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/init.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38
39#ifdef CONFIG_MTD_PARTITIONS
40#include <linux/mtd/partitions.h>
41#endif
42
43#define WINDOW_ADDR 0x09400000
44#define WINDOW_SIZE 0x00200000
45
46static struct map_info ts5500_map = {
47 .name = "TS-5500 Flash",
48 .size = WINDOW_SIZE,
49 .bankwidth = 1,
50 .phys = WINDOW_ADDR
51};
52
53#ifdef CONFIG_MTD_PARTITIONS
54static struct mtd_partition ts5500_partitions[] = {
55 {
56 .name = "Drive A",
57 .offset = 0,
58 .size = 0x0e0000
59 },
60 {
61 .name = "BIOS",
62 .offset = 0x0e0000,
63 .size = 0x020000,
64 },
65 {
66 .name = "Drive B",
67 .offset = 0x100000,
68 .size = 0x100000
69 }
70};
71
72#define NUM_PARTITIONS (sizeof(ts5500_partitions)/sizeof(struct mtd_partition))
73
74#endif
75
76static struct mtd_info *mymtd;
77
78static int __init init_ts5500_map(void)
79{
80 int rc = 0;
81
82 ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
83
84 if(!ts5500_map.virt) {
85 printk(KERN_ERR "Failed to ioremap_nocache\n");
86 rc = -EIO;
87 goto err_out_ioremap;
88 }
89
90 simple_map_init(&ts5500_map);
91
92 mymtd = do_map_probe("jedec_probe", &ts5500_map);
93 if(!mymtd)
94 mymtd = do_map_probe("map_rom", &ts5500_map);
95
96 if(!mymtd) {
97 rc = -ENXIO;
98 goto err_out_map;
99 }
100
101 mymtd->owner = THIS_MODULE;
102#ifdef CONFIG_MTD_PARTITIONS
103 add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS);
104#else
105 add_mtd_device(mymtd);
106#endif
107
108 return 0;
109
110err_out_map:
111	iounmap(ts5500_map.virt);
112err_out_ioremap:
113
114
115 return rc;
116}
117
118static void __exit cleanup_ts5500_map(void)
119{
120 if (mymtd) {
121#ifdef CONFIG_MTD_PARTITIONS
122 del_mtd_partitions(mymtd);
123#else
124 del_mtd_device(mymtd);
125#endif
126 map_destroy(mymtd);
127 }
128
129 if (ts5500_map.virt) {
130 iounmap(ts5500_map.virt);
131 ts5500_map.virt = NULL;
132 }
133}
134
135module_init(init_ts5500_map);
136module_exit(cleanup_ts5500_map);
137
138MODULE_LICENSE("GPL");
139MODULE_AUTHOR("Sean Young <sean@mess.org>");
140MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
141
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
new file mode 100644
index 000000000000..170d71239e5e
--- /dev/null
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -0,0 +1,108 @@
1/*
2 * tsunami_flash.c
3 *
4 * flash chip on alpha ds10...
5 * $Id: tsunami_flash.c,v 1.9 2004/07/14 09:52:55 dwmw2 Exp $
6 */
7#include <asm/io.h>
8#include <asm/core_tsunami.h>
9#include <linux/init.h>
10#include <linux/mtd/map.h>
11#include <linux/mtd/mtd.h>
12
13#define FLASH_ENABLE_PORT 0x00C00001
14#define FLASH_ENABLE_BYTE 0x01
15#define FLASH_DISABLE_BYTE 0x00
16
17#define MAX_TIG_FLASH_SIZE (12*1024*1024)
18static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
19{
20 map_word val;
21 val.x[0] = tsunami_tig_readb(offset);
22 return val;
23}
24
25static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
26{
27 tsunami_tig_writeb(value.x[0], offset);
28}
29
30static void tsunami_flash_copy_from(
31 struct map_info *map, void *addr, unsigned long offset, ssize_t len)
32{
33 unsigned char *dest;
34 dest = addr;
35 while(len && (offset < MAX_TIG_FLASH_SIZE)) {
36 *dest = tsunami_tig_readb(offset);
37 offset++;
38 dest++;
39 len--;
40 }
41}
42
43static void tsunami_flash_copy_to(
44 struct map_info *map, unsigned long offset,
45 const void *addr, ssize_t len)
46{
47 const unsigned char *src;
48 src = addr;
49 while(len && (offset < MAX_TIG_FLASH_SIZE)) {
50 tsunami_tig_writeb(*src, offset);
51 offset++;
52 src++;
53 len--;
54 }
55}
56
57/*
58 * Deliberately don't provide operations wider than 8 bits. I don't
59 * have them, and it scares me to think how you could mess up if
60 * you tried to use them.  The buswidth is set correctly, so I'm safe.
61 */
62static struct map_info tsunami_flash_map = {
63 .name = "flash chip on the Tsunami TIG bus",
64 .size = MAX_TIG_FLASH_SIZE,
65	.phys = NO_XIP,
66 .bankwidth = 1,
67 .read = tsunami_flash_read8,
68 .copy_from = tsunami_flash_copy_from,
69 .write = tsunami_flash_write8,
70 .copy_to = tsunami_flash_copy_to,
71};
72
73static struct mtd_info *tsunami_flash_mtd;
74
75static void __exit cleanup_tsunami_flash(void)
76{
77 struct mtd_info *mtd;
78 mtd = tsunami_flash_mtd;
79 if (mtd) {
80 del_mtd_device(mtd);
81 map_destroy(mtd);
82 }
83 tsunami_flash_mtd = 0;
84}
85
86
87static int __init init_tsunami_flash(void)
88{
89 static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
90 char **type;
91
92 tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
93
94 tsunami_flash_mtd = 0;
95 type = rom_probe_types;
96 for(; !tsunami_flash_mtd && *type; type++) {
97 tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
98 }
99 if (tsunami_flash_mtd) {
100 tsunami_flash_mtd->owner = THIS_MODULE;
101 add_mtd_device(tsunami_flash_mtd);
102 return 0;
103 }
104 return -ENXIO;
105}
106
107module_init(init_tsunami_flash);
108module_exit(cleanup_tsunami_flash);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
new file mode 100644
index 000000000000..811d92e5f5b1
--- /dev/null
+++ b/drivers/mtd/maps/uclinux.c
@@ -0,0 +1,127 @@
1/****************************************************************************/
2
3/*
4 * uclinux.c -- generic memory mapped MTD driver for uclinux
5 *
6 * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
7 *
8 * $Id: uclinux.c,v 1.10 2005/01/05 18:05:13 dwmw2 Exp $
9 */
10
11/****************************************************************************/
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/fs.h>
19#include <linux/major.h>
20#include <linux/root_dev.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/map.h>
23#include <linux/mtd/partitions.h>
24#include <asm/io.h>
25
26/****************************************************************************/
27
28
29/****************************************************************************/
30
31struct map_info uclinux_ram_map = {
32 .name = "RAM",
33};
34
35struct mtd_info *uclinux_ram_mtdinfo;
36
37/****************************************************************************/
38
39struct mtd_partition uclinux_romfs[] = {
40 { .name = "ROMfs" }
41};
42
43#define NUM_PARTITIONS (sizeof(uclinux_romfs) / sizeof(uclinux_romfs[0]))
44
45/****************************************************************************/
46
47int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
48 size_t *retlen, u_char **mtdbuf)
49{
50 struct map_info *map = mtd->priv;
51 *mtdbuf = (u_char *) (map->virt + ((int) from));
52 *retlen = len;
53 return(0);
54}
55
56/****************************************************************************/
57
58int __init uclinux_mtd_init(void)
59{
60 struct mtd_info *mtd;
61 struct map_info *mapp;
62 extern char _ebss;
63
64 mapp = &uclinux_ram_map;
65 mapp->phys = (unsigned long) &_ebss;
66 mapp->size = PAGE_ALIGN(*((unsigned long *)((&_ebss) + 8)));
67 mapp->bankwidth = 4;
68
69 printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
70	       (int) mapp->phys, (int) mapp->size);
71
72 mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
73
74 if (mapp->virt == 0) {
75 printk("uclinux[mtd]: ioremap_nocache() failed\n");
76 return(-EIO);
77 }
78
79 simple_map_init(mapp);
80
81 mtd = do_map_probe("map_ram", mapp);
82 if (!mtd) {
83 printk("uclinux[mtd]: failed to find a mapping?\n");
84 iounmap(mapp->virt);
85 return(-ENXIO);
86 }
87
88 mtd->owner = THIS_MODULE;
89 mtd->point = uclinux_point;
90 mtd->priv = mapp;
91
92 uclinux_ram_mtdinfo = mtd;
93 add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
94
95 printk("uclinux[mtd]: set %s to be root filesystem\n",
96 uclinux_romfs[0].name);
97 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 0);
98 put_mtd_device(mtd);
99
100 return(0);
101}
102
103/****************************************************************************/
104
105void __exit uclinux_mtd_cleanup(void)
106{
107 if (uclinux_ram_mtdinfo) {
108 del_mtd_partitions(uclinux_ram_mtdinfo);
109 map_destroy(uclinux_ram_mtdinfo);
110 uclinux_ram_mtdinfo = NULL;
111 }
112	if (uclinux_ram_map.virt) {
113 iounmap((void *) uclinux_ram_map.virt);
114 uclinux_ram_map.virt = 0;
115 }
116}
117
118/****************************************************************************/
119
120module_init(uclinux_mtd_init);
121module_exit(uclinux_mtd_cleanup);
122
123MODULE_LICENSE("GPL");
124MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
125MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
126
127/****************************************************************************/
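The size used for the RAM map comes from the word 8 bytes past _ebss, which in the usual uClinux layout (a ROMfs image appended directly after the kernel's BSS) is the filesystem length field. Here is a stand-alone sketch of that layout, with a fabricated buffer standing in for the appended image:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char image[16];        /* stands in for the bytes at &_ebss */
        unsigned long size;

        memset(image, 0, sizeof(image));
        memcpy(image, "-rom1fs-", 8);                   /* ROMfs magic           */
        image[8] = 0x00; image[9] = 0x10;               /* length field: 1 MiB   */
        image[10] = 0x00; image[11] = 0x00;

        /* the driver reads *(unsigned long *)(&_ebss + 8); on the big-endian
         * targets this was written for, that is the big-endian length field */
        size = ((unsigned long)image[8] << 24) | ((unsigned long)image[9] << 16) |
               ((unsigned long)image[10] << 8) | image[11];

        printf("ROMfs image size: 0x%lx\n", size);
        return 0;
}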
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
new file mode 100644
index 000000000000..c8c74110ed1b
--- /dev/null
+++ b/drivers/mtd/maps/vmax301.c
@@ -0,0 +1,198 @@
1// $Id: vmax301.c,v 1.30 2004/07/12 22:38:29 dwmw2 Exp $
2/* ######################################################################
3
4 Tempustech VMAX SBC301 MTD Driver.
5
6 The VMAX 301 is an SBC based on . It
7 comes with three built-in AMD 29F016B flash chips and a socket for SRAM or
8 more flash. Each unit has its own 8k mapping into a settable region
9 (0xD8000). There are two 8k mappings for each MTD: the first is always set
10 to the lower 8k of the device; the second is paged. Writing a 16-bit page
11 value anywhere in the first 8k will cause the second 8k to page around.
12
13 To boot the device a bios extension must be installed into the first 8k
14 of flash that is smart enough to copy itself down, page in the rest of
15 itself and begin executing.
16
17 ##################################################################### */
18
19#include <linux/module.h>
20#include <linux/slab.h>
21#include <linux/ioport.h>
22#include <linux/init.h>
23#include <linux/spinlock.h>
24#include <asm/io.h>
25
26#include <linux/mtd/map.h>
27#include <linux/mtd/mtd.h>
28
29
30#define WINDOW_START 0xd8000
31#define WINDOW_LENGTH 0x2000
32#define WINDOW_SHIFT 25
33#define WINDOW_MASK 0x1FFF
34
35/* Actually we could use two spinlocks, but we'd have to have
36 more private space in the struct map_info. We lose a little
37 performance like this, but we'd probably lose more by having
38 the extra indirection from having one of the map->map_priv
39 fields pointing to yet another private struct.
40*/
41static DEFINE_SPINLOCK(vmax301_spin);
42
43static void __vmax301_page(struct map_info *map, unsigned long page)
44{
45 writew(page, map->map_priv_2 - WINDOW_LENGTH);
46 map->map_priv_1 = page;
47}
48
49static inline void vmax301_page(struct map_info *map,
50 unsigned long ofs)
51{
52 unsigned long page = (ofs >> WINDOW_SHIFT);
53 if (map->map_priv_1 != page)
54 __vmax301_page(map, page);
55}
56
57static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
58{
59 map_word ret;
60 spin_lock(&vmax301_spin);
61 vmax301_page(map, ofs);
62 ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
63 spin_unlock(&vmax301_spin);
64 return ret;
65}
66
67static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
68{
69 while(len) {
70 unsigned long thislen = len;
71 if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
72 thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
73 spin_lock(&vmax301_spin);
74 vmax301_page(map, from);
75 memcpy_fromio(to, map->map_priv_2 + from, thislen);
76 spin_unlock(&vmax301_spin);
77 to += thislen;
78 from += thislen;
79 len -= thislen;
80 }
81}
82
83static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
84{
85 spin_lock(&vmax301_spin);
86 vmax301_page(map, adr);
87 writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
88 spin_unlock(&vmax301_spin);
89}
90
91static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
92{
93 while(len) {
94 unsigned long thislen = len;
95 if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
96 thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
97
98 spin_lock(&vmax301_spin);
99 vmax301_page(map, to);
100 memcpy_toio(map->map_priv_2 + to, from, thislen);
101 spin_unlock(&vmax301_spin);
102 to += thislen;
103 from += thislen;
104 len -= thislen;
105 }
106}
107
108static struct map_info vmax_map[2] = {
109 {
110 .name = "VMAX301 Internal Flash",
111 .phys = NO_XIP,
112 .size = 3*2*1024*1024,
113 .bankwidth = 1,
114 .read = vmax301_read8,
115 .copy_from = vmax301_copy_from,
116 .write = vmax301_write8,
117 .copy_to = vmax301_copy_to,
118 .map_priv_1 = WINDOW_START + WINDOW_LENGTH,
119 .map_priv_2 = 0xFFFFFFFF
120 },
121 {
122 .name = "VMAX301 Socket",
123 .phys = NO_XIP,
124 .size = 0,
125 .bankwidth = 1,
126 .read = vmax301_read8,
127 .copy_from = vmax301_copy_from,
128 .write = vmax301_write8,
129 .copy_to = vmax301_copy_to,
130 .map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
131 .map_priv_2 = 0xFFFFFFFF
132 }
133};
134
135static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
136
137static void __exit cleanup_vmax301(void)
138{
139 int i;
140
141 for (i=0; i<2; i++) {
142 if (vmax_mtd[i]) {
143 del_mtd_device(vmax_mtd[i]);
144 map_destroy(vmax_mtd[i]);
145 }
146 }
147	iounmap((void *)(vmax_map[0].map_priv_2 - WINDOW_LENGTH));
148}
149
150int __init init_vmax301(void)
151{
152 int i;
153 unsigned long iomapadr;
154 // Print out our little header..
155 printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
156 WINDOW_START+4*WINDOW_LENGTH);
157
158 iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
159 if (!iomapadr) {
160 printk("Failed to ioremap memory region\n");
161 return -EIO;
162 }
163 /* Put the address in the map's private data area.
164 We store the actual MTD IO address rather than the
165 address of the first half, because it's used more
166 often.
167 */
168	vmax_map[0].map_priv_2 = iomapadr + WINDOW_LENGTH;
169	vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_LENGTH);
170
171 for (i=0; i<2; i++) {
172 vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
173 if (!vmax_mtd[i])
174 vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
175 if (!vmax_mtd[i])
176 vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
177 if (!vmax_mtd[i])
178 vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
179 if (vmax_mtd[i]) {
180 vmax_mtd[i]->owner = THIS_MODULE;
181 add_mtd_device(vmax_mtd[i]);
182 }
183 }
184
185	if (!vmax_mtd[0] && !vmax_mtd[1]) {
186 iounmap((void *)iomapadr);
187 return -ENXIO;
188 }
189
190 return 0;
191}
192
193module_init(init_vmax301);
194module_exit(cleanup_vmax301);
195
196MODULE_LICENSE("GPL");
197MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
198MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
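A stand-alone model of the two-window paging scheme described in the header comment (the in-memory "flash", the page granularity and the helper name are simplifications for illustration, not the driver's exact arithmetic):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_LENGTH 0x2000u
#define WINDOW_MASK   0x1FFFu

static uint8_t fake_flash[6u * 1024 * 1024];    /* stand-in for 3 x 2 MiB chips */
static unsigned current_page;                   /* plays the role of map_priv_1 */

static uint8_t paged_read8(unsigned long ofs)
{
        unsigned page = ofs / WINDOW_LENGTH;    /* which 8 KiB page is wanted */

        if (page != current_page)
                current_page = page;            /* the driver does a writew to window 1 */

        /* window 2 now shows that page; index it with the low 13 bits */
        return fake_flash[current_page * WINDOW_LENGTH + (ofs & WINDOW_MASK)];
}

int main(void)
{
        fake_flash[0x4123] = 0xAB;              /* byte living in page 2 */
        printf("read 0x%02x\n", paged_read8(0x4123));
        return 0;
}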
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
new file mode 100644
index 000000000000..d6137b1b5670
--- /dev/null
+++ b/drivers/mtd/maps/walnut.c
@@ -0,0 +1,122 @@
1/*
2 * $Id: walnut.c,v 1.2 2004/12/10 12:07:42 holindho Exp $
3 *
4 * Mapping for Walnut flash
5 * (used ebony.c as a "framework")
6 *
7 * Heikki Lindholm <holindho@infradead.org>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#include <linux/config.h>
24#include <linux/version.h>
25#include <asm/io.h>
26#include <asm/ibm4xx.h>
27#include <platforms/4xx/walnut.h>
28
29/* these should be in platforms/4xx/walnut.h ? */
30#define WALNUT_FLASH_ONBD_N(x) (x & 0x02)
31#define WALNUT_FLASH_SRAM_SEL(x) (x & 0x01)
32#define WALNUT_FLASH_LOW 0xFFF00000
33#define WALNUT_FLASH_HIGH 0xFFF80000
34#define WALNUT_FLASH_SIZE 0x80000
35
36static struct mtd_info *flash;
37
38static struct map_info walnut_map = {
39 .name = "Walnut flash",
40 .size = WALNUT_FLASH_SIZE,
41 .bankwidth = 1,
42};
43
44/* Actually, OpenBIOS is the last 128 KiB of the flash - better
45 * partitioning could be made */
46static struct mtd_partition walnut_partitions[] = {
47 {
48 .name = "OpenBIOS",
49 .offset = 0x0,
50 .size = WALNUT_FLASH_SIZE,
51 /*.mask_flags = MTD_WRITEABLE, */ /* force read-only */
52 }
53};
54
55int __init init_walnut(void)
56{
57 u8 fpga_brds1;
58 void *fpga_brds1_adr;
59 void *fpga_status_adr;
60 unsigned long flash_base;
61
62 /* this should already be mapped (platform/4xx/walnut.c) */
63 fpga_status_adr = ioremap(WALNUT_FPGA_BASE, 8);
64 if (!fpga_status_adr)
65 return -ENOMEM;
66
67 fpga_brds1_adr = fpga_status_adr+5;
68 fpga_brds1 = readb(fpga_brds1_adr);
69 /* iounmap(fpga_status_adr); */
70
71 if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
72		printk("The on-board flash is disabled (U79 sw 5)!\n");
73 return -EIO;
74 }
75 if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
76 flash_base = WALNUT_FLASH_LOW;
77 else
78 flash_base = WALNUT_FLASH_HIGH;
79
80 walnut_map.phys = flash_base;
81 walnut_map.virt =
82 (void __iomem *)ioremap(flash_base, walnut_map.size);
83
84 if (!walnut_map.virt) {
85 printk("Failed to ioremap flash.\n");
86 return -EIO;
87 }
88
89 simple_map_init(&walnut_map);
90
91 flash = do_map_probe("jedec_probe", &walnut_map);
92 if (flash) {
93 flash->owner = THIS_MODULE;
94 add_mtd_partitions(flash, walnut_partitions,
95 ARRAY_SIZE(walnut_partitions));
96 } else {
97 printk("map probe failed for flash\n");
98 return -ENXIO;
99 }
100
101 return 0;
102}
103
104static void __exit cleanup_walnut(void)
105{
106 if (flash) {
107 del_mtd_partitions(flash);
108 map_destroy(flash);
109 }
110
111 if (walnut_map.virt) {
112 iounmap((void *)walnut_map.virt);
113 walnut_map.virt = 0;
114 }
115}
116
117module_init(init_walnut);
118module_exit(cleanup_walnut);
119
120MODULE_LICENSE("GPL");
121MODULE_AUTHOR("Heikki Lindholm <holindho@infradead.org>");
122MODULE_DESCRIPTION("MTD map and partitions for IBM 405GP Walnut boards");
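A stand-alone sketch of the BRDS1 bit decoding init_walnut() does above (the register value is invented for the example):

#include <stdio.h>

#define WALNUT_FLASH_ONBD_N(x)   ((x) & 0x02)   /* on-board flash disabled  */
#define WALNUT_FLASH_SRAM_SEL(x) ((x) & 0x01)   /* SRAM socket selected     */
#define WALNUT_FLASH_LOW  0xFFF00000
#define WALNUT_FLASH_HIGH 0xFFF80000

int main(void)
{
        unsigned char fpga_brds1 = 0x00;        /* hypothetical register value */
        unsigned long flash_base;

        if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
                printf("on-board flash disabled (U79 sw 5)\n");
                return 1;
        }
        /* with SRAM in the socket the flash decodes low, otherwise high */
        flash_base = WALNUT_FLASH_SRAM_SEL(fpga_brds1) ?
                     WALNUT_FLASH_LOW : WALNUT_FLASH_HIGH;

        printf("flash base: 0x%08lx\n", flash_base);
        return 0;
}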
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
new file mode 100644
index 000000000000..82b887b05707
--- /dev/null
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -0,0 +1,181 @@
1/*
2 * $Id: wr_sbc82xx_flash.c,v 1.7 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
5 *
6 * Copyright (C) 2004 Red Hat, Inc.
7 *
8 * Author: David Woodhouse <dwmw2@infradead.org>
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <asm/io.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h>
20#include <linux/config.h>
21#include <linux/mtd/partitions.h>
22
23#include <asm/immap_cpm2.h>
24
25static struct mtd_info *sbcmtd[3];
26static struct mtd_partition *sbcmtd_parts[3];
27
28struct map_info sbc82xx_flash_map[3] = {
29 {.name = "Boot flash"},
30 {.name = "Alternate boot flash"},
31 {.name = "User flash"}
32};
33
34static struct mtd_partition smallflash_parts[] = {
35 {
36 .name = "space",
37 .size = 0x100000,
38 .offset = 0,
39 }, {
40 .name = "bootloader",
41 .size = MTDPART_SIZ_FULL,
42 .offset = MTDPART_OFS_APPEND,
43 }
44};
45
46static struct mtd_partition bigflash_parts[] = {
47 {
48 .name = "bootloader",
49 .size = 0x00100000,
50 .offset = 0,
51 }, {
52 .name = "file system",
53 .size = 0x01f00000,
54 .offset = MTDPART_OFS_APPEND,
55 }, {
56 .name = "boot config",
57 .size = 0x00100000,
58 .offset = MTDPART_OFS_APPEND,
59 }, {
60 .name = "space",
61 .size = 0x01f00000,
62 .offset = MTDPART_OFS_APPEND,
63 }
64};
65
66static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
67
68#define init_sbc82xx_one_flash(map, br, or) \
69do { \
70 (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
71 (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
72 switch (br & 0x00001800) { \
73 case 0x00000000: \
74 case 0x00000800: (map).bankwidth = 1; break; \
75 case 0x00001000: (map).bankwidth = 2; break; \
76 case 0x00001800: (map).bankwidth = 4; break; \
77 } \
78} while (0)
79
80int __init init_sbc82xx_flash(void)
81{
82 volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
83 int bigflash;
84 int i;
85
86#ifdef CONFIG_SBC8560
87 mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
88#else
89 mc = &cpm2_immr->im_memctl;
90#endif
91
92 bigflash = 1;
93 if ((mc->memc_br0 & 0x00001800) == 0x00001800)
94 bigflash = 0;
95
96 init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
97 init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
98 init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
99
100#ifdef CONFIG_SBC8560
101 iounmap((void *) mc);
102#endif
103
104 for (i=0; i<3; i++) {
105 int8_t flashcs[3] = { 0, 6, 1 };
106 int nr_parts;
107
108 printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
109 sbc82xx_flash_map[i].name,
110 (sbc82xx_flash_map[i].size >> 20),
111 flashcs[i]);
112 if (!sbc82xx_flash_map[i].phys) {
113 /* We know it can't be at zero. */
114 printk("): disabled by bootloader.\n");
115 continue;
116 }
117 printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
118
119 sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
120
121 if (!sbc82xx_flash_map[i].virt) {
122 printk("Failed to ioremap\n");
123 continue;
124 }
125
126 simple_map_init(&sbc82xx_flash_map[i]);
127
128 sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
129
130 if (!sbcmtd[i])
131 continue;
132
133 sbcmtd[i]->owner = THIS_MODULE;
134
135 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
136 &sbcmtd_parts[i], 0);
137 if (nr_parts > 0) {
138 add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
139 continue;
140 }
141
142 /* No partitioning detected. Use default */
143 if (i == 2) {
144 add_mtd_device(sbcmtd[i]);
145 } else if (i == bigflash) {
146 add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
147 } else {
148 add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
149 }
150 }
151 return 0;
152}
153
154static void __exit cleanup_sbc82xx_flash(void)
155{
156 int i;
157
158 for (i=0; i<3; i++) {
159 if (!sbcmtd[i])
160 continue;
161
162 if (i<2 || sbcmtd_parts[i])
163 del_mtd_partitions(sbcmtd[i]);
164 else
165 del_mtd_device(sbcmtd[i]);
166
167 kfree(sbcmtd_parts[i]);
168 map_destroy(sbcmtd[i]);
169
170 iounmap((void *)sbc82xx_flash_map[i].virt);
171 sbc82xx_flash_map[i].virt = 0;
172 }
173}
174
175module_init(init_sbc82xx_flash);
176module_exit(cleanup_sbc82xx_flash);
177
178
179MODULE_LICENSE("GPL");
180MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
181MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
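A stand-alone sketch of what the init_sbc82xx_one_flash() macro extracts from a BRx/ORx register pair (the register values are invented; 32-bit unsigned arithmetic is assumed):

#include <stdio.h>

int main(void)
{
        unsigned br = 0xff800801;       /* invented BRx: base 0xff800000, 8-bit port, valid */
        unsigned or = 0xff800000;       /* invented ORx: 8 MiB address mask */
        unsigned phys, size;
        int bankwidth = 0;

        phys = (br & 1) ? (br & 0xffff8000) : 0;        /* valid bit gates the base     */
        size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; /* address mask -> window size  */

        switch (br & 0x00001800) {                      /* port size field              */
        case 0x00000000:
        case 0x00000800: bankwidth = 1; break;
        case 0x00001000: bankwidth = 2; break;
        case 0x00001800: bankwidth = 4; break;
        }

        printf("phys 0x%08x size 0x%08x bankwidth %d\n", phys, size, bankwidth);
        return 0;
}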
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
new file mode 100644
index 000000000000..f8d2185819e7
--- /dev/null
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -0,0 +1,478 @@
1/*
2 * $Id: mtd_blkdevs.c,v 1.24 2004/11/16 18:28:59 dwmw2 Exp $
3 *
4 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
5 *
6 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/list.h>
14#include <linux/fs.h>
15#include <linux/mtd/blktrans.h>
16#include <linux/mtd/mtd.h>
17#include <linux/blkdev.h>
18#include <linux/blkpg.h>
19#include <linux/spinlock.h>
20#include <linux/hdreg.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/uaccess.h>
24#include <linux/devfs_fs_kernel.h>
25
26static LIST_HEAD(blktrans_majors);
27
28extern struct semaphore mtd_table_mutex;
29extern struct mtd_info *mtd_table[];
30
31struct mtd_blkcore_priv {
32 struct completion thread_dead;
33 int exiting;
34 wait_queue_head_t thread_wq;
35 struct request_queue *rq;
36 spinlock_t queue_lock;
37};
38
39static int do_blktrans_request(struct mtd_blktrans_ops *tr,
40 struct mtd_blktrans_dev *dev,
41 struct request *req)
42{
43 unsigned long block, nsect;
44 char *buf;
45
46 block = req->sector;
47 nsect = req->current_nr_sectors;
48 buf = req->buffer;
49
50 if (!(req->flags & REQ_CMD))
51 return 0;
52
53 if (block + nsect > get_capacity(req->rq_disk))
54 return 0;
55
56 switch(rq_data_dir(req)) {
57 case READ:
58 for (; nsect > 0; nsect--, block++, buf += 512)
59 if (tr->readsect(dev, block, buf))
60 return 0;
61 return 1;
62
63 case WRITE:
64 if (!tr->writesect)
65 return 0;
66
67 for (; nsect > 0; nsect--, block++, buf += 512)
68 if (tr->writesect(dev, block, buf))
69 return 0;
70 return 1;
71
72 default:
73 printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
74 return 0;
75 }
76}
77
78static int mtd_blktrans_thread(void *arg)
79{
80 struct mtd_blktrans_ops *tr = arg;
81 struct request_queue *rq = tr->blkcore_priv->rq;
82
83 /* we might get involved when memory gets low, so use PF_MEMALLOC */
84 current->flags |= PF_MEMALLOC | PF_NOFREEZE;
85
86 daemonize("%sd", tr->name);
87
88 /* daemonize() doesn't do this for us since some kernel threads
89 actually want to deal with signals. We can't just call
90 exit_sighand() since that'll cause an oops when we finally
91 do exit. */
92 spin_lock_irq(&current->sighand->siglock);
93 sigfillset(&current->blocked);
94 recalc_sigpending();
95 spin_unlock_irq(&current->sighand->siglock);
96
97 spin_lock_irq(rq->queue_lock);
98
99 while (!tr->blkcore_priv->exiting) {
100 struct request *req;
101 struct mtd_blktrans_dev *dev;
102 int res = 0;
103 DECLARE_WAITQUEUE(wait, current);
104
105 req = elv_next_request(rq);
106
107 if (!req) {
108 add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
109 set_current_state(TASK_INTERRUPTIBLE);
110
111 spin_unlock_irq(rq->queue_lock);
112
113 schedule();
114 remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
115
116 spin_lock_irq(rq->queue_lock);
117
118 continue;
119 }
120
121 dev = req->rq_disk->private_data;
122 tr = dev->tr;
123
124 spin_unlock_irq(rq->queue_lock);
125
126 down(&dev->sem);
127 res = do_blktrans_request(tr, dev, req);
128 up(&dev->sem);
129
130 spin_lock_irq(rq->queue_lock);
131
132 end_request(req, res);
133 }
134 spin_unlock_irq(rq->queue_lock);
135
136 complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
137}
138
139static void mtd_blktrans_request(struct request_queue *rq)
140{
141 struct mtd_blktrans_ops *tr = rq->queuedata;
142 wake_up(&tr->blkcore_priv->thread_wq);
143}
144
145
146static int blktrans_open(struct inode *i, struct file *f)
147{
148 struct mtd_blktrans_dev *dev;
149 struct mtd_blktrans_ops *tr;
150 int ret = -ENODEV;
151
152 dev = i->i_bdev->bd_disk->private_data;
153 tr = dev->tr;
154
155 if (!try_module_get(dev->mtd->owner))
156 goto out;
157
158 if (!try_module_get(tr->owner))
159 goto out_tr;
160
161 /* FIXME: Locking. A hot pluggable device can go away
162 (del_mtd_device can be called for it) without its module
163 being unloaded. */
164 dev->mtd->usecount++;
165
166 ret = 0;
167 if (tr->open && (ret = tr->open(dev))) {
168 dev->mtd->usecount--;
169 module_put(dev->mtd->owner);
170 out_tr:
171 module_put(tr->owner);
172 }
173 out:
174 return ret;
175}
176
177static int blktrans_release(struct inode *i, struct file *f)
178{
179 struct mtd_blktrans_dev *dev;
180 struct mtd_blktrans_ops *tr;
181 int ret = 0;
182
183 dev = i->i_bdev->bd_disk->private_data;
184 tr = dev->tr;
185
186 if (tr->release)
187 ret = tr->release(dev);
188
189 if (!ret) {
190 dev->mtd->usecount--;
191 module_put(dev->mtd->owner);
192 module_put(tr->owner);
193 }
194
195 return ret;
196}
197
198
199static int blktrans_ioctl(struct inode *inode, struct file *file,
200 unsigned int cmd, unsigned long arg)
201{
202 struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
203 struct mtd_blktrans_ops *tr = dev->tr;
204
205 switch (cmd) {
206 case BLKFLSBUF:
207 if (tr->flush)
208 return tr->flush(dev);
209 /* The core code did the work, we had nothing to do. */
210 return 0;
211
212 case HDIO_GETGEO:
213 if (tr->getgeo) {
214 struct hd_geometry g;
215 int ret;
216
217 memset(&g, 0, sizeof(g));
218 ret = tr->getgeo(dev, &g);
219 if (ret)
220 return ret;
221
222 g.start = get_start_sect(inode->i_bdev);
223 if (copy_to_user((void __user *)arg, &g, sizeof(g)))
224 return -EFAULT;
225 return 0;
226 } /* else */
227 default:
228 return -ENOTTY;
229 }
230}
231
232struct block_device_operations mtd_blktrans_ops = {
233 .owner = THIS_MODULE,
234 .open = blktrans_open,
235 .release = blktrans_release,
236 .ioctl = blktrans_ioctl,
237};
238
239int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
240{
241 struct mtd_blktrans_ops *tr = new->tr;
242 struct list_head *this;
243 int last_devnum = -1;
244 struct gendisk *gd;
245
246 if (!down_trylock(&mtd_table_mutex)) {
247 up(&mtd_table_mutex);
248 BUG();
249 }
250
251 list_for_each(this, &tr->devs) {
252 struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
253 if (new->devnum == -1) {
254 /* Use first free number */
255 if (d->devnum != last_devnum+1) {
256 /* Found a free devnum. Plug it in here */
257 new->devnum = last_devnum+1;
258 list_add_tail(&new->list, &d->list);
259 goto added;
260 }
261 } else if (d->devnum == new->devnum) {
262 /* Required number taken */
263 return -EBUSY;
264 } else if (d->devnum > new->devnum) {
265 /* Required number was free */
266 list_add_tail(&new->list, &d->list);
267 goto added;
268 }
269 last_devnum = d->devnum;
270 }
271 if (new->devnum == -1)
272 new->devnum = last_devnum+1;
273
274 if ((new->devnum << tr->part_bits) > 256) {
275 return -EBUSY;
276 }
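	/*
	 * The check above keeps this device's first minor, devnum << part_bits,
	 * within the traditional 256-minor space of the major.  For example
	 * (hypothetically), with part_bits == 2 each device claims four
	 * consecutive minors starting at devnum << 2.
	 */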
277
278 init_MUTEX(&new->sem);
279 list_add_tail(&new->list, &tr->devs);
280 added:
281 if (!tr->writesect)
282 new->readonly = 1;
283
284 gd = alloc_disk(1 << tr->part_bits);
285 if (!gd) {
286 list_del(&new->list);
287 return -ENOMEM;
288 }
289 gd->major = tr->major;
290 gd->first_minor = (new->devnum) << tr->part_bits;
291 gd->fops = &mtd_blktrans_ops;
292
293 snprintf(gd->disk_name, sizeof(gd->disk_name),
294 "%s%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
295 snprintf(gd->devfs_name, sizeof(gd->devfs_name),
296 "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
297
298 /* 2.5 has capacity in units of 512 bytes while still
299 having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
300 set_capacity(gd, (new->size * new->blksize) >> 9);
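	/* Worked example (hypothetical numbers): a device exposing
	   size == 8192 blocks of blksize == 1024 bytes is registered as
	   (8192 * 1024) >> 9 == 16384 sectors of 512 bytes. */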
301
302 gd->private_data = new;
303 new->blkcore_priv = gd;
304 gd->queue = tr->blkcore_priv->rq;
305
306 if (new->readonly)
307 set_disk_ro(gd, 1);
308
309 add_disk(gd);
310
311 return 0;
312}
313
314int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
315{
316 if (!down_trylock(&mtd_table_mutex)) {
317 up(&mtd_table_mutex);
318 BUG();
319 }
320
321 list_del(&old->list);
322
323 del_gendisk(old->blkcore_priv);
324 put_disk(old->blkcore_priv);
325
326 return 0;
327}
328
329static void blktrans_notify_remove(struct mtd_info *mtd)
330{
331 struct list_head *this, *this2, *next;
332
333 list_for_each(this, &blktrans_majors) {
334 struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
335
336 list_for_each_safe(this2, next, &tr->devs) {
337 struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
338
339 if (dev->mtd == mtd)
340 tr->remove_dev(dev);
341 }
342 }
343}
344
345static void blktrans_notify_add(struct mtd_info *mtd)
346{
347 struct list_head *this;
348
349 if (mtd->type == MTD_ABSENT)
350 return;
351
352 list_for_each(this, &blktrans_majors) {
353 struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
354
355 tr->add_mtd(tr, mtd);
356 }
357
358}
359
360static struct mtd_notifier blktrans_notifier = {
361 .add = blktrans_notify_add,
362 .remove = blktrans_notify_remove,
363};
364
365int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
366{
367 int ret, i;
368
369 /* Register the notifier if/when the first device type is
370 registered, to prevent the link/init ordering from fucking
371 us over. */
372 if (!blktrans_notifier.list.next)
373 register_mtd_user(&blktrans_notifier);
374
375 tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
376 if (!tr->blkcore_priv)
377 return -ENOMEM;
378
379 memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
380
381 down(&mtd_table_mutex);
382
383 ret = register_blkdev(tr->major, tr->name);
384 if (ret) {
385 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
386 tr->name, tr->major, ret);
387 kfree(tr->blkcore_priv);
388 up(&mtd_table_mutex);
389 return ret;
390 }
391 spin_lock_init(&tr->blkcore_priv->queue_lock);
392 init_completion(&tr->blkcore_priv->thread_dead);
393 init_waitqueue_head(&tr->blkcore_priv->thread_wq);
394
395 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
396 if (!tr->blkcore_priv->rq) {
397 unregister_blkdev(tr->major, tr->name);
398 kfree(tr->blkcore_priv);
399 up(&mtd_table_mutex);
400 return -ENOMEM;
401 }
402
403 tr->blkcore_priv->rq->queuedata = tr;
404
405 ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
406 if (ret < 0) {
407 blk_cleanup_queue(tr->blkcore_priv->rq);
408 unregister_blkdev(tr->major, tr->name);
409 kfree(tr->blkcore_priv);
410 up(&mtd_table_mutex);
411 return ret;
412 }
413
414 devfs_mk_dir(tr->name);
415
416 INIT_LIST_HEAD(&tr->devs);
417 list_add(&tr->list, &blktrans_majors);
418
419 for (i=0; i<MAX_MTD_DEVICES; i++) {
420 if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
421 tr->add_mtd(tr, mtd_table[i]);
422 }
423
424 up(&mtd_table_mutex);
425
426 return 0;
427}
428
429int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
430{
431 struct list_head *this, *next;
432
433 down(&mtd_table_mutex);
434
435 /* Clean up the kernel thread */
436 tr->blkcore_priv->exiting = 1;
437 wake_up(&tr->blkcore_priv->thread_wq);
438 wait_for_completion(&tr->blkcore_priv->thread_dead);
439
440 /* Remove it from the list of active majors */
441 list_del(&tr->list);
442
443 list_for_each_safe(this, next, &tr->devs) {
444 struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
445 tr->remove_dev(dev);
446 }
447
448 devfs_remove(tr->name);
449 blk_cleanup_queue(tr->blkcore_priv->rq);
450 unregister_blkdev(tr->major, tr->name);
451
452 up(&mtd_table_mutex);
453
454 kfree(tr->blkcore_priv);
455
456 if (!list_empty(&tr->devs))
457 BUG();
458 return 0;
459}
460
461static void __exit mtd_blktrans_exit(void)
462{
463 /* No race here -- if someone's currently in register_mtd_blktrans
464 we're screwed anyway. */
465 if (blktrans_notifier.list.next)
466 unregister_mtd_user(&blktrans_notifier);
467}
468
469module_exit(mtd_blktrans_exit);
470
471EXPORT_SYMBOL_GPL(register_mtd_blktrans);
472EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
473EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
474EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
475
476MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
477MODULE_LICENSE("GPL");
478MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
new file mode 100644
index 000000000000..b7c32c242bc7
--- /dev/null
+++ b/drivers/mtd/mtdblock.c
@@ -0,0 +1,394 @@
1/*
2 * Direct MTD block device access
3 *
4 * $Id: mtdblock.c,v 1.66 2004/11/25 13:52:52 joern Exp $
5 *
6 * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
7 * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
8 */
9
10#include <linux/config.h>
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/fs.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/vmalloc.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/blktrans.h>
20
21static struct mtdblk_dev {
22 struct mtd_info *mtd;
23 int count;
24 struct semaphore cache_sem;
25 unsigned char *cache_data;
26 unsigned long cache_offset;
27 unsigned int cache_size;
28 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
29} *mtdblks[MAX_MTD_DEVICES];
30
31/*
32 * Cache stuff...
33 *
34 * Since typical flash erasable sectors are much larger than what Linux's
35 * buffer cache can handle, we must implement read-modify-write on flash
36 * sectors for each block write request. To avoid over-erasing flash sectors
37 * and to speed things up, we locally cache a whole flash sector while it is
38 * being written to until a different sector is required.
39 */
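/*
 * Illustration (hypothetical numbers): with a 64KiB erase sector and
 * 512-byte block writes, updating each block individually would cost an
 * erase cycle per block -- 128 erases to rewrite one sector.  With the
 * cache below, the sector is read once, patched in RAM, and only
 * erased/written back when a write lands in a different sector or the
 * device is flushed/released.
 */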
40
41static void erase_callback(struct erase_info *done)
42{
43 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
44 wake_up(wait_q);
45}
46
47static int erase_write (struct mtd_info *mtd, unsigned long pos,
48 int len, const char *buf)
49{
50 struct erase_info erase;
51 DECLARE_WAITQUEUE(wait, current);
52 wait_queue_head_t wait_q;
53 size_t retlen;
54 int ret;
55
56 /*
57 * First, let's erase the flash block.
58 */
59
60 init_waitqueue_head(&wait_q);
61 erase.mtd = mtd;
62 erase.callback = erase_callback;
63 erase.addr = pos;
64 erase.len = len;
65 erase.priv = (u_long)&wait_q;
66
67 set_current_state(TASK_INTERRUPTIBLE);
68 add_wait_queue(&wait_q, &wait);
69
70 ret = MTD_ERASE(mtd, &erase);
71 if (ret) {
72 set_current_state(TASK_RUNNING);
73 remove_wait_queue(&wait_q, &wait);
74 printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
75 "on \"%s\" failed\n",
76 pos, len, mtd->name);
77 return ret;
78 }
79
80 schedule(); /* Wait for erase to finish. */
81 remove_wait_queue(&wait_q, &wait);
82
83 /*
84	 * Next, write the data to flash.
85 */
86
87 ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
88 if (ret)
89 return ret;
90 if (retlen != len)
91 return -EIO;
92 return 0;
93}
94
95
96static int write_cached_data (struct mtdblk_dev *mtdblk)
97{
98 struct mtd_info *mtd = mtdblk->mtd;
99 int ret;
100
101 if (mtdblk->cache_state != STATE_DIRTY)
102 return 0;
103
104 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
105 "at 0x%lx, size 0x%x\n", mtd->name,
106 mtdblk->cache_offset, mtdblk->cache_size);
107
108 ret = erase_write (mtd, mtdblk->cache_offset,
109 mtdblk->cache_size, mtdblk->cache_data);
110 if (ret)
111 return ret;
112
113 /*
114	 * Here we could arguably set the cache state to STATE_CLEAN.
115 * However this could lead to inconsistency since we will not
116 * be notified if this content is altered on the flash by other
117 * means. Let's declare it empty and leave buffering tasks to
118 * the buffer cache instead.
119 */
120 mtdblk->cache_state = STATE_EMPTY;
121 return 0;
122}
123
124
125static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
126 int len, const char *buf)
127{
128 struct mtd_info *mtd = mtdblk->mtd;
129 unsigned int sect_size = mtdblk->cache_size;
130 size_t retlen;
131 int ret;
132
133 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
134 mtd->name, pos, len);
135
136 if (!sect_size)
137 return MTD_WRITE (mtd, pos, len, &retlen, buf);
138
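	/*
	 * The loop below splits the request on erase-sector boundaries.
	 * Hypothetical example with sect_size == 0x10000: a write of
	 * len == 0x100 at pos == 0x12345 gives sect_start == 0x10000,
	 * offset == 0x2345, size == 0x100 -- a partial-sector write that
	 * goes through the cache rather than a full erase/program cycle.
	 */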
139 while (len > 0) {
140 unsigned long sect_start = (pos/sect_size)*sect_size;
141 unsigned int offset = pos - sect_start;
142 unsigned int size = sect_size - offset;
143 if( size > len )
144 size = len;
145
146 if (size == sect_size) {
147 /*
148 * We are covering a whole sector. Thus there is no
149 * need to bother with the cache while it may still be
150 * useful for other partial writes.
151 */
152 ret = erase_write (mtd, pos, size, buf);
153 if (ret)
154 return ret;
155 } else {
156 /* Partial sector: need to use the cache */
157
158 if (mtdblk->cache_state == STATE_DIRTY &&
159 mtdblk->cache_offset != sect_start) {
160 ret = write_cached_data(mtdblk);
161 if (ret)
162 return ret;
163 }
164
165 if (mtdblk->cache_state == STATE_EMPTY ||
166 mtdblk->cache_offset != sect_start) {
167 /* fill the cache with the current sector */
168 mtdblk->cache_state = STATE_EMPTY;
169 ret = MTD_READ(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data);
170 if (ret)
171 return ret;
172 if (retlen != sect_size)
173 return -EIO;
174
175 mtdblk->cache_offset = sect_start;
176 mtdblk->cache_size = sect_size;
177 mtdblk->cache_state = STATE_CLEAN;
178 }
179
180 /* write data to our local cache */
181 memcpy (mtdblk->cache_data + offset, buf, size);
182 mtdblk->cache_state = STATE_DIRTY;
183 }
184
185 buf += size;
186 pos += size;
187 len -= size;
188 }
189
190 return 0;
191}
192
193
194static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
195 int len, char *buf)
196{
197 struct mtd_info *mtd = mtdblk->mtd;
198 unsigned int sect_size = mtdblk->cache_size;
199 size_t retlen;
200 int ret;
201
202 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
203 mtd->name, pos, len);
204
205 if (!sect_size)
206 return MTD_READ (mtd, pos, len, &retlen, buf);
207
208 while (len > 0) {
209 unsigned long sect_start = (pos/sect_size)*sect_size;
210 unsigned int offset = pos - sect_start;
211 unsigned int size = sect_size - offset;
212 if (size > len)
213 size = len;
214
215 /*
216	 * Check if the requested data is already cached.
217 * Read the requested amount of data from our internal cache if it
218 * contains what we want, otherwise we read the data directly
219 * from flash.
220 */
221 if (mtdblk->cache_state != STATE_EMPTY &&
222 mtdblk->cache_offset == sect_start) {
223 memcpy (buf, mtdblk->cache_data + offset, size);
224 } else {
225 ret = MTD_READ (mtd, pos, size, &retlen, buf);
226 if (ret)
227 return ret;
228 if (retlen != size)
229 return -EIO;
230 }
231
232 buf += size;
233 pos += size;
234 len -= size;
235 }
236
237 return 0;
238}
239
240static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
241 unsigned long block, char *buf)
242{
243 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
244 return do_cached_read(mtdblk, block<<9, 512, buf);
245}
246
247static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
248 unsigned long block, char *buf)
249{
250 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
251 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
252 mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
253 if (!mtdblk->cache_data)
254 return -EINTR;
255 /* -EINTR is not really correct, but it is the best match
256 * documented in man 2 write for all cases. We could also
257 * return -EAGAIN sometimes, but why bother?
258 */
259 }
260 return do_cached_write(mtdblk, block<<9, 512, buf);
261}
262
263static int mtdblock_open(struct mtd_blktrans_dev *mbd)
264{
265 struct mtdblk_dev *mtdblk;
266 struct mtd_info *mtd = mbd->mtd;
267 int dev = mbd->devnum;
268
269 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
270
271 if (mtdblks[dev]) {
272 mtdblks[dev]->count++;
273 return 0;
274 }
275
276 /* OK, it's not open. Create cache info for it */
277 mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
278 if (!mtdblk)
279 return -ENOMEM;
280
281 memset(mtdblk, 0, sizeof(*mtdblk));
282 mtdblk->count = 1;
283 mtdblk->mtd = mtd;
284
285 init_MUTEX (&mtdblk->cache_sem);
286 mtdblk->cache_state = STATE_EMPTY;
287 if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
288 mtdblk->mtd->erasesize) {
289 mtdblk->cache_size = mtdblk->mtd->erasesize;
290 mtdblk->cache_data = NULL;
291 }
292
293 mtdblks[dev] = mtdblk;
294
295 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
296
297 return 0;
298}
299
300static int mtdblock_release(struct mtd_blktrans_dev *mbd)
301{
302 int dev = mbd->devnum;
303 struct mtdblk_dev *mtdblk = mtdblks[dev];
304
305 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
306
307 down(&mtdblk->cache_sem);
308 write_cached_data(mtdblk);
309 up(&mtdblk->cache_sem);
310
311 if (!--mtdblk->count) {
312 /* It was the last usage. Free the device */
313 mtdblks[dev] = NULL;
314 if (mtdblk->mtd->sync)
315 mtdblk->mtd->sync(mtdblk->mtd);
316 vfree(mtdblk->cache_data);
317 kfree(mtdblk);
318 }
319 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
320
321 return 0;
322}
323
324static int mtdblock_flush(struct mtd_blktrans_dev *dev)
325{
326 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
327
328 down(&mtdblk->cache_sem);
329 write_cached_data(mtdblk);
330 up(&mtdblk->cache_sem);
331
332 if (mtdblk->mtd->sync)
333 mtdblk->mtd->sync(mtdblk->mtd);
334 return 0;
335}
336
337static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
338{
339 struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
340
341 if (!dev)
342 return;
343
344 memset(dev, 0, sizeof(*dev));
345
346 dev->mtd = mtd;
347 dev->devnum = mtd->index;
348 dev->blksize = 512;
349 dev->size = mtd->size >> 9;
350 dev->tr = tr;
351
352 if (!(mtd->flags & MTD_WRITEABLE))
353 dev->readonly = 1;
354
355 add_mtd_blktrans_dev(dev);
356}
357
358static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
359{
360 del_mtd_blktrans_dev(dev);
361 kfree(dev);
362}
363
364static struct mtd_blktrans_ops mtdblock_tr = {
365 .name = "mtdblock",
366 .major = 31,
367 .part_bits = 0,
368 .open = mtdblock_open,
369 .flush = mtdblock_flush,
370 .release = mtdblock_release,
371 .readsect = mtdblock_readsect,
372 .writesect = mtdblock_writesect,
373 .add_mtd = mtdblock_add_mtd,
374 .remove_dev = mtdblock_remove_dev,
375 .owner = THIS_MODULE,
376};
377
378static int __init init_mtdblock(void)
379{
380 return register_mtd_blktrans(&mtdblock_tr);
381}
382
383static void __exit cleanup_mtdblock(void)
384{
385 deregister_mtd_blktrans(&mtdblock_tr);
386}
387
388module_init(init_mtdblock);
389module_exit(cleanup_mtdblock);
390
391
392MODULE_LICENSE("GPL");
393MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
394MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
new file mode 100644
index 000000000000..0c830ba41ef0
--- /dev/null
+++ b/drivers/mtd/mtdblock_ro.c
@@ -0,0 +1,87 @@
1/*
2 * $Id: mtdblock_ro.c,v 1.19 2004/11/16 18:28:59 dwmw2 Exp $
3 *
4 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
5 *
6 * Simple read-only (writable only for RAM) mtdblock driver
7 */
8
9#include <linux/init.h>
10#include <linux/slab.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/blktrans.h>
13
14static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
15 unsigned long block, char *buf)
16{
17 size_t retlen;
18
19 if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
20 return 1;
21 return 0;
22}
23
24static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
25 unsigned long block, char *buf)
26{
27 size_t retlen;
28
29 if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
30 return 1;
31 return 0;
32}
33
34static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
35{
36 struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
37
38 if (!dev)
39 return;
40
41 memset(dev, 0, sizeof(*dev));
42
43 dev->mtd = mtd;
44 dev->devnum = mtd->index;
45 dev->blksize = 512;
46 dev->size = mtd->size >> 9;
47 dev->tr = tr;
48 if ((mtd->flags & (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE)) !=
49 (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE))
50 dev->readonly = 1;
51
52 add_mtd_blktrans_dev(dev);
53}
54
55static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
56{
57 del_mtd_blktrans_dev(dev);
58 kfree(dev);
59}
60
61static struct mtd_blktrans_ops mtdblock_tr = {
62 .name = "mtdblock",
63 .major = 31,
64 .part_bits = 0,
65 .readsect = mtdblock_readsect,
66 .writesect = mtdblock_writesect,
67 .add_mtd = mtdblock_add_mtd,
68 .remove_dev = mtdblock_remove_dev,
69 .owner = THIS_MODULE,
70};
71
72static int __init mtdblock_init(void)
73{
74 return register_mtd_blktrans(&mtdblock_tr);
75}
76
77static void __exit mtdblock_exit(void)
78{
79 deregister_mtd_blktrans(&mtdblock_tr);
80}
81
82module_init(mtdblock_init);
83module_exit(mtdblock_exit);
84
85MODULE_LICENSE("GPL");
86MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
87MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
new file mode 100644
index 000000000000..510ad78312cc
--- /dev/null
+++ b/drivers/mtd/mtdchar.c
@@ -0,0 +1,562 @@
1/*
2 * $Id: mtdchar.c,v 1.66 2005/01/05 18:05:11 dwmw2 Exp $
3 *
4 * Character-device access to raw MTD devices.
5 *
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/compatmac.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/fs.h>
16#include <asm/uaccess.h>
17
18#ifdef CONFIG_DEVFS_FS
19#include <linux/devfs_fs_kernel.h>
20
21static void mtd_notify_add(struct mtd_info* mtd)
22{
23 if (!mtd)
24 return;
25
26 devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
27 S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);
28
29 devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
30 S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index);
31}
32
33static void mtd_notify_remove(struct mtd_info* mtd)
34{
35 if (!mtd)
36 return;
37 devfs_remove("mtd/%d", mtd->index);
38 devfs_remove("mtd/%dro", mtd->index);
39}
40
41static struct mtd_notifier notifier = {
42 .add = mtd_notify_add,
43 .remove = mtd_notify_remove,
44};
45
46static inline void mtdchar_devfs_init(void)
47{
48 devfs_mk_dir("mtd");
49 register_mtd_user(&notifier);
50}
51
52static inline void mtdchar_devfs_exit(void)
53{
54 unregister_mtd_user(&notifier);
55 devfs_remove("mtd");
56}
57#else /* !DEVFS */
58#define mtdchar_devfs_init() do { } while(0)
59#define mtdchar_devfs_exit() do { } while(0)
60#endif
61
62static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
63{
64 struct mtd_info *mtd = file->private_data;
65
66 switch (orig) {
67 case 0:
68 /* SEEK_SET */
69 file->f_pos = offset;
70 break;
71 case 1:
72 /* SEEK_CUR */
73 file->f_pos += offset;
74 break;
75 case 2:
76 /* SEEK_END */
77		file->f_pos = mtd->size + offset;
78 break;
79 default:
80 return -EINVAL;
81 }
82
83 if (file->f_pos < 0)
84 file->f_pos = 0;
85 else if (file->f_pos >= mtd->size)
86 file->f_pos = mtd->size - 1;
87
88 return file->f_pos;
89}
90
91
92
93static int mtd_open(struct inode *inode, struct file *file)
94{
95 int minor = iminor(inode);
96 int devnum = minor >> 1;
97 struct mtd_info *mtd;
98
99 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
100
101 if (devnum >= MAX_MTD_DEVICES)
102 return -ENODEV;
103
104 /* You can't open the RO devices RW */
105 if ((file->f_mode & 2) && (minor & 1))
106 return -EACCES;
107
108 mtd = get_mtd_device(NULL, devnum);
109
110 if (!mtd)
111 return -ENODEV;
112
113 if (MTD_ABSENT == mtd->type) {
114 put_mtd_device(mtd);
115 return -ENODEV;
116 }
117
118 file->private_data = mtd;
119
120 /* You can't open it RW if it's not a writeable device */
121 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
122 put_mtd_device(mtd);
123 return -EACCES;
124 }
125
126 return 0;
127} /* mtd_open */
128
129/*====================================================================*/
130
131static int mtd_close(struct inode *inode, struct file *file)
132{
133 struct mtd_info *mtd;
134
135 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
136
137 mtd = file->private_data;
138
139 if (mtd->sync)
140 mtd->sync(mtd);
141
142 put_mtd_device(mtd);
143
144 return 0;
145} /* mtd_close */
146
147/* FIXME: This _really_ needs to die. In 2.5, we should lock the
148 userspace buffer down and use it directly with readv/writev.
149*/
150#define MAX_KMALLOC_SIZE 0x20000
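/*
 * Reads and writes are staged through a kmalloc'd bounce buffer of at most
 * MAX_KMALLOC_SIZE (0x20000 == 128KiB) bytes.  A hypothetical 300KiB read,
 * for instance, is handled as chunks of 128KiB + 128KiB + 44KiB, each one
 * allocated, copied and freed in turn by the loops below.
 */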
151
152static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
153{
154 struct mtd_info *mtd = file->private_data;
155 size_t retlen=0;
156 size_t total_retlen=0;
157 int ret=0;
158 int len;
159 char *kbuf;
160
161 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
162
163 if (*ppos + count > mtd->size)
164 count = mtd->size - *ppos;
165
166 if (!count)
167 return 0;
168
169 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
170 and pass them directly to the MTD functions */
171 while (count) {
172 if (count > MAX_KMALLOC_SIZE)
173 len = MAX_KMALLOC_SIZE;
174 else
175 len = count;
176
177 kbuf=kmalloc(len,GFP_KERNEL);
178 if (!kbuf)
179 return -ENOMEM;
180
181 ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
182		/* NAND returns -EBADMSG on ECC errors, but it still returns
183		 * the data. For our userspace tools it is important
184		 * to dump areas with ECC errors!
185		 * Userspace software which accesses NAND this way
186		 * must be aware of the fact that it deals with NAND.
187		 */
188 if (!ret || (ret == -EBADMSG)) {
189 *ppos += retlen;
190 if (copy_to_user(buf, kbuf, retlen)) {
191 kfree(kbuf);
192 return -EFAULT;
193 }
194 else
195 total_retlen += retlen;
196
197 count -= retlen;
198 buf += retlen;
199 }
200 else {
201 kfree(kbuf);
202 return ret;
203 }
204
205 kfree(kbuf);
206 }
207
208 return total_retlen;
209} /* mtd_read */
210
211static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
212{
213 struct mtd_info *mtd = file->private_data;
214 char *kbuf;
215 size_t retlen;
216 size_t total_retlen=0;
217 int ret=0;
218 int len;
219
220 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
221
222 if (*ppos == mtd->size)
223 return -ENOSPC;
224
225 if (*ppos + count > mtd->size)
226 count = mtd->size - *ppos;
227
228 if (!count)
229 return 0;
230
231 while (count) {
232 if (count > MAX_KMALLOC_SIZE)
233 len = MAX_KMALLOC_SIZE;
234 else
235 len = count;
236
237 kbuf=kmalloc(len,GFP_KERNEL);
238 if (!kbuf) {
239 printk("kmalloc is null\n");
240 return -ENOMEM;
241 }
242
243 if (copy_from_user(kbuf, buf, len)) {
244 kfree(kbuf);
245 return -EFAULT;
246 }
247
248 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
249 if (!ret) {
250 *ppos += retlen;
251 total_retlen += retlen;
252 count -= retlen;
253 buf += retlen;
254 }
255 else {
256 kfree(kbuf);
257 return ret;
258 }
259
260 kfree(kbuf);
261 }
262
263 return total_retlen;
264} /* mtd_write */
265
266/*======================================================================
267
268 IOCTL calls for getting device parameters.
269
270======================================================================*/
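/*
 * Hypothetical userspace sketch (not part of this driver) of the MEMERASE
 * ioctl handled below -- erase the first erase block of /dev/mtd0:
 *
 *	struct erase_info_user ei = { .start = 0,
 *			.length = erasesize /* e.g. obtained via MEMGETINFO */ };
 *	int fd = open("/dev/mtd0", O_RDWR);
 *	if (ioctl(fd, MEMERASE, &ei) < 0)
 *		perror("MEMERASE");
 */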
271static void mtdchar_erase_callback (struct erase_info *instr)
272{
273 wake_up((wait_queue_head_t *)instr->priv);
274}
275
276static int mtd_ioctl(struct inode *inode, struct file *file,
277 u_int cmd, u_long arg)
278{
279 struct mtd_info *mtd = file->private_data;
280 void __user *argp = (void __user *)arg;
281 int ret = 0;
282 u_long size;
283
284 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
285
286 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
287 if (cmd & IOC_IN) {
288 if (!access_ok(VERIFY_READ, argp, size))
289 return -EFAULT;
290 }
291 if (cmd & IOC_OUT) {
292 if (!access_ok(VERIFY_WRITE, argp, size))
293 return -EFAULT;
294 }
295
296 switch (cmd) {
297 case MEMGETREGIONCOUNT:
298 if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
299 return -EFAULT;
300 break;
301
302 case MEMGETREGIONINFO:
303 {
304 struct region_info_user ur;
305
306 if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
307 return -EFAULT;
308
309 if (ur.regionindex >= mtd->numeraseregions)
310 return -EINVAL;
311 if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
312 sizeof(struct mtd_erase_region_info)))
313 return -EFAULT;
314 break;
315 }
316
317 case MEMGETINFO:
318 if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
319 return -EFAULT;
320 break;
321
322 case MEMERASE:
323 {
324 struct erase_info *erase;
325
326 if(!(file->f_mode & 2))
327 return -EPERM;
328
329 erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
330 if (!erase)
331 ret = -ENOMEM;
332 else {
333 wait_queue_head_t waitq;
334 DECLARE_WAITQUEUE(wait, current);
335
336 init_waitqueue_head(&waitq);
337
338 memset (erase,0,sizeof(struct erase_info));
339 if (copy_from_user(&erase->addr, argp,
340 sizeof(struct erase_info_user))) {
341 kfree(erase);
342 return -EFAULT;
343 }
344 erase->mtd = mtd;
345 erase->callback = mtdchar_erase_callback;
346 erase->priv = (unsigned long)&waitq;
347
348 /*
349 FIXME: Allow INTERRUPTIBLE. Which means
350 not having the wait_queue head on the stack.
351
352 If the wq_head is on the stack, and we
353 leave because we got interrupted, then the
354 wq_head is no longer there when the
355 callback routine tries to wake us up.
356 */
357 ret = mtd->erase(mtd, erase);
358 if (!ret) {
359 set_current_state(TASK_UNINTERRUPTIBLE);
360 add_wait_queue(&waitq, &wait);
361 if (erase->state != MTD_ERASE_DONE &&
362 erase->state != MTD_ERASE_FAILED)
363 schedule();
364 remove_wait_queue(&waitq, &wait);
365 set_current_state(TASK_RUNNING);
366
367 ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
368 }
369 kfree(erase);
370 }
371 break;
372 }
373
374 case MEMWRITEOOB:
375 {
376 struct mtd_oob_buf buf;
377 void *databuf;
378 ssize_t retlen;
379
380 if(!(file->f_mode & 2))
381 return -EPERM;
382
383 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
384 return -EFAULT;
385
386 if (buf.length > 0x4096)
387 return -EINVAL;
388
389 if (!mtd->write_oob)
390 ret = -EOPNOTSUPP;
391 else
392 ret = access_ok(VERIFY_READ, buf.ptr,
393					buf.length) ? 0 : -EFAULT;
394
395 if (ret)
396 return ret;
397
398 databuf = kmalloc(buf.length, GFP_KERNEL);
399 if (!databuf)
400 return -ENOMEM;
401
402 if (copy_from_user(databuf, buf.ptr, buf.length)) {
403 kfree(databuf);
404 return -EFAULT;
405 }
406
407 ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);
408
409 if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
410 ret = -EFAULT;
411
412 kfree(databuf);
413 break;
414
415 }
416
417 case MEMREADOOB:
418 {
419 struct mtd_oob_buf buf;
420 void *databuf;
421 ssize_t retlen;
422
423 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
424 return -EFAULT;
425
426 if (buf.length > 0x4096)
427 return -EINVAL;
428
429 if (!mtd->read_oob)
430 ret = -EOPNOTSUPP;
431 else
432 ret = access_ok(VERIFY_WRITE, buf.ptr,
433 buf.length) ? 0 : -EFAULT;
434
435 if (ret)
436 return ret;
437
438 databuf = kmalloc(buf.length, GFP_KERNEL);
439 if (!databuf)
440 return -ENOMEM;
441
442 ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);
443
444 if (put_user(retlen, (uint32_t __user *)argp))
445 ret = -EFAULT;
446 else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
447 ret = -EFAULT;
448
449 kfree(databuf);
450 break;
451 }
452
453 case MEMLOCK:
454 {
455 struct erase_info_user info;
456
457 if (copy_from_user(&info, argp, sizeof(info)))
458 return -EFAULT;
459
460 if (!mtd->lock)
461 ret = -EOPNOTSUPP;
462 else
463 ret = mtd->lock(mtd, info.start, info.length);
464 break;
465 }
466
467 case MEMUNLOCK:
468 {
469 struct erase_info_user info;
470
471 if (copy_from_user(&info, argp, sizeof(info)))
472 return -EFAULT;
473
474 if (!mtd->unlock)
475 ret = -EOPNOTSUPP;
476 else
477 ret = mtd->unlock(mtd, info.start, info.length);
478 break;
479 }
480
481 case MEMSETOOBSEL:
482 {
483 if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
484 return -EFAULT;
485 break;
486 }
487
488 case MEMGETOOBSEL:
489 {
490 if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
491 return -EFAULT;
492 break;
493 }
494
495 case MEMGETBADBLOCK:
496 {
497 loff_t offs;
498
499 if (copy_from_user(&offs, argp, sizeof(loff_t)))
500 return -EFAULT;
501 if (!mtd->block_isbad)
502 ret = -EOPNOTSUPP;
503 else
504 return mtd->block_isbad(mtd, offs);
505 break;
506 }
507
508 case MEMSETBADBLOCK:
509 {
510 loff_t offs;
511
512 if (copy_from_user(&offs, argp, sizeof(loff_t)))
513 return -EFAULT;
514 if (!mtd->block_markbad)
515 ret = -EOPNOTSUPP;
516 else
517 return mtd->block_markbad(mtd, offs);
518 break;
519 }
520
521 default:
522 ret = -ENOTTY;
523 }
524
525 return ret;
526} /* memory_ioctl */
527
528static struct file_operations mtd_fops = {
529 .owner = THIS_MODULE,
530 .llseek = mtd_lseek,
531 .read = mtd_read,
532 .write = mtd_write,
533 .ioctl = mtd_ioctl,
534 .open = mtd_open,
535 .release = mtd_close,
536};
537
538static int __init init_mtdchar(void)
539{
540 if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
541 printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
542 MTD_CHAR_MAJOR);
543 return -EAGAIN;
544 }
545
546 mtdchar_devfs_init();
547 return 0;
548}
549
550static void __exit cleanup_mtdchar(void)
551{
552 mtdchar_devfs_exit();
553 unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
554}
555
556module_init(init_mtdchar);
557module_exit(cleanup_mtdchar);
558
559
560MODULE_LICENSE("GPL");
561MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
562MODULE_DESCRIPTION("Direct character-device access to MTD devices");
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
new file mode 100644
index 000000000000..8f66d093c80d
--- /dev/null
+++ b/drivers/mtd/mtdconcat.c
@@ -0,0 +1,897 @@
1/*
2 * MTD device concatenation layer
3 *
4 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
5 *
6 * NAND support by Christian Gan <cgan@iders.ca>
7 *
8 * This code is GPL
9 *
10 * $Id: mtdconcat.c,v 1.9 2004/06/30 15:17:41 dbrown Exp $
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/concat.h>
20
21/*
22 * Our storage structure:
23 * Subdev points to an array of pointers to struct mtd_info objects
24 * which is allocated along with this structure
25 *
26 */
27struct mtd_concat {
28 struct mtd_info mtd;
29 int num_subdev;
30 struct mtd_info **subdev;
31};
32
33/*
34 * how to calculate the size required for the above structure,
35 * including the pointer array subdev points to:
36 */
37#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
38 ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
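/*
 * e.g. (hypothetically) for num_subdev == 3 this is sizeof(struct mtd_concat)
 * plus room for three struct mtd_info pointers; the pointer array is placed
 * directly behind the structure itself (see "concat->subdev =
 * (struct mtd_info **) (concat + 1)" in mtd_concat_create() below).
 */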
39
40/*
41 * Given a pointer to the MTD object in the mtd_concat structure,
42 * we can retrieve the pointer to that structure with this macro.
43 */
44#define CONCAT(x) ((struct mtd_concat *)(x))
45
46/*
47 * MTD methods which look up the relevant subdevice, translate the
48 * effective address and pass through to the subdevice.
49 */
50
51static int
52concat_read(struct mtd_info *mtd, loff_t from, size_t len,
53 size_t * retlen, u_char * buf)
54{
55 struct mtd_concat *concat = CONCAT(mtd);
56 int err = -EINVAL;
57 int i;
58
59 *retlen = 0;
60
61 for (i = 0; i < concat->num_subdev; i++) {
62 struct mtd_info *subdev = concat->subdev[i];
63 size_t size, retsize;
64
65 if (from >= subdev->size) {
66 /* Not destined for this subdev */
67 size = 0;
68 from -= subdev->size;
69 continue;
70 }
71 if (from + len > subdev->size)
72 /* First part goes into this subdev */
73 size = subdev->size - from;
74 else
75 /* Entire transaction goes into this subdev */
76 size = len;
77
78 err = subdev->read(subdev, from, size, &retsize, buf);
79
80 if (err)
81 break;
82
83 *retlen += retsize;
84 len -= size;
85 if (len == 0)
86 break;
87
88 err = -EINVAL;
89 buf += size;
90 from = 0;
91 }
92 return err;
93}
94
95static int
96concat_write(struct mtd_info *mtd, loff_t to, size_t len,
97 size_t * retlen, const u_char * buf)
98{
99 struct mtd_concat *concat = CONCAT(mtd);
100 int err = -EINVAL;
101 int i;
102
103 if (!(mtd->flags & MTD_WRITEABLE))
104 return -EROFS;
105
106 *retlen = 0;
107
108 for (i = 0; i < concat->num_subdev; i++) {
109 struct mtd_info *subdev = concat->subdev[i];
110 size_t size, retsize;
111
112 if (to >= subdev->size) {
113 size = 0;
114 to -= subdev->size;
115 continue;
116 }
117 if (to + len > subdev->size)
118 size = subdev->size - to;
119 else
120 size = len;
121
122 if (!(subdev->flags & MTD_WRITEABLE))
123 err = -EROFS;
124 else
125 err = subdev->write(subdev, to, size, &retsize, buf);
126
127 if (err)
128 break;
129
130 *retlen += retsize;
131 len -= size;
132 if (len == 0)
133 break;
134
135 err = -EINVAL;
136 buf += size;
137 to = 0;
138 }
139 return err;
140}
141
142static int
143concat_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
144 size_t * retlen, u_char * buf, u_char * eccbuf,
145 struct nand_oobinfo *oobsel)
146{
147 struct mtd_concat *concat = CONCAT(mtd);
148 int err = -EINVAL;
149 int i;
150
151 *retlen = 0;
152
153 for (i = 0; i < concat->num_subdev; i++) {
154 struct mtd_info *subdev = concat->subdev[i];
155 size_t size, retsize;
156
157 if (from >= subdev->size) {
158 /* Not destined for this subdev */
159 size = 0;
160 from -= subdev->size;
161 continue;
162 }
163
164 if (from + len > subdev->size)
165 /* First part goes into this subdev */
166 size = subdev->size - from;
167 else
168 /* Entire transaction goes into this subdev */
169 size = len;
170
171 if (subdev->read_ecc)
172 err = subdev->read_ecc(subdev, from, size,
173 &retsize, buf, eccbuf, oobsel);
174 else
175 err = -EINVAL;
176
177 if (err)
178 break;
179
180 *retlen += retsize;
181 len -= size;
182 if (len == 0)
183 break;
184
185 err = -EINVAL;
186 buf += size;
187 if (eccbuf) {
188 eccbuf += subdev->oobsize;
189			/* in nand.c at least, eccbufs are
190			   tagged with 2 (int) ecc status words; we
191			   must account for these */
192 eccbuf += 2 * (sizeof (int));
193 }
194 from = 0;
195 }
196 return err;
197}
198
199static int
200concat_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
201 size_t * retlen, const u_char * buf, u_char * eccbuf,
202 struct nand_oobinfo *oobsel)
203{
204 struct mtd_concat *concat = CONCAT(mtd);
205 int err = -EINVAL;
206 int i;
207
208 if (!(mtd->flags & MTD_WRITEABLE))
209 return -EROFS;
210
211 *retlen = 0;
212
213 for (i = 0; i < concat->num_subdev; i++) {
214 struct mtd_info *subdev = concat->subdev[i];
215 size_t size, retsize;
216
217 if (to >= subdev->size) {
218 size = 0;
219 to -= subdev->size;
220 continue;
221 }
222 if (to + len > subdev->size)
223 size = subdev->size - to;
224 else
225 size = len;
226
227 if (!(subdev->flags & MTD_WRITEABLE))
228 err = -EROFS;
229 else if (subdev->write_ecc)
230 err = subdev->write_ecc(subdev, to, size,
231 &retsize, buf, eccbuf, oobsel);
232 else
233 err = -EINVAL;
234
235 if (err)
236 break;
237
238 *retlen += retsize;
239 len -= size;
240 if (len == 0)
241 break;
242
243 err = -EINVAL;
244 buf += size;
245 if (eccbuf)
246 eccbuf += subdev->oobsize;
247 to = 0;
248 }
249 return err;
250}
251
252static int
253concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
254 size_t * retlen, u_char * buf)
255{
256 struct mtd_concat *concat = CONCAT(mtd);
257 int err = -EINVAL;
258 int i;
259
260 *retlen = 0;
261
262 for (i = 0; i < concat->num_subdev; i++) {
263 struct mtd_info *subdev = concat->subdev[i];
264 size_t size, retsize;
265
266 if (from >= subdev->size) {
267 /* Not destined for this subdev */
268 size = 0;
269 from -= subdev->size;
270 continue;
271 }
272 if (from + len > subdev->size)
273 /* First part goes into this subdev */
274 size = subdev->size - from;
275 else
276 /* Entire transaction goes into this subdev */
277 size = len;
278
279 if (subdev->read_oob)
280 err = subdev->read_oob(subdev, from, size,
281 &retsize, buf);
282 else
283 err = -EINVAL;
284
285 if (err)
286 break;
287
288 *retlen += retsize;
289 len -= size;
290 if (len == 0)
291 break;
292
293 err = -EINVAL;
294 buf += size;
295 from = 0;
296 }
297 return err;
298}
299
300static int
301concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
302 size_t * retlen, const u_char * buf)
303{
304 struct mtd_concat *concat = CONCAT(mtd);
305 int err = -EINVAL;
306 int i;
307
308 if (!(mtd->flags & MTD_WRITEABLE))
309 return -EROFS;
310
311 *retlen = 0;
312
313 for (i = 0; i < concat->num_subdev; i++) {
314 struct mtd_info *subdev = concat->subdev[i];
315 size_t size, retsize;
316
317 if (to >= subdev->size) {
318 size = 0;
319 to -= subdev->size;
320 continue;
321 }
322 if (to + len > subdev->size)
323 size = subdev->size - to;
324 else
325 size = len;
326
327 if (!(subdev->flags & MTD_WRITEABLE))
328 err = -EROFS;
329 else if (subdev->write_oob)
330 err = subdev->write_oob(subdev, to, size, &retsize,
331 buf);
332 else
333 err = -EINVAL;
334
335 if (err)
336 break;
337
338 *retlen += retsize;
339 len -= size;
340 if (len == 0)
341 break;
342
343 err = -EINVAL;
344 buf += size;
345 to = 0;
346 }
347 return err;
348}
349
350static void concat_erase_callback(struct erase_info *instr)
351{
352 wake_up((wait_queue_head_t *) instr->priv);
353}
354
355static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
356{
357 int err;
358 wait_queue_head_t waitq;
359 DECLARE_WAITQUEUE(wait, current);
360
361 /*
362 * This code was stol^H^H^H^Hinspired by mtdchar.c
363 */
364 init_waitqueue_head(&waitq);
365
366 erase->mtd = mtd;
367 erase->callback = concat_erase_callback;
368 erase->priv = (unsigned long) &waitq;
369
370 /*
371 * FIXME: Allow INTERRUPTIBLE. Which means
372 * not having the wait_queue head on the stack.
373 */
374 err = mtd->erase(mtd, erase);
375 if (!err) {
376 set_current_state(TASK_UNINTERRUPTIBLE);
377 add_wait_queue(&waitq, &wait);
378 if (erase->state != MTD_ERASE_DONE
379 && erase->state != MTD_ERASE_FAILED)
380 schedule();
381 remove_wait_queue(&waitq, &wait);
382 set_current_state(TASK_RUNNING);
383
384 err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
385 }
386 return err;
387}
388
389static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
390{
391 struct mtd_concat *concat = CONCAT(mtd);
392 struct mtd_info *subdev;
393 int i, err;
394 u_int32_t length, offset = 0;
395 struct erase_info *erase;
396
397 if (!(mtd->flags & MTD_WRITEABLE))
398 return -EROFS;
399
400 if (instr->addr > concat->mtd.size)
401 return -EINVAL;
402
403 if (instr->len + instr->addr > concat->mtd.size)
404 return -EINVAL;
405
406 /*
407 * Check for proper erase block alignment of the to-be-erased area.
408 * It is easier to do this based on the super device's erase
409 * region info rather than looking at each particular sub-device
410 * in turn.
411 */
412 if (!concat->mtd.numeraseregions) {
413 /* the easy case: device has uniform erase block size */
414 if (instr->addr & (concat->mtd.erasesize - 1))
415 return -EINVAL;
416 if (instr->len & (concat->mtd.erasesize - 1))
417 return -EINVAL;
418 } else {
419 /* device has variable erase size */
420 struct mtd_erase_region_info *erase_regions =
421 concat->mtd.eraseregions;
422
423 /*
424 * Find the erase region where the to-be-erased area begins:
425 */
426 for (i = 0; i < concat->mtd.numeraseregions &&
427 instr->addr >= erase_regions[i].offset; i++) ;
428 --i;
429
430 /*
431 * Now erase_regions[i] is the region in which the
432 * to-be-erased area begins. Verify that the starting
433 * offset is aligned to this region's erase size:
434 */
435 if (instr->addr & (erase_regions[i].erasesize - 1))
436 return -EINVAL;
437
438 /*
439 * now find the erase region where the to-be-erased area ends:
440 */
441 for (; i < concat->mtd.numeraseregions &&
442 (instr->addr + instr->len) >= erase_regions[i].offset;
443 ++i) ;
444 --i;
445 /*
446 * check if the ending offset is aligned to this region's erase size
447 */
448 if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
449 1))
450 return -EINVAL;
451 }
452
453 instr->fail_addr = 0xffffffff;
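	/* 0xffffffff means "no failure address recorded".  If a subdevice
	   erase fails below, its subdevice-relative fail_addr is translated
	   back into an offset within the concatenated device. */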
454
455 /* make a local copy of instr to avoid modifying the caller's struct */
456 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
457
458 if (!erase)
459 return -ENOMEM;
460
461 *erase = *instr;
462 length = instr->len;
463
464 /*
465 * find the subdevice where the to-be-erased area begins, adjust
466 * starting offset to be relative to the subdevice start
467 */
468 for (i = 0; i < concat->num_subdev; i++) {
469 subdev = concat->subdev[i];
470 if (subdev->size <= erase->addr) {
471 erase->addr -= subdev->size;
472 offset += subdev->size;
473 } else {
474 break;
475 }
476 }
477
478 /* must never happen since size limit has been verified above */
479 if (i >= concat->num_subdev)
480 BUG();
481
482 /* now do the erase: */
483 err = 0;
484 for (; length > 0; i++) {
485 /* loop for all subdevices affected by this request */
486 subdev = concat->subdev[i]; /* get current subdevice */
487
488 /* limit length to subdevice's size: */
489 if (erase->addr + length > subdev->size)
490 erase->len = subdev->size - erase->addr;
491 else
492 erase->len = length;
493
494 if (!(subdev->flags & MTD_WRITEABLE)) {
495 err = -EROFS;
496 break;
497 }
498 length -= erase->len;
499 if ((err = concat_dev_erase(subdev, erase))) {
500 /* sanity check: should never happen since
501 * block alignment has been checked above */
502 if (err == -EINVAL)
503 BUG();
504 if (erase->fail_addr != 0xffffffff)
505 instr->fail_addr = erase->fail_addr + offset;
506 break;
507 }
508 /*
509 * erase->addr specifies the offset of the area to be
510 * erased *within the current subdevice*. It can be
511 * non-zero only the first time through this loop, i.e.
512 * for the first subdevice where blocks need to be erased.
513 * All the following erases must begin at the start of the
514 * current subdevice, i.e. at offset zero.
515 */
516 erase->addr = 0;
517 offset += subdev->size;
518 }
519 instr->state = erase->state;
520 kfree(erase);
521 if (err)
522 return err;
523
524 if (instr->callback)
525 instr->callback(instr);
526 return 0;
527}
528
529static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
530{
531 struct mtd_concat *concat = CONCAT(mtd);
532 int i, err = -EINVAL;
533
534 if ((len + ofs) > mtd->size)
535 return -EINVAL;
536
537 for (i = 0; i < concat->num_subdev; i++) {
538 struct mtd_info *subdev = concat->subdev[i];
539 size_t size;
540
541 if (ofs >= subdev->size) {
542 size = 0;
543 ofs -= subdev->size;
544 continue;
545 }
546 if (ofs + len > subdev->size)
547 size = subdev->size - ofs;
548 else
549 size = len;
550
551 err = subdev->lock(subdev, ofs, size);
552
553 if (err)
554 break;
555
556 len -= size;
557 if (len == 0)
558 break;
559
560 err = -EINVAL;
561 ofs = 0;
562 }
563
564 return err;
565}
566
567static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
568{
569 struct mtd_concat *concat = CONCAT(mtd);
570 int i, err = 0;
571
572 if ((len + ofs) > mtd->size)
573 return -EINVAL;
574
575 for (i = 0; i < concat->num_subdev; i++) {
576 struct mtd_info *subdev = concat->subdev[i];
577 size_t size;
578
579 if (ofs >= subdev->size) {
580 size = 0;
581 ofs -= subdev->size;
582 continue;
583 }
584 if (ofs + len > subdev->size)
585 size = subdev->size - ofs;
586 else
587 size = len;
588
589 err = subdev->unlock(subdev, ofs, size);
590
591 if (err)
592 break;
593
594 len -= size;
595 if (len == 0)
596 break;
597
598 err = -EINVAL;
599 ofs = 0;
600 }
601
602 return err;
603}
604
605static void concat_sync(struct mtd_info *mtd)
606{
607 struct mtd_concat *concat = CONCAT(mtd);
608 int i;
609
610 for (i = 0; i < concat->num_subdev; i++) {
611 struct mtd_info *subdev = concat->subdev[i];
612 subdev->sync(subdev);
613 }
614}
615
616static int concat_suspend(struct mtd_info *mtd)
617{
618 struct mtd_concat *concat = CONCAT(mtd);
619 int i, rc = 0;
620
621 for (i = 0; i < concat->num_subdev; i++) {
622 struct mtd_info *subdev = concat->subdev[i];
623 if ((rc = subdev->suspend(subdev)) < 0)
624 return rc;
625 }
626 return rc;
627}
628
629static void concat_resume(struct mtd_info *mtd)
630{
631 struct mtd_concat *concat = CONCAT(mtd);
632 int i;
633
634 for (i = 0; i < concat->num_subdev; i++) {
635 struct mtd_info *subdev = concat->subdev[i];
636 subdev->resume(subdev);
637 }
638}
639
640/*
641 * This function constructs a virtual MTD device by concatenating
642 * num_devs MTD devices. A pointer to the new device object is
643 * returned on success. This function does _not_
644 * register any devices: this is the caller's responsibility.
645 */
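/*
 * Hypothetical caller sketch (e.g. from a map driver), assuming mtd0 and
 * mtd1 are two struct mtd_info pointers obtained elsewhere:
 *
 *	struct mtd_info *parts[2] = { mtd0, mtd1 };
 *	struct mtd_info *combined = mtd_concat_create(parts, 2, "whole-flash");
 *
 *	if (combined)
 *		add_mtd_device(combined);
 */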
646struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
647 int num_devs, /* number of subdevices */
648 char *name)
649{ /* name for the new device */
650 int i;
651 size_t size;
652 struct mtd_concat *concat;
653 u_int32_t max_erasesize, curr_erasesize;
654 int num_erase_region;
655
656 printk(KERN_NOTICE "Concatenating MTD devices:\n");
657 for (i = 0; i < num_devs; i++)
658 printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
659 printk(KERN_NOTICE "into device \"%s\"\n", name);
660
661 /* allocate the device structure */
662 size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
663 concat = kmalloc(size, GFP_KERNEL);
664 if (!concat) {
665 printk
666 ("memory allocation error while creating concatenated device \"%s\"\n",
667 name);
668 return NULL;
669 }
670 memset(concat, 0, size);
671 concat->subdev = (struct mtd_info **) (concat + 1);
672
673 /*
674 * Set up the new "super" device's MTD object structure, check for
675	 * incompatibilities between the subdevices.
676 */
677 concat->mtd.type = subdev[0]->type;
678 concat->mtd.flags = subdev[0]->flags;
679 concat->mtd.size = subdev[0]->size;
680 concat->mtd.erasesize = subdev[0]->erasesize;
681 concat->mtd.oobblock = subdev[0]->oobblock;
682 concat->mtd.oobsize = subdev[0]->oobsize;
683 concat->mtd.ecctype = subdev[0]->ecctype;
684 concat->mtd.eccsize = subdev[0]->eccsize;
685 if (subdev[0]->read_ecc)
686 concat->mtd.read_ecc = concat_read_ecc;
687 if (subdev[0]->write_ecc)
688 concat->mtd.write_ecc = concat_write_ecc;
689 if (subdev[0]->read_oob)
690 concat->mtd.read_oob = concat_read_oob;
691 if (subdev[0]->write_oob)
692 concat->mtd.write_oob = concat_write_oob;
693
694 concat->subdev[0] = subdev[0];
695
696 for (i = 1; i < num_devs; i++) {
697 if (concat->mtd.type != subdev[i]->type) {
698 kfree(concat);
699 printk("Incompatible device type on \"%s\"\n",
700 subdev[i]->name);
701 return NULL;
702 }
703 if (concat->mtd.flags != subdev[i]->flags) {
704 /*
705 * Expect all flags except MTD_WRITEABLE to be
706 * equal on all subdevices.
707 */
708 if ((concat->mtd.flags ^ subdev[i]->
709 flags) & ~MTD_WRITEABLE) {
710 kfree(concat);
711 printk("Incompatible device flags on \"%s\"\n",
712 subdev[i]->name);
713 return NULL;
714 } else
715 /* if writeable attribute differs,
716 make super device writeable */
717 concat->mtd.flags |=
718 subdev[i]->flags & MTD_WRITEABLE;
719 }
720 concat->mtd.size += subdev[i]->size;
721 if (concat->mtd.oobblock != subdev[i]->oobblock ||
722 concat->mtd.oobsize != subdev[i]->oobsize ||
723 concat->mtd.ecctype != subdev[i]->ecctype ||
724 concat->mtd.eccsize != subdev[i]->eccsize ||
725 !concat->mtd.read_ecc != !subdev[i]->read_ecc ||
726 !concat->mtd.write_ecc != !subdev[i]->write_ecc ||
727 !concat->mtd.read_oob != !subdev[i]->read_oob ||
728 !concat->mtd.write_oob != !subdev[i]->write_oob) {
729 kfree(concat);
730 printk("Incompatible OOB or ECC data on \"%s\"\n",
731 subdev[i]->name);
732 return NULL;
733 }
734 concat->subdev[i] = subdev[i];
735
736 }
737
738 concat->num_subdev = num_devs;
739 concat->mtd.name = name;
740
741 /*
742 * NOTE: for now, we do not provide any readv()/writev() methods
743 * because they are messy to implement and they are not
744 * used to a great extent anyway.
745 */
746 concat->mtd.erase = concat_erase;
747 concat->mtd.read = concat_read;
748 concat->mtd.write = concat_write;
749 concat->mtd.sync = concat_sync;
750 concat->mtd.lock = concat_lock;
751 concat->mtd.unlock = concat_unlock;
752 concat->mtd.suspend = concat_suspend;
753 concat->mtd.resume = concat_resume;
754
755 /*
756 * Combine the erase block size info of the subdevices:
757 *
758 * first, walk the map of the new device and see how
759 * many changes in erase size we have
760 */
761 max_erasesize = curr_erasesize = subdev[0]->erasesize;
762 num_erase_region = 1;
763 for (i = 0; i < num_devs; i++) {
764 if (subdev[i]->numeraseregions == 0) {
765 /* current subdevice has uniform erase size */
766 if (subdev[i]->erasesize != curr_erasesize) {
767 /* if it differs from the last subdevice's erase size, count it */
768 ++num_erase_region;
769 curr_erasesize = subdev[i]->erasesize;
770 if (curr_erasesize > max_erasesize)
771 max_erasesize = curr_erasesize;
772 }
773 } else {
774 /* current subdevice has variable erase size */
775 int j;
776 for (j = 0; j < subdev[i]->numeraseregions; j++) {
777
778 /* walk the list of erase regions, count any changes */
779 if (subdev[i]->eraseregions[j].erasesize !=
780 curr_erasesize) {
781 ++num_erase_region;
782 curr_erasesize =
783 subdev[i]->eraseregions[j].
784 erasesize;
785 if (curr_erasesize > max_erasesize)
786 max_erasesize = curr_erasesize;
787 }
788 }
789 }
790 }
791
792 if (num_erase_region == 1) {
793 /*
794 * All subdevices have the same uniform erase size.
795 * This is easy:
796 */
797 concat->mtd.erasesize = curr_erasesize;
798 concat->mtd.numeraseregions = 0;
799 } else {
800 /*
801 * erase block size varies across the subdevices: allocate
802 * space to store the data describing the variable erase regions
803 */
804 struct mtd_erase_region_info *erase_region_p;
805 u_int32_t begin, position;
806
807 concat->mtd.erasesize = max_erasesize;
808 concat->mtd.numeraseregions = num_erase_region;
809 concat->mtd.eraseregions = erase_region_p =
810 kmalloc(num_erase_region *
811 sizeof (struct mtd_erase_region_info), GFP_KERNEL);
812 if (!erase_region_p) {
813 kfree(concat);
814 printk
815 ("memory allocation error while creating erase region list"
816 " for device \"%s\"\n", name);
817 return NULL;
818 }
819
820 /*
821		 * walk the map of the new device once more and fill in
822		 * the erase region info:
823 */
824 curr_erasesize = subdev[0]->erasesize;
825 begin = position = 0;
826 for (i = 0; i < num_devs; i++) {
827 if (subdev[i]->numeraseregions == 0) {
828 /* current subdevice has uniform erase size */
829 if (subdev[i]->erasesize != curr_erasesize) {
830 /*
831 * fill in an mtd_erase_region_info structure for the area
832 * we have walked so far:
833 */
834 erase_region_p->offset = begin;
835 erase_region_p->erasesize =
836 curr_erasesize;
837 erase_region_p->numblocks =
838 (position - begin) / curr_erasesize;
839 begin = position;
840
841 curr_erasesize = subdev[i]->erasesize;
842 ++erase_region_p;
843 }
844 position += subdev[i]->size;
845 } else {
846 /* current subdevice has variable erase size */
847 int j;
848 for (j = 0; j < subdev[i]->numeraseregions; j++) {
849 /* walk the list of erase regions, count any changes */
850 if (subdev[i]->eraseregions[j].
851 erasesize != curr_erasesize) {
852 erase_region_p->offset = begin;
853 erase_region_p->erasesize =
854 curr_erasesize;
855 erase_region_p->numblocks =
856 (position -
857 begin) / curr_erasesize;
858 begin = position;
859
860 curr_erasesize =
861 subdev[i]->eraseregions[j].
862 erasesize;
863 ++erase_region_p;
864 }
865 position +=
866 subdev[i]->eraseregions[j].
867 numblocks * curr_erasesize;
868 }
869 }
870 }
871 /* Now write the final entry */
872 erase_region_p->offset = begin;
873 erase_region_p->erasesize = curr_erasesize;
874 erase_region_p->numblocks = (position - begin) / curr_erasesize;
875 }
876
877 return &concat->mtd;
878}
879
880/*
881 * This function destroys an MTD object obtained from mtd_concat_create()
882 */
883
884void mtd_concat_destroy(struct mtd_info *mtd)
885{
886 struct mtd_concat *concat = CONCAT(mtd);
887 if (concat->mtd.numeraseregions)
888 kfree(concat->mtd.eraseregions);
889 kfree(concat);
890}
891
892EXPORT_SYMBOL(mtd_concat_create);
893EXPORT_SYMBOL(mtd_concat_destroy);
894
895MODULE_LICENSE("GPL");
896MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
897MODULE_DESCRIPTION("Generic support for concatenating MTD devices");
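
As a rough usage sketch (not code from this tree): a board driver hands mtd_concat_create() an array of already-probed subdevices plus a name, registers the resulting mtd_info, and tears it down in reverse order. The mtd_concat_create() signature and the <linux/mtd/concat.h> header are assumptions inferred from the parameter names visible above.

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>	/* assumed home of mtd_concat_create() */

static struct mtd_info *example_concat;

/* Glue two already-probed chips into one logical device. */
static int example_concat_attach(struct mtd_info *chips[], int nchips)
{
	example_concat = mtd_concat_create(chips, nchips, "example-concat");
	if (!example_concat)
		return -ENXIO;
	/* add_mtd_device() returns 0 on success, 1 if the device table is full. */
	if (add_mtd_device(example_concat)) {
		mtd_concat_destroy(example_concat);
		return -ENOSPC;
	}
	return 0;
}

static void example_concat_detach(void)
{
	del_mtd_device(example_concat);
	mtd_concat_destroy(example_concat);
}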
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
new file mode 100644
index 000000000000..9c0315d1b1c4
--- /dev/null
+++ b/drivers/mtd/mtdcore.c
@@ -0,0 +1,419 @@
1/*
2 * $Id: mtdcore.c,v 1.44 2004/11/16 18:28:59 dwmw2 Exp $
3 *
4 * Core registration and callback routines for MTD
5 * drivers and users.
6 *
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/ptrace.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16#include <linux/timer.h>
17#include <linux/major.h>
18#include <linux/fs.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/mtd/compatmac.h>
22#ifdef CONFIG_PROC_FS
23#include <linux/proc_fs.h>
24#endif
25
26#include <linux/mtd/mtd.h>
27
28/* These are exported solely for the purpose of mtd_blkdevs.c. You
29 should not use them for _anything_ else */
30DECLARE_MUTEX(mtd_table_mutex);
31struct mtd_info *mtd_table[MAX_MTD_DEVICES];
32
33EXPORT_SYMBOL_GPL(mtd_table_mutex);
34EXPORT_SYMBOL_GPL(mtd_table);
35
36static LIST_HEAD(mtd_notifiers);
37
38/**
39 * add_mtd_device - register an MTD device
40 * @mtd: pointer to new MTD device info structure
41 *
42 * Add a device to the list of MTD devices present in the system, and
43 * notify each currently active MTD 'user' of its arrival. Returns
44 * zero on success or 1 on failure, which currently will only happen
45 * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
46 */
47
48int add_mtd_device(struct mtd_info *mtd)
49{
50 int i;
51
52 down(&mtd_table_mutex);
53
54 for (i=0; i < MAX_MTD_DEVICES; i++)
55 if (!mtd_table[i]) {
56 struct list_head *this;
57
58 mtd_table[i] = mtd;
59 mtd->index = i;
60 mtd->usecount = 0;
61
62 DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
63 /* No need to get a refcount on the module containing
64 the notifier, since we hold the mtd_table_mutex */
65 list_for_each(this, &mtd_notifiers) {
66 struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
67 not->add(mtd);
68 }
69
70 up(&mtd_table_mutex);
71 /* We _know_ we aren't being removed, because
72 our caller is still holding us here. So none
73 of this try_ nonsense, and no bitching about it
74 either. :) */
75 __module_get(THIS_MODULE);
76 return 0;
77 }
78
79 up(&mtd_table_mutex);
80 return 1;
81}
82
83/**
84 * del_mtd_device - unregister an MTD device
85 * @mtd: pointer to MTD device info structure
86 *
87 * Remove a device from the list of MTD devices present in the system,
88 * and notify each currently active MTD 'user' of its departure.
89 * Returns zero on success or 1 on failure, which currently will happen
90 * if the requested device does not appear to be present in the list.
91 */
92
93int del_mtd_device (struct mtd_info *mtd)
94{
95 int ret;
96
97 down(&mtd_table_mutex);
98
99 if (mtd_table[mtd->index] != mtd) {
100 ret = -ENODEV;
101 } else if (mtd->usecount) {
102 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
103 mtd->index, mtd->name, mtd->usecount);
104 ret = -EBUSY;
105 } else {
106 struct list_head *this;
107
108 /* No need to get a refcount on the module containing
109 the notifier, since we hold the mtd_table_mutex */
110 list_for_each(this, &mtd_notifiers) {
111 struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
112 not->remove(mtd);
113 }
114
115 mtd_table[mtd->index] = NULL;
116
117 module_put(THIS_MODULE);
118 ret = 0;
119 }
120
121 up(&mtd_table_mutex);
122 return ret;
123}
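
To make the return conventions documented above concrete, here is a minimal hedged sketch of how a chip driver might register and later unregister a device it has filled in elsewhere; the error codes chosen for the caller are illustrative only.

#include <linux/mtd/mtd.h>

/* Register an mtd_info that a probe routine has already populated. */
static int example_register(struct mtd_info *mtd)
{
	/* add_mtd_device() returns 0 on success, 1 when mtd_table is full. */
	if (add_mtd_device(mtd))
		return -ENOSPC;
	return 0;
}

/* Unregister it again; -EBUSY means somebody still holds a reference. */
static int example_unregister(struct mtd_info *mtd)
{
	return del_mtd_device(mtd);
}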
124
125/**
126 * register_mtd_user - register a 'user' of MTD devices.
127 * @new: pointer to notifier info structure
128 *
129 *	Registers a pair of callback functions to be called upon addition
130 * or removal of MTD devices. Causes the 'add' callback to be immediately
131 * invoked for each MTD device currently present in the system.
132 */
133
134void register_mtd_user (struct mtd_notifier *new)
135{
136 int i;
137
138 down(&mtd_table_mutex);
139
140 list_add(&new->list, &mtd_notifiers);
141
142 __module_get(THIS_MODULE);
143
144 for (i=0; i< MAX_MTD_DEVICES; i++)
145 if (mtd_table[i])
146 new->add(mtd_table[i]);
147
148 up(&mtd_table_mutex);
149}
150
151/**
152 *	unregister_mtd_user - unregister a 'user' of MTD devices.
153 *	@old: pointer to notifier info structure
154 *
155 * Removes a callback function pair from the list of 'users' to be
156 * notified upon addition or removal of MTD devices. Causes the
157 * 'remove' callback to be immediately invoked for each MTD device
158 * currently present in the system.
159 */
160
161int unregister_mtd_user (struct mtd_notifier *old)
162{
163 int i;
164
165 down(&mtd_table_mutex);
166
167 module_put(THIS_MODULE);
168
169 for (i=0; i< MAX_MTD_DEVICES; i++)
170 if (mtd_table[i])
171 old->remove(mtd_table[i]);
172
173 list_del(&old->list);
174 up(&mtd_table_mutex);
175 return 0;
176}
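
For illustration, a hedged sketch of a notifier 'user' as served by the two functions above; the mtd_notifier members (.add, .remove) are taken from the list_entry() callers in this file, everything else is invented for the example.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static void example_notify_add(struct mtd_info *mtd)
{
	printk(KERN_INFO "example: mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void example_notify_remove(struct mtd_info *mtd)
{
	printk(KERN_INFO "example: mtd%d (%s) is going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier example_notifier = {
	.add	= example_notify_add,
	.remove	= example_notify_remove,
};

/* register_mtd_user() replays .add for every device already present;
 * unregister_mtd_user() replays .remove before dropping us from the list. */
static void example_attach(void) { register_mtd_user(&example_notifier); }
static void example_detach(void) { unregister_mtd_user(&example_notifier); }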
177
178
179/**
180 * get_mtd_device - obtain a validated handle for an MTD device
181 * @mtd: last known address of the required MTD device
182 * @num: internal device number of the required MTD device
183 *
184 * Given a number and NULL address, return the num'th entry in the device
185 * table, if any. Given an address and num == -1, search the device table
186 *	for a device with that address and return it if it's still present. Given
187 *	both, return the num'th entry only if its address matches. Return NULL
188 * if not.
189 */
190
191struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
192{
193 struct mtd_info *ret = NULL;
194 int i;
195
196 down(&mtd_table_mutex);
197
198 if (num == -1) {
199 for (i=0; i< MAX_MTD_DEVICES; i++)
200 if (mtd_table[i] == mtd)
201 ret = mtd_table[i];
202 } else if (num < MAX_MTD_DEVICES) {
203 ret = mtd_table[num];
204 if (mtd && mtd != ret)
205 ret = NULL;
206 }
207
208 if (ret && !try_module_get(ret->owner))
209 ret = NULL;
210
211 if (ret)
212 ret->usecount++;
213
214 up(&mtd_table_mutex);
215 return ret;
216}
217
218void put_mtd_device(struct mtd_info *mtd)
219{
220 int c;
221
222 down(&mtd_table_mutex);
223 c = --mtd->usecount;
224 up(&mtd_table_mutex);
225 BUG_ON(c < 0);
226
227 module_put(mtd->owner);
228}
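
A small hedged sketch of the get/put pairing implemented above: borrow device 0 by number, print a couple of fields, and drop the reference again.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static void example_peek_mtd0(void)
{
	/* Bumps usecount and takes a reference on the owning module. */
	struct mtd_info *mtd = get_mtd_device(NULL, 0);

	if (!mtd)
		return;
	printk(KERN_INFO "mtd0: \"%s\", size 0x%x, erasesize 0x%x\n",
	       mtd->name, mtd->size, mtd->erasesize);
	put_mtd_device(mtd);
}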
229
230/* default_mtd_writev - default mtd writev method for MTD devices that
231 * don't implement their own
232 */
233
234int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
235 unsigned long count, loff_t to, size_t *retlen)
236{
237 unsigned long i;
238 size_t totlen = 0, thislen;
239 int ret = 0;
240
241 if(!mtd->write) {
242 ret = -EROFS;
243 } else {
244 for (i=0; i<count; i++) {
245 if (!vecs[i].iov_len)
246 continue;
247 ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
248 totlen += thislen;
249 if (ret || thislen != vecs[i].iov_len)
250 break;
251 to += vecs[i].iov_len;
252 }
253 }
254 if (retlen)
255 *retlen = totlen;
256 return ret;
257}
258
259
260/* default_mtd_readv - default mtd readv method for MTD devices that don't
261 * implement their own
262 */
263
264int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
265 unsigned long count, loff_t from, size_t *retlen)
266{
267 unsigned long i;
268 size_t totlen = 0, thislen;
269 int ret = 0;
270
271 if(!mtd->read) {
272 ret = -EIO;
273 } else {
274 for (i=0; i<count; i++) {
275 if (!vecs[i].iov_len)
276 continue;
277 ret = mtd->read(mtd, from, vecs[i].iov_len, &thislen, vecs[i].iov_base);
278 totlen += thislen;
279 if (ret || thislen != vecs[i].iov_len)
280 break;
281 from += vecs[i].iov_len;
282 }
283 }
284 if (retlen)
285 *retlen = totlen;
286 return ret;
287}
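
As a hedged illustration of the vector-write fallback above: gather a header and a payload into two kvecs and prefer the driver's own writev when it has one. struct kvec is assumed to come from <linux/uio.h>, and default_mtd_writev() is assumed to be declared in <linux/mtd/mtd.h>; the iov_base/iov_len names match the loops above.

#include <linux/uio.h>
#include <linux/mtd/mtd.h>

static int example_gathered_write(struct mtd_info *mtd, loff_t to,
				  void *hdr, size_t hdrlen,
				  void *data, size_t datalen,
				  size_t *retlen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdrlen  },
		{ .iov_base = data, .iov_len = datalen },
	};

	/* Use the driver's writev when present, else the generic loop above. */
	if (mtd->writev)
		return mtd->writev(mtd, vecs, 2, to, retlen);
	return default_mtd_writev(mtd, vecs, 2, to, retlen);
}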
288
289
290EXPORT_SYMBOL(add_mtd_device);
291EXPORT_SYMBOL(del_mtd_device);
292EXPORT_SYMBOL(get_mtd_device);
293EXPORT_SYMBOL(put_mtd_device);
294EXPORT_SYMBOL(register_mtd_user);
295EXPORT_SYMBOL(unregister_mtd_user);
296EXPORT_SYMBOL(default_mtd_writev);
297EXPORT_SYMBOL(default_mtd_readv);
298
299/*====================================================================*/
300/* Power management code */
301
302#ifdef CONFIG_PM
303
304#include <linux/pm.h>
305
306static struct pm_dev *mtd_pm_dev = NULL;
307
308static int mtd_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
309{
310 int ret = 0, i;
311
312 if (down_trylock(&mtd_table_mutex))
313 return -EAGAIN;
314 if (rqst == PM_SUSPEND) {
315 for (i = 0; ret == 0 && i < MAX_MTD_DEVICES; i++) {
316 if (mtd_table[i] && mtd_table[i]->suspend)
317 ret = mtd_table[i]->suspend(mtd_table[i]);
318 }
319 } else i = MAX_MTD_DEVICES-1;
320
321 if (rqst == PM_RESUME || ret) {
322 for ( ; i >= 0; i--) {
323 if (mtd_table[i] && mtd_table[i]->resume)
324 mtd_table[i]->resume(mtd_table[i]);
325 }
326 }
327 up(&mtd_table_mutex);
328 return ret;
329}
330#endif
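
The per-device hooks that mtd_pm_callback() drives have roughly the shape sketched below (int-returning suspend, void resume), inferred from the calls above; the bodies are placeholders, not code from any real driver.

#include <linux/mtd/mtd.h>

static int example_chip_suspend(struct mtd_info *mtd)
{
	/* Quiesce the chip.  A non-zero return makes the loop above resume
	 * every device it had already suspended. */
	return 0;
}

static void example_chip_resume(struct mtd_info *mtd)
{
	/* Bring the chip back to a known state after power returns. */
}

static void example_wire_pm_hooks(struct mtd_info *mtd)
{
	mtd->suspend = example_chip_suspend;
	mtd->resume  = example_chip_resume;
}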
331
332/*====================================================================*/
333/* Support for /proc/mtd */
334
335#ifdef CONFIG_PROC_FS
336static struct proc_dir_entry *proc_mtd;
337
338static inline int mtd_proc_info (char *buf, int i)
339{
340 struct mtd_info *this = mtd_table[i];
341
342 if (!this)
343 return 0;
344
345 return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size,
346 this->erasesize, this->name);
347}
348
349static int mtd_read_proc (char *page, char **start, off_t off, int count,
350 int *eof, void *data_unused)
351{
352 int len, l, i;
353 off_t begin = 0;
354
355 down(&mtd_table_mutex);
356
357 len = sprintf(page, "dev: size erasesize name\n");
358 for (i=0; i< MAX_MTD_DEVICES; i++) {
359
360 l = mtd_proc_info(page + len, i);
361 len += l;
362 if (len+begin > off+count)
363 goto done;
364 if (len+begin < off) {
365 begin += len;
366 len = 0;
367 }
368 }
369
370 *eof = 1;
371
372done:
373 up(&mtd_table_mutex);
374 if (off >= len+begin)
375 return 0;
376 *start = page + (off-begin);
377 return ((count < begin+len-off) ? count : begin+len-off);
378}
379
380#endif /* CONFIG_PROC_FS */
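
For reference, the table produced by mtd_read_proc() can be consumed from userspace with nothing more than stdio; the sketch below simply echoes it, and the actual device names in the output depend entirely on the running system.

/* Userspace, not kernel code: dump the /proc/mtd table shown above. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/mtd", "r");

	if (!f)
		return 1;
	/* First line is "dev:    size   erasesize  name", then one
	 * "mtdN: <size> <erasesize> "<name>"" line per device. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}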
381
382/*====================================================================*/
383/* Init code */
384
385static int __init init_mtd(void)
386{
387#ifdef CONFIG_PROC_FS
388 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
389 proc_mtd->read_proc = mtd_read_proc;
390#endif
391
392#ifdef CONFIG_PM
393 mtd_pm_dev = pm_register(PM_UNKNOWN_DEV, 0, mtd_pm_callback);
394#endif
395 return 0;
396}
397
398static void __exit cleanup_mtd(void)
399{
400#ifdef CONFIG_PM
401 if (mtd_pm_dev) {
402 pm_unregister(mtd_pm_dev);
403 mtd_pm_dev = NULL;
404 }
405#endif
406
407#ifdef CONFIG_PROC_FS
408 if (proc_mtd)
409 remove_proc_entry( "mtd", NULL);
410#endif
411}
412
413module_init(init_mtd);
414module_exit(cleanup_mtd);
415
416
417MODULE_LICENSE("GPL");
418MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
419MODULE_DESCRIPTION("Core MTD registration and access routines");
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
new file mode 100644
index 000000000000..96ebb52f24b1
--- /dev/null
+++ b/drivers/mtd/mtdpart.c
@@ -0,0 +1,599 @@
1/*
2 * Simple MTD partitioning layer
3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 *
6 * This code is GPL
7 *
8 * $Id: mtdpart.c,v 1.51 2004/11/16 18:28:59 dwmw2 Exp $
9 *
10 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
11 * added support for read_oob, write_oob
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/list.h>
19#include <linux/config.h>
20#include <linux/kmod.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/partitions.h>
23#include <linux/mtd/compatmac.h>
24
25/* Our partition linked list */
26static LIST_HEAD(mtd_partitions);
27
28/* Our partition node structure */
29struct mtd_part {
30 struct mtd_info mtd;
31 struct mtd_info *master;
32 u_int32_t offset;
33 int index;
34 struct list_head list;
35 int registered;
36};
37
38/*
39 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
40 * the pointer to that structure with this macro.
41 */
42#define PART(x) ((struct mtd_part *)(x))
43
44
45/*
46 * MTD methods which simply translate the effective address and pass through
47 * to the _real_ device.
48 */
49
50static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
51 size_t *retlen, u_char *buf)
52{
53 struct mtd_part *part = PART(mtd);
54 if (from >= mtd->size)
55 len = 0;
56 else if (from + len > mtd->size)
57 len = mtd->size - from;
58 if (part->master->read_ecc == NULL)
59 return part->master->read (part->master, from + part->offset,
60 len, retlen, buf);
61 else
62 return part->master->read_ecc (part->master, from + part->offset,
63 len, retlen, buf, NULL, &mtd->oobinfo);
64}
65
66static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
67 size_t *retlen, u_char **buf)
68{
69 struct mtd_part *part = PART(mtd);
70 if (from >= mtd->size)
71 len = 0;
72 else if (from + len > mtd->size)
73 len = mtd->size - from;
74 return part->master->point (part->master, from + part->offset,
75 len, retlen, buf);
76}
77static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
78{
79 struct mtd_part *part = PART(mtd);
80
81 part->master->unpoint (part->master, addr, from + part->offset, len);
82}
83
84
85static int part_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
86 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel)
87{
88 struct mtd_part *part = PART(mtd);
89 if (oobsel == NULL)
90 oobsel = &mtd->oobinfo;
91 if (from >= mtd->size)
92 len = 0;
93 else if (from + len > mtd->size)
94 len = mtd->size - from;
95 return part->master->read_ecc (part->master, from + part->offset,
96 len, retlen, buf, eccbuf, oobsel);
97}
98
99static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
100 size_t *retlen, u_char *buf)
101{
102 struct mtd_part *part = PART(mtd);
103 if (from >= mtd->size)
104 len = 0;
105 else if (from + len > mtd->size)
106 len = mtd->size - from;
107 return part->master->read_oob (part->master, from + part->offset,
108 len, retlen, buf);
109}
110
111static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
112 size_t *retlen, u_char *buf)
113{
114 struct mtd_part *part = PART(mtd);
115 return part->master->read_user_prot_reg (part->master, from,
116 len, retlen, buf);
117}
118
119static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
120 size_t *retlen, u_char *buf)
121{
122 struct mtd_part *part = PART(mtd);
123 return part->master->read_fact_prot_reg (part->master, from,
124 len, retlen, buf);
125}
126
127static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
128 size_t *retlen, const u_char *buf)
129{
130 struct mtd_part *part = PART(mtd);
131 if (!(mtd->flags & MTD_WRITEABLE))
132 return -EROFS;
133 if (to >= mtd->size)
134 len = 0;
135 else if (to + len > mtd->size)
136 len = mtd->size - to;
137 if (part->master->write_ecc == NULL)
138 return part->master->write (part->master, to + part->offset,
139 len, retlen, buf);
140 else
141 return part->master->write_ecc (part->master, to + part->offset,
142 len, retlen, buf, NULL, &mtd->oobinfo);
143
144}
145
146static int part_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
147 size_t *retlen, const u_char *buf,
148 u_char *eccbuf, struct nand_oobinfo *oobsel)
149{
150 struct mtd_part *part = PART(mtd);
151 if (!(mtd->flags & MTD_WRITEABLE))
152 return -EROFS;
153 if (oobsel == NULL)
154 oobsel = &mtd->oobinfo;
155 if (to >= mtd->size)
156 len = 0;
157 else if (to + len > mtd->size)
158 len = mtd->size - to;
159 return part->master->write_ecc (part->master, to + part->offset,
160 len, retlen, buf, eccbuf, oobsel);
161}
162
163static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
164 size_t *retlen, const u_char *buf)
165{
166 struct mtd_part *part = PART(mtd);
167 if (!(mtd->flags & MTD_WRITEABLE))
168 return -EROFS;
169 if (to >= mtd->size)
170 len = 0;
171 else if (to + len > mtd->size)
172 len = mtd->size - to;
173 return part->master->write_oob (part->master, to + part->offset,
174 len, retlen, buf);
175}
176
177static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
178 size_t *retlen, u_char *buf)
179{
180 struct mtd_part *part = PART(mtd);
181 return part->master->write_user_prot_reg (part->master, from,
182 len, retlen, buf);
183}
184
185static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
186 unsigned long count, loff_t to, size_t *retlen)
187{
188 struct mtd_part *part = PART(mtd);
189 if (!(mtd->flags & MTD_WRITEABLE))
190 return -EROFS;
191 if (part->master->writev_ecc == NULL)
192 return part->master->writev (part->master, vecs, count,
193 to + part->offset, retlen);
194 else
195 return part->master->writev_ecc (part->master, vecs, count,
196 to + part->offset, retlen,
197 NULL, &mtd->oobinfo);
198}
199
200static int part_readv (struct mtd_info *mtd, struct kvec *vecs,
201 unsigned long count, loff_t from, size_t *retlen)
202{
203 struct mtd_part *part = PART(mtd);
204 if (part->master->readv_ecc == NULL)
205 return part->master->readv (part->master, vecs, count,
206 from + part->offset, retlen);
207 else
208 return part->master->readv_ecc (part->master, vecs, count,
209 from + part->offset, retlen,
210 NULL, &mtd->oobinfo);
211}
212
213static int part_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
214 unsigned long count, loff_t to, size_t *retlen,
215 u_char *eccbuf, struct nand_oobinfo *oobsel)
216{
217 struct mtd_part *part = PART(mtd);
218 if (!(mtd->flags & MTD_WRITEABLE))
219 return -EROFS;
220 if (oobsel == NULL)
221 oobsel = &mtd->oobinfo;
222 return part->master->writev_ecc (part->master, vecs, count,
223 to + part->offset, retlen,
224 eccbuf, oobsel);
225}
226
227static int part_readv_ecc (struct mtd_info *mtd, struct kvec *vecs,
228 unsigned long count, loff_t from, size_t *retlen,
229 u_char *eccbuf, struct nand_oobinfo *oobsel)
230{
231 struct mtd_part *part = PART(mtd);
232 if (oobsel == NULL)
233 oobsel = &mtd->oobinfo;
234 return part->master->readv_ecc (part->master, vecs, count,
235 from + part->offset, retlen,
236 eccbuf, oobsel);
237}
238
239static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
240{
241 struct mtd_part *part = PART(mtd);
242 int ret;
243 if (!(mtd->flags & MTD_WRITEABLE))
244 return -EROFS;
245 if (instr->addr >= mtd->size)
246 return -EINVAL;
247 instr->addr += part->offset;
248 ret = part->master->erase(part->master, instr);
249 return ret;
250}
251
252void mtd_erase_callback(struct erase_info *instr)
253{
254 if (instr->mtd->erase == part_erase) {
255 struct mtd_part *part = PART(instr->mtd);
256
257 if (instr->fail_addr != 0xffffffff)
258 instr->fail_addr -= part->offset;
259 instr->addr -= part->offset;
260 }
261 if (instr->callback)
262 instr->callback(instr);
263}
264EXPORT_SYMBOL_GPL(mtd_erase_callback);
265
266static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
267{
268 struct mtd_part *part = PART(mtd);
269 if ((len + ofs) > mtd->size)
270 return -EINVAL;
271 return part->master->lock(part->master, ofs + part->offset, len);
272}
273
274static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
275{
276 struct mtd_part *part = PART(mtd);
277 if ((len + ofs) > mtd->size)
278 return -EINVAL;
279 return part->master->unlock(part->master, ofs + part->offset, len);
280}
281
282static void part_sync(struct mtd_info *mtd)
283{
284 struct mtd_part *part = PART(mtd);
285 part->master->sync(part->master);
286}
287
288static int part_suspend(struct mtd_info *mtd)
289{
290 struct mtd_part *part = PART(mtd);
291 return part->master->suspend(part->master);
292}
293
294static void part_resume(struct mtd_info *mtd)
295{
296 struct mtd_part *part = PART(mtd);
297 part->master->resume(part->master);
298}
299
300static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
301{
302 struct mtd_part *part = PART(mtd);
303 if (ofs >= mtd->size)
304 return -EINVAL;
305 ofs += part->offset;
306 return part->master->block_isbad(part->master, ofs);
307}
308
309static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
310{
311 struct mtd_part *part = PART(mtd);
312 if (!(mtd->flags & MTD_WRITEABLE))
313 return -EROFS;
314 if (ofs >= mtd->size)
315 return -EINVAL;
316 ofs += part->offset;
317 return part->master->block_markbad(part->master, ofs);
318}
319
320/*
321 * This function unregisters and destroys all slave MTD objects which are
322 * attached to the given master MTD object.
323 */
324
325int del_mtd_partitions(struct mtd_info *master)
326{
327 struct list_head *node;
328 struct mtd_part *slave;
329
330 for (node = mtd_partitions.next;
331 node != &mtd_partitions;
332 node = node->next) {
333 slave = list_entry(node, struct mtd_part, list);
334 if (slave->master == master) {
335 struct list_head *prev = node->prev;
336 __list_del(prev, node->next);
337 if(slave->registered)
338 del_mtd_device(&slave->mtd);
339 kfree(slave);
340 node = prev;
341 }
342 }
343
344 return 0;
345}
346
347/*
348 * This function, given a master MTD object and a partition table, creates
349 * and registers slave MTD objects which are bound to the master according to
350 * the partition definitions.
351 * (Q: should we register the master MTD object as well?)
352 */
353
354int add_mtd_partitions(struct mtd_info *master,
355 const struct mtd_partition *parts,
356 int nbparts)
357{
358 struct mtd_part *slave;
359 u_int32_t cur_offset = 0;
360 int i;
361
362 printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
363
364 for (i = 0; i < nbparts; i++) {
365
366 /* allocate the partition structure */
367 slave = kmalloc (sizeof(*slave), GFP_KERNEL);
368 if (!slave) {
369 printk ("memory allocation error while creating partitions for \"%s\"\n",
370 master->name);
371 del_mtd_partitions(master);
372 return -ENOMEM;
373 }
374 memset(slave, 0, sizeof(*slave));
375 list_add(&slave->list, &mtd_partitions);
376
377 /* set up the MTD object for this partition */
378 slave->mtd.type = master->type;
379 slave->mtd.flags = master->flags & ~parts[i].mask_flags;
380 slave->mtd.size = parts[i].size;
381 slave->mtd.oobblock = master->oobblock;
382 slave->mtd.oobsize = master->oobsize;
383 slave->mtd.ecctype = master->ecctype;
384 slave->mtd.eccsize = master->eccsize;
385
386 slave->mtd.name = parts[i].name;
387 slave->mtd.bank_size = master->bank_size;
388 slave->mtd.owner = master->owner;
389
390 slave->mtd.read = part_read;
391 slave->mtd.write = part_write;
392
393 if(master->point && master->unpoint){
394 slave->mtd.point = part_point;
395 slave->mtd.unpoint = part_unpoint;
396 }
397
398 if (master->read_ecc)
399 slave->mtd.read_ecc = part_read_ecc;
400 if (master->write_ecc)
401 slave->mtd.write_ecc = part_write_ecc;
402 if (master->read_oob)
403 slave->mtd.read_oob = part_read_oob;
404 if (master->write_oob)
405 slave->mtd.write_oob = part_write_oob;
406 if(master->read_user_prot_reg)
407 slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
408 if(master->read_fact_prot_reg)
409 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
410 if(master->write_user_prot_reg)
411 slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
412 if (master->sync)
413 slave->mtd.sync = part_sync;
414 if (!i && master->suspend && master->resume) {
415 slave->mtd.suspend = part_suspend;
416 slave->mtd.resume = part_resume;
417 }
418 if (master->writev)
419 slave->mtd.writev = part_writev;
420 if (master->readv)
421 slave->mtd.readv = part_readv;
422 if (master->writev_ecc)
423 slave->mtd.writev_ecc = part_writev_ecc;
424 if (master->readv_ecc)
425 slave->mtd.readv_ecc = part_readv_ecc;
426 if (master->lock)
427 slave->mtd.lock = part_lock;
428 if (master->unlock)
429 slave->mtd.unlock = part_unlock;
430 if (master->block_isbad)
431 slave->mtd.block_isbad = part_block_isbad;
432 if (master->block_markbad)
433 slave->mtd.block_markbad = part_block_markbad;
434 slave->mtd.erase = part_erase;
435 slave->master = master;
436 slave->offset = parts[i].offset;
437 slave->index = i;
438
439 if (slave->offset == MTDPART_OFS_APPEND)
440 slave->offset = cur_offset;
441 if (slave->offset == MTDPART_OFS_NXTBLK) {
442 u_int32_t emask = master->erasesize-1;
443 slave->offset = (cur_offset + emask) & ~emask;
444 if (slave->offset != cur_offset) {
445 printk(KERN_NOTICE "Moving partition %d: "
446 "0x%08x -> 0x%08x\n", i,
447 cur_offset, slave->offset);
448 }
449 }
450 if (slave->mtd.size == MTDPART_SIZ_FULL)
451 slave->mtd.size = master->size - slave->offset;
452 cur_offset = slave->offset + slave->mtd.size;
453
454 printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
455 slave->offset + slave->mtd.size, slave->mtd.name);
456
457 /* let's do some sanity checks */
458 if (slave->offset >= master->size) {
459 /* let's register it anyway to preserve ordering */
460 slave->offset = 0;
461 slave->mtd.size = 0;
462 printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
463 parts[i].name);
464 }
465 if (slave->offset + slave->mtd.size > master->size) {
466 slave->mtd.size = master->size - slave->offset;
467 printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
468 parts[i].name, master->name, slave->mtd.size);
469 }
470 if (master->numeraseregions>1) {
471 /* Deal with variable erase size stuff */
472 int i;
473 struct mtd_erase_region_info *regions = master->eraseregions;
474
475			/* Find the first erase region which is part of this partition. */
476 for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
477 ;
478
479 for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
480 if (slave->mtd.erasesize < regions[i].erasesize) {
481 slave->mtd.erasesize = regions[i].erasesize;
482 }
483 }
484 } else {
485 /* Single erase size */
486 slave->mtd.erasesize = master->erasesize;
487 }
488
489 if ((slave->mtd.flags & MTD_WRITEABLE) &&
490 (slave->offset % slave->mtd.erasesize)) {
491 /* Doesn't start on a boundary of major erase size */
492 /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
493 slave->mtd.flags &= ~MTD_WRITEABLE;
494 printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
495 parts[i].name);
496 }
497 if ((slave->mtd.flags & MTD_WRITEABLE) &&
498 (slave->mtd.size % slave->mtd.erasesize)) {
499 slave->mtd.flags &= ~MTD_WRITEABLE;
500 printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
501 parts[i].name);
502 }
503
504 /* copy oobinfo from master */
505 memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo));
506
507 if(parts[i].mtdp)
508		{	/* store the object pointer (caller may or may not register it) */
509 *parts[i].mtdp = &slave->mtd;
510 slave->registered = 0;
511 }
512 else
513 {
514 /* register our partition */
515 add_mtd_device(&slave->mtd);
516 slave->registered = 1;
517 }
518 }
519
520 return 0;
521}
522
523EXPORT_SYMBOL(add_mtd_partitions);
524EXPORT_SYMBOL(del_mtd_partitions);
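
A hedged sketch of the API exported above, using only fields and constants that appear in this file (name, offset, size, MTDPART_OFS_APPEND, MTDPART_SIZ_FULL); the two-partition layout and the 1 MiB boot size are invented for illustration.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition example_parts[] = {
	{
		.name	= "boot",
		.offset	= 0,
		.size	= 1024 * 1024,
	},
	{
		.name	= "filesystem",
		.offset	= MTDPART_OFS_APPEND,	/* start right after "boot" */
		.size	= MTDPART_SIZ_FULL,	/* take the rest of the master */
	},
};

static int example_partition(struct mtd_info *master)
{
	return add_mtd_partitions(master, example_parts, 2);
}

static void example_unpartition(struct mtd_info *master)
{
	del_mtd_partitions(master);
}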
525
526static DEFINE_SPINLOCK(part_parser_lock);
527static LIST_HEAD(part_parsers);
528
529static struct mtd_part_parser *get_partition_parser(const char *name)
530{
531 struct list_head *this;
532 void *ret = NULL;
533 spin_lock(&part_parser_lock);
534
535 list_for_each(this, &part_parsers) {
536 struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);
537
538 if (!strcmp(p->name, name) && try_module_get(p->owner)) {
539 ret = p;
540 break;
541 }
542 }
543 spin_unlock(&part_parser_lock);
544
545 return ret;
546}
547
548int register_mtd_parser(struct mtd_part_parser *p)
549{
550 spin_lock(&part_parser_lock);
551 list_add(&p->list, &part_parsers);
552 spin_unlock(&part_parser_lock);
553
554 return 0;
555}
556
557int deregister_mtd_parser(struct mtd_part_parser *p)
558{
559 spin_lock(&part_parser_lock);
560 list_del(&p->list);
561 spin_unlock(&part_parser_lock);
562 return 0;
563}
564
565int parse_mtd_partitions(struct mtd_info *master, const char **types,
566 struct mtd_partition **pparts, unsigned long origin)
567{
568 struct mtd_part_parser *parser;
569 int ret = 0;
570
571 for ( ; ret <= 0 && *types; types++) {
572 parser = get_partition_parser(*types);
573#ifdef CONFIG_KMOD
574 if (!parser && !request_module("%s", *types))
575 parser = get_partition_parser(*types);
576#endif
577 if (!parser) {
578 printk(KERN_NOTICE "%s partition parsing not available\n",
579 *types);
580 continue;
581 }
582 ret = (*parser->parse_fn)(master, pparts, origin);
583 if (ret > 0) {
584 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
585 ret, parser->name, master->name);
586 }
587 put_partition_parser(parser);
588 }
589 return ret;
590}
591
592EXPORT_SYMBOL_GPL(parse_mtd_partitions);
593EXPORT_SYMBOL_GPL(register_mtd_parser);
594EXPORT_SYMBOL_GPL(deregister_mtd_parser);
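
To show how the registry above is fed and consumed, a hedged sketch of a toy parser plus a caller; the parse_fn prototype is inferred from the (*parser->parse_fn)(master, pparts, origin) call, and the "example" parser name and single whole-device partition are made up.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Toy parser: report the whole master device as a single partition. */
static int example_parse_fn(struct mtd_info *master,
			    struct mtd_partition **pparts,
			    unsigned long origin)
{
	struct mtd_partition *parts = kmalloc(sizeof(*parts), GFP_KERNEL);

	if (!parts)
		return -ENOMEM;
	memset(parts, 0, sizeof(*parts));
	parts[0].name = "whole-device";
	parts[0].offset = 0;
	parts[0].size = MTDPART_SIZ_FULL;
	*pparts = parts;
	return 1;			/* number of partitions found */
}

static struct mtd_part_parser example_parser = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.parse_fn	= example_parse_fn,
};

/* A map driver would then ask for the parsers it trusts, in order: */
static const char *example_types[] = { "example", NULL };

static int example_probe_partitions(struct mtd_info *master)
{
	struct mtd_partition *parts;
	int nr;

	register_mtd_parser(&example_parser);
	nr = parse_mtd_partitions(master, example_types, &parts, 0);
	if (nr <= 0)
		return nr;
	return add_mtd_partitions(master, parts, nr);
}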
595
596MODULE_LICENSE("GPL");
597MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
598MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");
599
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
new file mode 100644
index 000000000000..f7801eb730ce
--- /dev/null
+++ b/drivers/mtd/nand/Kconfig
@@ -0,0 +1,207 @@
1# drivers/mtd/nand/Kconfig
2# $Id: Kconfig,v 1.26 2005/01/05 12:42:24 dwmw2 Exp $
3
4menu "NAND Flash Device Drivers"
5 depends on MTD!=n
6
7config MTD_NAND
8 tristate "NAND Device Support"
9 depends on MTD
10 select MTD_NAND_IDS
11 help
12	  This enables support for accessing all types of NAND flash
13 devices. For further information see
14 <http://www.linux-mtd.infradead.org/tech/nand.html>.
15
16config MTD_NAND_VERIFY_WRITE
17 bool "Verify NAND page writes"
18 depends on MTD_NAND
19 help
20 This adds an extra check when data is written to the flash. The
21 NAND flash device internally checks only bits transitioning
22 from 1 to 0. There is a rare possibility that even though the
23 device thinks the write was successful, a bit could have been
24	  flipped accidentally due to device wear or something else.
25
26config MTD_NAND_AUTCPU12
27 tristate "SmartMediaCard on autronix autcpu12 board"
28 depends on ARM && MTD_NAND && ARCH_AUTCPU12
29 help
30 This enables the driver for the autronix autcpu12 board to
31 access the SmartMediaCard.
32
33config MTD_NAND_EDB7312
34	tristate "Support for Cirrus Logic EDB7312 evaluation board"
35 depends on ARM && MTD_NAND && ARCH_EDB7312
36 help
37	  This enables the driver for the Cirrus Logic EDB7312 evaluation
38 board to access the onboard NAND Flash.
39
40config MTD_NAND_H1900
41 tristate "iPAQ H1900 flash"
42 depends on ARM && MTD_NAND && ARCH_PXA && MTD_PARTITIONS
43 help
44 This enables the driver for the iPAQ h1900 flash.
45
46config MTD_NAND_SPIA
47 tristate "NAND Flash device on SPIA board"
48 depends on ARM && ARCH_P720T && MTD_NAND
49 help
50 If you had to ask, you don't have one. Say 'N'.
51
52config MTD_NAND_TOTO
53 tristate "NAND Flash device on TOTO board"
54 depends on ARM && ARCH_OMAP && MTD_NAND
55 help
56 Support for NAND flash on Texas Instruments Toto platform.
57
58config MTD_NAND_IDS
59 tristate
60
61config MTD_NAND_TX4925NDFMC
62 tristate "SmartMedia Card on Toshiba RBTX4925 reference board"
63 depends on TOSHIBA_RBTX4925 && MTD_NAND && TOSHIBA_RBTX4925_MPLEX_NAND
64 help
65 This enables the driver for the NAND flash device found on the
66 Toshiba RBTX4925 reference board, which is a SmartMediaCard.
67
68config MTD_NAND_TX4938NDFMC
69 tristate "NAND Flash device on Toshiba RBTX4938 reference board"
70 depends on TOSHIBA_RBTX4938 && MTD_NAND && TOSHIBA_RBTX4938_MPLEX_NAND
71 help
72 This enables the driver for the NAND flash device found on the
73 Toshiba RBTX4938 reference board.
74
75config MTD_NAND_AU1550
76 tristate "Au1550 NAND support"
77 depends on SOC_AU1550 && MTD_NAND
78 help
79 This enables the driver for the NAND flash controller on the
80 AMD/Alchemy 1550 SOC.
81
82config MTD_NAND_RTC_FROM4
83 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
84 depends on MTD_NAND && SH_SOLUTION_ENGINE
85 select REED_SOLOMON
86 select REED_SOLOMON_DEC8
87 help
88 This enables the driver for the Renesas Technology AG-AND
89 flash interface board (FROM_BOARD4)
90
91config MTD_NAND_PPCHAMELEONEVB
92 tristate "NAND Flash device on PPChameleonEVB board"
93 depends on PPCHAMELEONEVB && MTD_NAND
94 help
95 This enables the NAND flash driver on the PPChameleon EVB Board.
96
97config MTD_NAND_S3C2410
98 tristate "NAND Flash support for S3C2410 SoC"
99 depends on ARCH_S3C2410 && MTD_NAND
100 help
101 This enables the NAND flash controller on the S3C2410.
102
103	  No board specific support is done by this driver; each board
104 must advertise a platform_device for the driver to attach.
105
106config MTD_NAND_S3C2410_DEBUG
107 bool "S3C2410 NAND driver debug"
108 depends on MTD_NAND_S3C2410
109 help
110 Enable debugging of the S3C2410 NAND driver
111
112config MTD_NAND_S3C2410_HWECC
113 bool "S3C2410 NAND Hardware ECC"
114 depends on MTD_NAND_S3C2410
115 help
116 Enable the use of the S3C2410's internal ECC generator when
117 using NAND. Early versions of the chip have had problems with
118 incorrect ECC generation, and if using these, the default of
119 software ECC is preferable.
120
121 If you lay down a device with the hardware ECC, then you will
122 currently not be able to switch to software, as there is no
123	  implementation for the ECC method used by the S3C2410.
124
125config MTD_NAND_DISKONCHIP
126 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
127 depends on MTD_NAND && EXPERIMENTAL
128 select REED_SOLOMON
129 select REED_SOLOMON_DEC16
130 help
131 This is a reimplementation of M-Systems DiskOnChip 2000,
132 Millennium and Millennium Plus as a standard NAND device driver,
133 as opposed to the earlier self-contained MTD device drivers.
134 This should enable, among other things, proper JFFS2 operation on
135 these devices.
136
137config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
138 bool "Advanced detection options for DiskOnChip"
139 depends on MTD_NAND_DISKONCHIP
140 help
141	  This option allows you to specify a nonstandard address at which to
142 probe for a DiskOnChip, or to change the detection options. You
143 are unlikely to need any of this unless you are using LinuxBIOS.
144 Say 'N'.
145
146config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
147 hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
148 depends on MTD_NAND_DISKONCHIP
149 default "0"
150 ---help---
151 By default, the probe for DiskOnChip devices will look for a
152 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
153 This option allows you to specify a single address at which to probe
154 for the device, which is useful if you have other devices in that
155 range which get upset when they are probed.
156
157 (Note that on PowerPC, the normal probe will only check at
158 0xE4000000.)
159
160 Normally, you should leave this set to zero, to allow the probe at
161 the normal addresses.
162
163config MTD_NAND_DISKONCHIP_PROBE_HIGH
164 bool "Probe high addresses"
165 depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
166 help
167 By default, the probe for DiskOnChip devices will look for a
168 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
169 This option changes to make it probe between 0xFFFC8000 and
170 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
171 useful to you. Say 'N'.
172
173config MTD_NAND_DISKONCHIP_BBTWRITE
174 bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
175 depends on MTD_NAND_DISKONCHIP
176 help
177 On DiskOnChip devices shipped with the INFTL filesystem (Millennium
178 and 2000 TSOP/Alon), Linux reserves some space at the end of the
179 device for the Bad Block Table (BBT). If you have existing INFTL
180 data on your device (created by non-Linux tools such as M-Systems'
181 DOS drivers), your data might overlap the area Linux wants to use for
182 the BBT. If this is a concern for you, leave this option disabled and
183 Linux will not write BBT data into this area.
184 The downside of leaving this option disabled is that if bad blocks
185 are detected by Linux, they will not be recorded in the BBT, which
186 could cause future problems.
187 Once you enable this option, new filesystems (INFTL or others, created
188 in Linux or other operating systems) will not use the reserved area.
189 The only reason not to enable this option is to prevent damage to
190 preexisting filesystems.
191 Even if you leave this disabled, you can enable BBT writes at module
192 load time (assuming you build diskonchip as a module) with the module
193 parameter "inftl_bbt_write=1".
194
195config MTD_NAND_SHARPSL
196	bool "Support for NAND Flash on Sharp SL Series (C7xx + others)"
197	depends on MTD_NAND && ARCH_PXA
198
199config MTD_NAND_NANDSIM
200	bool "Support for NAND Flash Simulator"
201	depends on MTD_NAND && MTD_PARTITIONS
202
203	help
204	  The simulator may simulate various NAND flash chips for the
205	  MTD NAND layer.
206
207endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
new file mode 100644
index 000000000000..d9dc8cc2da8c
--- /dev/null
+++ b/drivers/mtd/nand/Makefile
@@ -0,0 +1,24 @@
1#
2# linux/drivers/mtd/nand/Makefile
3#
4# $Id: Makefile.common,v 1.15 2004/11/26 12:28:22 dedekind Exp $
5
6obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
7obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
8
9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
10obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
11obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
12obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
13obj-$(CONFIG_MTD_NAND_TX4925NDFMC) += tx4925ndfmc.o
14obj-$(CONFIG_MTD_NAND_TX4938NDFMC) += tx4938ndfmc.o
15obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
16obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
17obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
18obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
19obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
20obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
21obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
22obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
23
24nand-objs = nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
new file mode 100644
index 000000000000..4c7719ce3f48
--- /dev/null
+++ b/drivers/mtd/nand/au1550nd.c
@@ -0,0 +1,477 @@
1/*
2 * drivers/mtd/nand/au1550nd.c
3 *
4 * Copyright (C) 2004 Embedded Edge, LLC
5 *
6 * $Id: au1550nd.c,v 1.11 2004/11/04 12:53:10 gleixner Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h>
20#include <asm/io.h>
21
22/* fixme: this is ugly */
23#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
24#include <asm/mach-au1x00/au1000.h>
25#ifdef CONFIG_MIPS_PB1550
26#include <asm/mach-pb1x00/pb1550.h>
27#endif
28#ifdef CONFIG_MIPS_DB1550
29#include <asm/mach-db1x00/db1x00.h>
30#endif
31#else
32#include <asm/au1000.h>
33#ifdef CONFIG_MIPS_PB1550
34#include <asm/pb1550.h>
35#endif
36#ifdef CONFIG_MIPS_DB1550
37#include <asm/db1x00.h>
38#endif
39#endif
40
41/*
42 * MTD structure for NAND controller
43 */
44static struct mtd_info *au1550_mtd = NULL;
45static void __iomem *p_nand;
46static int nand_width = 1; /* default x8*/
47
48#define NAND_CS 1
49
50/*
51 * Define partitions for flash device
52 */
53static const struct mtd_partition partition_info[] = {
54#ifdef CONFIG_MIPS_PB1550
55#define NUM_PARTITIONS 2
56 {
57 .name = "Pb1550 NAND FS 0",
58 .offset = 0,
59 .size = 8*1024*1024
60 },
61 {
62 .name = "Pb1550 NAND FS 1",
63 .offset = MTDPART_OFS_APPEND,
64 .size = MTDPART_SIZ_FULL
65 }
66#endif
67#ifdef CONFIG_MIPS_DB1550
68#define NUM_PARTITIONS 2
69 {
70 .name = "Db1550 NAND FS 0",
71 .offset = 0,
72 .size = 8*1024*1024
73 },
74 {
75 .name = "Db1550 NAND FS 1",
76 .offset = MTDPART_OFS_APPEND,
77 .size = MTDPART_SIZ_FULL
78 }
79#endif
80};
81
82
83/**
84 * au_read_byte - read one byte from the chip
85 * @mtd: MTD device structure
86 *
87 * read function for 8-bit bus width
88 */
89static u_char au_read_byte(struct mtd_info *mtd)
90{
91 struct nand_chip *this = mtd->priv;
92 u_char ret = readb(this->IO_ADDR_R);
93 au_sync();
94 return ret;
95}
96
97/**
98 * au_write_byte - write one byte to the chip
99 * @mtd: MTD device structure
100 * @byte:	data byte to write
101 *
102 * write function for 8-bit bus width
103 */
104static void au_write_byte(struct mtd_info *mtd, u_char byte)
105{
106 struct nand_chip *this = mtd->priv;
107 writeb(byte, this->IO_ADDR_W);
108 au_sync();
109}
110
111/**
112 * au_read_byte16 - read one byte endianness aware from the chip
113 * @mtd:	MTD device structure
114 *
115 * read function for 16-bit bus width with
116 * endianness conversion
117 */
118static u_char au_read_byte16(struct mtd_info *mtd)
119{
120 struct nand_chip *this = mtd->priv;
121 u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
122 au_sync();
123 return ret;
124}
125
126/**
127 * au_write_byte16 - write one byte endianness aware to the chip
128 * @mtd:	MTD device structure
129 * @byte:	data byte to write
130 *
131 * write function for 16-bit bus width with
132 * endianness conversion
133 */
134static void au_write_byte16(struct mtd_info *mtd, u_char byte)
135{
136 struct nand_chip *this = mtd->priv;
137 writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
138 au_sync();
139}
140
141/**
142 * au_read_word - read one word from the chip
143 * @mtd: MTD device structure
144 *
145 * read function for 16-bit bus width without
146 * endianness conversion
147 */
148static u16 au_read_word(struct mtd_info *mtd)
149{
150 struct nand_chip *this = mtd->priv;
151 u16 ret = readw(this->IO_ADDR_R);
152 au_sync();
153 return ret;
154}
155
156/**
157 * au_write_word - write one word to the chip
158 * @mtd: MTD device structure
159 * @word: data word to write
160 *
161 * write function for 16-bit bus width without
162 * endianness conversion
163 */
164static void au_write_word(struct mtd_info *mtd, u16 word)
165{
166 struct nand_chip *this = mtd->priv;
167 writew(word, this->IO_ADDR_W);
168 au_sync();
169}
170
171/**
172 * au_write_buf - write buffer to chip
173 * @mtd: MTD device structure
174 * @buf: data buffer
175 * @len: number of bytes to write
176 *
177 * write function for 8-bit bus width
178 */
179static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
180{
181 int i;
182 struct nand_chip *this = mtd->priv;
183
184 for (i=0; i<len; i++) {
185 writeb(buf[i], this->IO_ADDR_W);
186 au_sync();
187 }
188}
189
190/**
191 * au_read_buf - read chip data into buffer
192 * @mtd: MTD device structure
193 * @buf:	buffer to store data
194 * @len:	number of bytes to read
195 *
196 * read function for 8-bit bus width
197 */
198static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
199{
200 int i;
201 struct nand_chip *this = mtd->priv;
202
203 for (i=0; i<len; i++) {
204 buf[i] = readb(this->IO_ADDR_R);
205 au_sync();
206 }
207}
208
209/**
210 * au_verify_buf - Verify chip data against buffer
211 * @mtd: MTD device structure
212 * @buf: buffer containing the data to compare
213 * @len: number of bytes to compare
214 *
215 * verify function for 8-bit bus width
216 */
217static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
218{
219 int i;
220 struct nand_chip *this = mtd->priv;
221
222 for (i=0; i<len; i++) {
223 if (buf[i] != readb(this->IO_ADDR_R))
224 return -EFAULT;
225 au_sync();
226 }
227
228 return 0;
229}
230
231/**
232 * au_write_buf16 - write buffer to chip
233 * @mtd: MTD device structure
234 * @buf: data buffer
235 * @len: number of bytes to write
236 *
237 * write function for 16-bit bus width
238 */
239static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
240{
241 int i;
242 struct nand_chip *this = mtd->priv;
243 u16 *p = (u16 *) buf;
244 len >>= 1;
245
246 for (i=0; i<len; i++) {
247 writew(p[i], this->IO_ADDR_W);
248 au_sync();
249 }
250
251}
252
253/**
254 * au_read_buf16 - read chip data into buffer
255 * @mtd: MTD device structure
256 * @buf:	buffer to store data
257 * @len:	number of bytes to read
258 *
259 * read function for 16-bit bus width
260 */
261static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
262{
263 int i;
264 struct nand_chip *this = mtd->priv;
265 u16 *p = (u16 *) buf;
266 len >>= 1;
267
268 for (i=0; i<len; i++) {
269 p[i] = readw(this->IO_ADDR_R);
270 au_sync();
271 }
272}
273
274/**
275 * au_verify_buf16 - Verify chip data against buffer
276 * @mtd: MTD device structure
277 * @buf: buffer containing the data to compare
278 * @len: number of bytes to compare
279 *
280 * verify function for 16-bit bus width
281 */
282static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
283{
284 int i;
285 struct nand_chip *this = mtd->priv;
286 u16 *p = (u16 *) buf;
287 len >>= 1;
288
289 for (i=0; i<len; i++) {
290 if (p[i] != readw(this->IO_ADDR_R))
291 return -EFAULT;
292 au_sync();
293 }
294 return 0;
295}
296
297
298static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
299{
300 register struct nand_chip *this = mtd->priv;
301
302 switch(cmd){
303
304 case NAND_CTL_SETCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_CMD; break;
305 case NAND_CTL_CLRCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_DATA; break;
306
307 case NAND_CTL_SETALE: this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR; break;
308 case NAND_CTL_CLRALE:
309 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
310		/* FIXME: Nobody knows why this is necessary,
311 * but it works only that way */
312 udelay(1);
313 break;
314
315 case NAND_CTL_SETNCE:
316 /* assert (force assert) chip enable */
317		au_writel((1<<(4+NAND_CS)), MEM_STNDCTL);
318		break;
319
320 case NAND_CTL_CLRNCE:
321 /* deassert chip enable */
322		au_writel(0, MEM_STNDCTL);
323		break;
324 }
325
326 this->IO_ADDR_R = this->IO_ADDR_W;
327
328 /* Drain the writebuffer */
329 au_sync();
330}
331
332int au1550_device_ready(struct mtd_info *mtd)
333{
334 int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
335 au_sync();
336 return ret;
337}
338
339/*
340 * Main initialization routine
341 */
342int __init au1550_init (void)
343{
344 struct nand_chip *this;
345 u16 boot_swapboot = 0; /* default value */
346 int retval;
347
348 /* Allocate memory for MTD device structure and private data */
349 au1550_mtd = kmalloc (sizeof(struct mtd_info) +
350 sizeof (struct nand_chip), GFP_KERNEL);
351 if (!au1550_mtd) {
352 printk ("Unable to allocate NAND MTD dev structure.\n");
353 return -ENOMEM;
354 }
355
356 /* Get pointer to private data */
357 this = (struct nand_chip *) (&au1550_mtd[1]);
358
359 /* Initialize structures */
360 memset((char *) au1550_mtd, 0, sizeof(struct mtd_info));
361 memset((char *) this, 0, sizeof(struct nand_chip));
362
363 /* Link the private data with the MTD structure */
364 au1550_mtd->priv = this;
365
366
367 /* MEM_STNDCTL: disable ints, disable nand boot */
368 au_writel(0, MEM_STNDCTL);
369
370#ifdef CONFIG_MIPS_PB1550
371 /* set gpio206 high */
372 au_writel(au_readl(GPIO2_DIR) & ~(1<<6), GPIO2_DIR);
373
374 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) |
375 ((bcsr->status >> 6) & 0x1);
376 switch (boot_swapboot) {
377 case 0:
378 case 2:
379 case 8:
380 case 0xC:
381 case 0xD:
382 /* x16 NAND Flash */
383 nand_width = 0;
384 break;
385 case 1:
386 case 9:
387 case 3:
388 case 0xE:
389 case 0xF:
390 /* x8 NAND Flash */
391 nand_width = 1;
392 break;
393 default:
394 printk("Pb1550 NAND: bad boot:swap\n");
395 retval = -EINVAL;
396 goto outmem;
397 }
398#endif
399
400 /* Configure RCE1 - should be done by YAMON */
401 au_writel(0x5 | (nand_width << 22), 0xB4001010); /* MEM_STCFG1 */
402 au_writel(NAND_TIMING, 0xB4001014); /* MEM_STTIME1 */
403 au_sync();
404
405 /* setup and enable chip select, MEM_STADDR1 */
406 /* we really need to decode offsets only up till 0x20 */
407 au_writel((1<<28) | (NAND_PHYS_ADDR>>4) |
408 (((NAND_PHYS_ADDR + 0x1000)-1) & (0x3fff<<18)>>18),
409 MEM_STADDR1);
410 au_sync();
411
412 p_nand = ioremap(NAND_PHYS_ADDR, 0x1000);
413
414 /* Set address of hardware control function */
415 this->hwcontrol = au1550_hwcontrol;
416 this->dev_ready = au1550_device_ready;
417 /* 30 us command delay time */
418 this->chip_delay = 30;
419 this->eccmode = NAND_ECC_SOFT;
420
421 this->options = NAND_NO_AUTOINCR;
422
423 if (!nand_width)
424 this->options |= NAND_BUSWIDTH_16;
425
426 this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
427 this->write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
428 this->write_word = au_write_word;
429 this->read_word = au_read_word;
430 this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
431 this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
432 this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
433
434 /* Scan to find existence of the device */
435 if (nand_scan (au1550_mtd, 1)) {
436 retval = -ENXIO;
437 goto outio;
438 }
439
440 /* Register the partitions */
441 add_mtd_partitions(au1550_mtd, partition_info, NUM_PARTITIONS);
442
443 return 0;
444
445 outio:
446 iounmap ((void *)p_nand);
447
448 outmem:
449 kfree (au1550_mtd);
450 return retval;
451}
452
453module_init(au1550_init);
454
455/*
456 * Clean up routine
457 */
458#ifdef MODULE
459static void __exit au1550_cleanup (void)
460{
461 struct nand_chip *this = (struct nand_chip *) &au1550_mtd[1];
462
463 /* Release resources, unregister device */
464 nand_release (au1550_mtd);
465
466 /* Free the MTD device structure */
467 kfree (au1550_mtd);
468
469 /* Unmap */
470 iounmap ((void *)p_nand);
471}
472module_exit(au1550_cleanup);
473#endif
474
475MODULE_LICENSE("GPL");
476MODULE_AUTHOR("Embedded Edge, LLC");
477MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
new file mode 100644
index 000000000000..4afa8ced05ad
--- /dev/null
+++ b/drivers/mtd/nand/autcpu12.c
@@ -0,0 +1,225 @@
1/*
2 * drivers/mtd/autcpu12.c
3 *
4 * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
5 *
6 * Derived from drivers/mtd/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 *
9 * $Id: autcpu12.c,v 1.22 2004/11/04 12:53:10 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Overview:
16 * This is a device driver for the NAND flash device found on the
17 * autronix autcpu12 board, which is a SmartMediaCard. It supports
18 *  16MiB, 32MiB, 64MiB and 128MiB cards.
19 *
20 *
21 * 02-12-2002 TG Cleanup of module params
22 *
23 *	02-20-2002 TG	adjusted for different rd/wr address support
24 * added support for read device ready/busy line
25 * added page_cache
26 *
27 * 10-06-2002 TG 128K card support added
28 */
29
30#include <linux/version.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/nand.h>
36#include <linux/mtd/partitions.h>
37#include <asm/io.h>
38#include <asm/arch/hardware.h>
39#include <asm/sizes.h>
40#include <asm/arch/autcpu12.h>
41
42/*
43 * MTD structure for AUTCPU12 board
44 */
45static struct mtd_info *autcpu12_mtd = NULL;
46
47static int autcpu12_io_base = CS89712_VIRT_BASE;
48static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
49static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
50static int autcpu12_pedr = AUTCPU12_SMC_PORT_OFFSET;
51static void __iomem * autcpu12_fio_base;
52
53/*
54 * Define partitions for flash devices
55 */
56static struct mtd_partition partition_info16k[] = {
57 { .name = "AUTCPU12 flash partition 1",
58 .offset = 0,
59 .size = 8 * SZ_1M },
60 { .name = "AUTCPU12 flash partition 2",
61 .offset = 8 * SZ_1M,
62 .size = 8 * SZ_1M },
63};
64
65static struct mtd_partition partition_info32k[] = {
66 { .name = "AUTCPU12 flash partition 1",
67 .offset = 0,
68 .size = 8 * SZ_1M },
69 { .name = "AUTCPU12 flash partition 2",
70 .offset = 8 * SZ_1M,
71 .size = 24 * SZ_1M },
72};
73
74static struct mtd_partition partition_info64k[] = {
75 { .name = "AUTCPU12 flash partition 1",
76 .offset = 0,
77 .size = 16 * SZ_1M },
78 { .name = "AUTCPU12 flash partition 2",
79 .offset = 16 * SZ_1M,
80 .size = 48 * SZ_1M },
81};
82
83static struct mtd_partition partition_info128k[] = {
84 { .name = "AUTCPU12 flash partition 1",
85 .offset = 0,
86 .size = 16 * SZ_1M },
87 { .name = "AUTCPU12 flash partition 2",
88 .offset = 16 * SZ_1M,
89 .size = 112 * SZ_1M },
90};
91
92#define NUM_PARTITIONS16K 2
93#define NUM_PARTITIONS32K 2
94#define NUM_PARTITIONS64K 2
95#define NUM_PARTITIONS128K 2
96/*
97 * hardware specific access to control-lines
98*/
99static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd)
100{
101
102 switch(cmd){
103
104 case NAND_CTL_SETCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_CLE; break;
105 case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_CLE; break;
106
107 case NAND_CTL_SETALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_ALE; break;
108 case NAND_CTL_CLRALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_ALE; break;
109
110 case NAND_CTL_SETNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x01; break;
111 case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x00; break;
112 }
113}
114
115/*
116* read device ready pin
117*/
118int autcpu12_device_ready(struct mtd_info *mtd)
119{
120
121 return ( (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) & AUTCPU12_SMC_RDY) ? 1 : 0;
122
123}
124
125/*
126 * Main initialization routine
127 */
128int __init autcpu12_init (void)
129{
130 struct nand_chip *this;
131 int err = 0;
132
133 /* Allocate memory for MTD device structure and private data */
134 autcpu12_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
135 GFP_KERNEL);
136 if (!autcpu12_mtd) {
137 printk ("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
138 err = -ENOMEM;
139 goto out;
140 }
141
142	/* map physical address */
143 autcpu12_fio_base = ioremap(autcpu12_fio_pbase,SZ_1K);
144 if(!autcpu12_fio_base){
145 printk("Ioremap autcpu12 SmartMedia Card failed\n");
146 err = -EIO;
147 goto out_mtd;
148 }
149
150 /* Get pointer to private data */
151 this = (struct nand_chip *) (&autcpu12_mtd[1]);
152
153 /* Initialize structures */
154 memset((char *) autcpu12_mtd, 0, sizeof(struct mtd_info));
155 memset((char *) this, 0, sizeof(struct nand_chip));
156
157 /* Link the private data with the MTD structure */
158 autcpu12_mtd->priv = this;
159
160 /* Set address of NAND IO lines */
161 this->IO_ADDR_R = autcpu12_fio_base;
162 this->IO_ADDR_W = autcpu12_fio_base;
163 this->hwcontrol = autcpu12_hwcontrol;
164 this->dev_ready = autcpu12_device_ready;
165 /* 20 us command delay time */
166 this->chip_delay = 20;
167 this->eccmode = NAND_ECC_SOFT;
168
169	/* Use a flash based bad block table */
170	this->options = NAND_USE_FLASH_BBT;
174
175	/* Scan to find existence of the device */
176 if (nand_scan (autcpu12_mtd, 1)) {
177 err = -ENXIO;
178 goto out_ior;
179 }
180
181 /* Register the partitions */
182 switch(autcpu12_mtd->size){
183 case SZ_16M: add_mtd_partitions(autcpu12_mtd, partition_info16k, NUM_PARTITIONS16K); break;
184 case SZ_32M: add_mtd_partitions(autcpu12_mtd, partition_info32k, NUM_PARTITIONS32K); break;
185 case SZ_64M: add_mtd_partitions(autcpu12_mtd, partition_info64k, NUM_PARTITIONS64K); break;
186 case SZ_128M: add_mtd_partitions(autcpu12_mtd, partition_info128k, NUM_PARTITIONS128K); break;
187 default: {
188 printk ("Unsupported SmartMedia device\n");
189 err = -ENXIO;
190 goto out_ior;
191 }
192 }
193 goto out;
194
195out_ior:
196 iounmap((void *)autcpu12_fio_base);
197out_mtd:
198 kfree (autcpu12_mtd);
199out:
200 return err;
201}
202
203module_init(autcpu12_init);
204
205/*
206 * Clean up routine
207 */
208#ifdef MODULE
209static void __exit autcpu12_cleanup (void)
210{
211 /* Release resources, unregister device */
212 nand_release (autcpu12_mtd);
213
214	/* unmap physical address */
215 iounmap((void *)autcpu12_fio_base);
216
217 /* Free the MTD device structure */
218 kfree (autcpu12_mtd);
219}
220module_exit(autcpu12_cleanup);
221#endif
222
223MODULE_LICENSE("GPL");
224MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
225MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
new file mode 100644
index 000000000000..02135c3ac29a
--- /dev/null
+++ b/drivers/mtd/nand/diskonchip.c
@@ -0,0 +1,1782 @@
1/*
2 * drivers/mtd/nand/diskonchip.c
3 *
4 * (C) 2003 Red Hat, Inc.
5 * (C) 2004 Dan Brown <dan_brown@ieee.org>
6 * (C) 2004 Kalev Lember <kalev@smartlink.ee>
7 *
8 * Author: David Woodhouse <dwmw2@infradead.org>
9 * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
10 * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
11 *
12 * Error correction code lifted from the old docecc code
13 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
14 * Copyright (C) 2000 Netgem S.A.
15 * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
16 *
17 * Interface to generic NAND code for M-Systems DiskOnChip devices
18 *
19 * $Id: diskonchip.c,v 1.45 2005/01/05 18:05:14 dwmw2 Exp $
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/sched.h>
25#include <linux/delay.h>
26#include <linux/rslib.h>
27#include <linux/moduleparam.h>
28#include <asm/io.h>
29
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/nand.h>
32#include <linux/mtd/doc2000.h>
33#include <linux/mtd/compatmac.h>
34#include <linux/mtd/partitions.h>
35#include <linux/mtd/inftl.h>
36
37/* Where to look for the devices? */
38#ifndef CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS
39#define CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS 0
40#endif
41
42static unsigned long __initdata doc_locations[] = {
43#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
44#ifdef CONFIG_MTD_DISKONCHIP_PROBE_HIGH
45 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
46 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
47 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
48 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
49 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
50#else /* CONFIG_MTD_DISKONCHIP_PROBE_HIGH */
51 0xc8000, 0xca000, 0xcc000, 0xce000,
52 0xd0000, 0xd2000, 0xd4000, 0xd6000,
53 0xd8000, 0xda000, 0xdc000, 0xde000,
54 0xe0000, 0xe2000, 0xe4000, 0xe6000,
55 0xe8000, 0xea000, 0xec000, 0xee000,
56#endif /* CONFIG_MTD_DISKONCHIP_PROBE_HIGH */
57#elif defined(__PPC__)
58 0xe4000000,
59#elif defined(CONFIG_MOMENCO_OCELOT)
60 0x2f000000,
61 0xff000000,
62#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
63 0xff000000,
64#else
65#warning Unknown architecture for DiskOnChip. No default probe locations defined
66#endif
67 0xffffffff };
68
69static struct mtd_info *doclist = NULL;
70
71struct doc_priv {
72 void __iomem *virtadr;
73 unsigned long physadr;
74 u_char ChipID;
75 u_char CDSNControl;
76 int chips_per_floor; /* The number of chips detected on each floor */
77 int curfloor;
78 int curchip;
79 int mh0_page;
80 int mh1_page;
81 struct mtd_info *nextdoc;
82};
83
84/* Max number of eraseblocks to scan (from start of device) for the (I)NFTL
85 MediaHeader. The spec says to just keep going, I think, but that's just
86 silly. */
87#define MAX_MEDIAHEADER_SCAN 8
88
89/* This is the syndrome computed by the HW ecc generator upon reading an empty
90 page, one with all 0xff for data and stored ecc code. */
91static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
92/* This is the ecc value computed by the HW ecc generator upon writing an empty
93 page, one with all 0xff for data. */
94static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
95
96#define INFTL_BBT_RESERVED_BLOCKS 4
97
98#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
99#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
100#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
101
102static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd);
103static void doc200x_select_chip(struct mtd_info *mtd, int chip);
104
105static int debug=0;
106module_param(debug, int, 0);
107
108static int try_dword=1;
109module_param(try_dword, int, 0);
110
111static int no_ecc_failures=0;
112module_param(no_ecc_failures, int, 0);
113
114#ifdef CONFIG_MTD_PARTITIONS
115static int no_autopart=0;
116module_param(no_autopart, int, 0);
117#endif
118
119#ifdef MTD_NAND_DISKONCHIP_BBTWRITE
120static int inftl_bbt_write=1;
121#else
122static int inftl_bbt_write=0;
123#endif
124module_param(inftl_bbt_write, int, 0);
125
126static unsigned long doc_config_location = CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS;
127module_param(doc_config_location, ulong, 0);
128MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
129
130
131/* Sector size for HW ECC */
132#define SECTOR_SIZE 512
133/* The sector bytes are packed into NB_DATA 10 bit words */
134#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
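/* i.e. ((512 + 1) * 8 + 6) / 10 = 411 ten-bit symbols for the 512 data bytes
   plus one extra byte */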
135/* Number of roots */
136#define NROOTS 4
137/* First consecutive root */
138#define FCR 510
139/* Number of symbols */
140#define NN 1023
141
142/* the Reed Solomon control structure */
143static struct rs_control *rs_decoder;
144
145/*
146 * The HW decoder in the DoC ASICs provides us an error syndrome,
147 * which we must convert to a standard syndrome usable by the generic
148 * Reed-Solomon library code.
149 *
150 * Fabrice Bellard figured this out in the old docecc code. I added
151 * some comments, made minor improvements and converted it to make use
152 * of the generic Reed-Solomon library. tglx
153 */
154static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
155{
156 int i, j, nerr, errpos[8];
157 uint8_t parity;
158 uint16_t ds[4], s[5], tmp, errval[8], syn[4];
159
160 /* Convert the ecc bytes into words */
161 ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
162 ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
163 ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
164 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
165 parity = ecc[1];
166
167	/* Initialize the syndrome buffer */
168 for (i = 0; i < NROOTS; i++)
169 s[i] = ds[0];
170 /*
171 * Evaluate
172 * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
173 * where x = alpha^(FCR + i)
174 */
175 for(j = 1; j < NROOTS; j++) {
176 if(ds[j] == 0)
177 continue;
178 tmp = rs->index_of[ds[j]];
179 for(i = 0; i < NROOTS; i++)
180 s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
181 }
182
183	/* Calc syn[i] = s[i] / alpha^(v + i), or 0 when s[i] is 0 */
184	for (i = 0; i < NROOTS; i++) {
185		syn[i] = s[i] ?
186			rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i)) : 0;
187	}
188 /* Call the decoder library */
189 nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
190
191	/* Uncorrectable errors? */
192 if (nerr < 0)
193 return nerr;
194
195 /*
196	 * Correct the errors. The bit positions are a bit of magic,
197	 * but they are given by the design of the de/encoder circuit
198	 * in the DoC ASICs.
199 */
200 for(i = 0;i < nerr; i++) {
201 int index, bitpos, pos = 1015 - errpos[i];
202 uint8_t val;
203 if (pos >= NB_DATA && pos < 1019)
204 continue;
205 if (pos < NB_DATA) {
206 /* extract bit position (MSB first) */
207 pos = 10 * (NB_DATA - 1 - pos) - 6;
208 /* now correct the following 10 bits. At most two bytes
209 can be modified since pos is even */
210 index = (pos >> 3) ^ 1;
211 bitpos = pos & 7;
212 if ((index >= 0 && index < SECTOR_SIZE) ||
213 index == (SECTOR_SIZE + 1)) {
214 val = (uint8_t) (errval[i] >> (2 + bitpos));
215 parity ^= val;
216 if (index < SECTOR_SIZE)
217 data[index] ^= val;
218 }
219 index = ((pos >> 3) + 1) ^ 1;
220 bitpos = (bitpos + 10) & 7;
221 if (bitpos == 0)
222 bitpos = 8;
223 if ((index >= 0 && index < SECTOR_SIZE) ||
224 index == (SECTOR_SIZE + 1)) {
225 val = (uint8_t)(errval[i] << (8 - bitpos));
226 parity ^= val;
227 if (index < SECTOR_SIZE)
228 data[index] ^= val;
229 }
230 }
231 }
232 /* If the parity is wrong, no rescue possible */
233 return parity ? -1 : nerr;
234}
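/*
 * Usage sketch (illustration only, not part of the driver): the decoder is
 * created once in init_nanddoc() and then fed a 512-byte sector buffer plus
 * the six syndrome bytes read back from the ASIC.  The buffer names below
 * are hypothetical; doc200x_correct_data() further down is the real call
 * site.
 *
 *	struct rs_control *rs = init_rs(10, 0x409, FCR, 1, NROOTS);
 *	int nerr = doc_ecc_decode(rs, databuf, syndrome_from_chip);
 *	if (nerr < 0)
 *		printk(KERN_ERR "uncorrectable sector\n");
 */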
235
236static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
237{
238 volatile char dummy;
239 int i;
240
241 for (i = 0; i < cycles; i++) {
242 if (DoC_is_Millennium(doc))
243 dummy = ReadDOC(doc->virtadr, NOP);
244 else if (DoC_is_MillenniumPlus(doc))
245 dummy = ReadDOC(doc->virtadr, Mplus_NOP);
246 else
247 dummy = ReadDOC(doc->virtadr, DOCStatus);
248 }
249
250}
251
252#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
253
254/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
255static int _DoC_WaitReady(struct doc_priv *doc)
256{
257 void __iomem *docptr = doc->virtadr;
258 unsigned long timeo = jiffies + (HZ * 10);
259
260 if(debug) printk("_DoC_WaitReady...\n");
261 /* Out-of-line routine to wait for chip response */
262 if (DoC_is_MillenniumPlus(doc)) {
263 while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
264 if (time_after(jiffies, timeo)) {
265 printk("_DoC_WaitReady timed out.\n");
266 return -EIO;
267 }
268 udelay(1);
269 cond_resched();
270 }
271 } else {
272 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
273 if (time_after(jiffies, timeo)) {
274 printk("_DoC_WaitReady timed out.\n");
275 return -EIO;
276 }
277 udelay(1);
278 cond_resched();
279 }
280 }
281
282 return 0;
283}
284
285static inline int DoC_WaitReady(struct doc_priv *doc)
286{
287 void __iomem *docptr = doc->virtadr;
288 int ret = 0;
289
290 if (DoC_is_MillenniumPlus(doc)) {
291 DoC_Delay(doc, 4);
292
293 if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
294 /* Call the out-of-line routine to wait */
295 ret = _DoC_WaitReady(doc);
296 } else {
297 DoC_Delay(doc, 4);
298
299 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
300 /* Call the out-of-line routine to wait */
301 ret = _DoC_WaitReady(doc);
302 DoC_Delay(doc, 2);
303 }
304
305 if(debug) printk("DoC_WaitReady OK\n");
306 return ret;
307}
308
309static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
310{
311 struct nand_chip *this = mtd->priv;
312 struct doc_priv *doc = this->priv;
313 void __iomem *docptr = doc->virtadr;
314
315 if(debug)printk("write_byte %02x\n", datum);
316 WriteDOC(datum, docptr, CDSNSlowIO);
317 WriteDOC(datum, docptr, 2k_CDSN_IO);
318}
319
320static u_char doc2000_read_byte(struct mtd_info *mtd)
321{
322 struct nand_chip *this = mtd->priv;
323 struct doc_priv *doc = this->priv;
324 void __iomem *docptr = doc->virtadr;
325 u_char ret;
326
327 ReadDOC(docptr, CDSNSlowIO);
328 DoC_Delay(doc, 2);
329 ret = ReadDOC(docptr, 2k_CDSN_IO);
330 if (debug) printk("read_byte returns %02x\n", ret);
331 return ret;
332}
333
334static void doc2000_writebuf(struct mtd_info *mtd,
335 const u_char *buf, int len)
336{
337 struct nand_chip *this = mtd->priv;
338 struct doc_priv *doc = this->priv;
339 void __iomem *docptr = doc->virtadr;
340 int i;
341 if (debug)printk("writebuf of %d bytes: ", len);
342 for (i=0; i < len; i++) {
343 WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
344 if (debug && i < 16)
345 printk("%02x ", buf[i]);
346 }
347 if (debug) printk("\n");
348}
349
350static void doc2000_readbuf(struct mtd_info *mtd,
351 u_char *buf, int len)
352{
353 struct nand_chip *this = mtd->priv;
354 struct doc_priv *doc = this->priv;
355 void __iomem *docptr = doc->virtadr;
356 int i;
357
358 if (debug)printk("readbuf of %d bytes: ", len);
359
360 for (i=0; i < len; i++) {
361 buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
362 }
363}
364
365static void doc2000_readbuf_dword(struct mtd_info *mtd,
366 u_char *buf, int len)
367{
368 struct nand_chip *this = mtd->priv;
369 struct doc_priv *doc = this->priv;
370 void __iomem *docptr = doc->virtadr;
371 int i;
372
373 if (debug) printk("readbuf_dword of %d bytes: ", len);
374
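	/* Fall back to byte-wide reads when the buffer or length is not
	   32-bit aligned; otherwise read the window with 32-bit accesses. */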
375 if (unlikely((((unsigned long)buf)|len) & 3)) {
376 for (i=0; i < len; i++) {
377 *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
378 }
379 } else {
380 for (i=0; i < len; i+=4) {
381 *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
382 }
383 }
384}
385
386static int doc2000_verifybuf(struct mtd_info *mtd,
387 const u_char *buf, int len)
388{
389 struct nand_chip *this = mtd->priv;
390 struct doc_priv *doc = this->priv;
391 void __iomem *docptr = doc->virtadr;
392 int i;
393
394 for (i=0; i < len; i++)
395 if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
396 return -EFAULT;
397 return 0;
398}
399
400static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
401{
402 struct nand_chip *this = mtd->priv;
403 struct doc_priv *doc = this->priv;
404 uint16_t ret;
405
406 doc200x_select_chip(mtd, nr);
407 doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
408 this->write_byte(mtd, NAND_CMD_READID);
409 doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
410 doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
411 this->write_byte(mtd, 0);
412 doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
413
414 ret = this->read_byte(mtd) << 8;
415 ret |= this->read_byte(mtd);
416
417 if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
418 /* First chip probe. See if we get same results by 32-bit access */
419 union {
420 uint32_t dword;
421 uint8_t byte[4];
422 } ident;
423 void __iomem *docptr = doc->virtadr;
424
425 doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
426 doc2000_write_byte(mtd, NAND_CMD_READID);
427 doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
428 doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
429 doc2000_write_byte(mtd, 0);
430 doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
431
432 ident.dword = readl(docptr + DoC_2k_CDSN_IO);
433 if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
434 printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
435 this->read_buf = &doc2000_readbuf_dword;
436 }
437 }
438
439 return ret;
440}
441
442static void __init doc2000_count_chips(struct mtd_info *mtd)
443{
444 struct nand_chip *this = mtd->priv;
445 struct doc_priv *doc = this->priv;
446 uint16_t mfrid;
447 int i;
448
449 /* Max 4 chips per floor on DiskOnChip 2000 */
450 doc->chips_per_floor = 4;
451
452 /* Find out what the first chip is */
453 mfrid = doc200x_ident_chip(mtd, 0);
454
455 /* Find how many chips in each floor. */
456 for (i = 1; i < 4; i++) {
457 if (doc200x_ident_chip(mtd, i) != mfrid)
458 break;
459 }
460 doc->chips_per_floor = i;
461 printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
462}
463
464static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
465{
466 struct doc_priv *doc = this->priv;
467
468 int status;
469
470 DoC_WaitReady(doc);
471 this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
472 DoC_WaitReady(doc);
473 status = (int)this->read_byte(mtd);
474
475 return status;
476}
477
478static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
479{
480 struct nand_chip *this = mtd->priv;
481 struct doc_priv *doc = this->priv;
482 void __iomem *docptr = doc->virtadr;
483
484 WriteDOC(datum, docptr, CDSNSlowIO);
485 WriteDOC(datum, docptr, Mil_CDSN_IO);
486 WriteDOC(datum, docptr, WritePipeTerm);
487}
488
489static u_char doc2001_read_byte(struct mtd_info *mtd)
490{
491 struct nand_chip *this = mtd->priv;
492 struct doc_priv *doc = this->priv;
493 void __iomem *docptr = doc->virtadr;
494
495 //ReadDOC(docptr, CDSNSlowIO);
496 /* 11.4.5 -- delay twice to allow extended length cycle */
497 DoC_Delay(doc, 2);
498 ReadDOC(docptr, ReadPipeInit);
499 //return ReadDOC(docptr, Mil_CDSN_IO);
500 return ReadDOC(docptr, LastDataRead);
501}
502
503static void doc2001_writebuf(struct mtd_info *mtd,
504 const u_char *buf, int len)
505{
506 struct nand_chip *this = mtd->priv;
507 struct doc_priv *doc = this->priv;
508 void __iomem *docptr = doc->virtadr;
509 int i;
510
511 for (i=0; i < len; i++)
512 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
513 /* Terminate write pipeline */
514 WriteDOC(0x00, docptr, WritePipeTerm);
515}
516
517static void doc2001_readbuf(struct mtd_info *mtd,
518 u_char *buf, int len)
519{
520 struct nand_chip *this = mtd->priv;
521 struct doc_priv *doc = this->priv;
522 void __iomem *docptr = doc->virtadr;
523 int i;
524
525 /* Start read pipeline */
526 ReadDOC(docptr, ReadPipeInit);
527
528 for (i=0; i < len-1; i++)
529 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
530
531 /* Terminate read pipeline */
532 buf[i] = ReadDOC(docptr, LastDataRead);
533}
534
535static int doc2001_verifybuf(struct mtd_info *mtd,
536 const u_char *buf, int len)
537{
538 struct nand_chip *this = mtd->priv;
539 struct doc_priv *doc = this->priv;
540 void __iomem *docptr = doc->virtadr;
541 int i;
542
543 /* Start read pipeline */
544 ReadDOC(docptr, ReadPipeInit);
545
546 for (i=0; i < len-1; i++)
547 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
548 ReadDOC(docptr, LastDataRead);
549 return i;
550 }
551 if (buf[i] != ReadDOC(docptr, LastDataRead))
552 return i;
553 return 0;
554}
555
556static u_char doc2001plus_read_byte(struct mtd_info *mtd)
557{
558 struct nand_chip *this = mtd->priv;
559 struct doc_priv *doc = this->priv;
560 void __iomem *docptr = doc->virtadr;
561 u_char ret;
562
563 ReadDOC(docptr, Mplus_ReadPipeInit);
564 ReadDOC(docptr, Mplus_ReadPipeInit);
565 ret = ReadDOC(docptr, Mplus_LastDataRead);
566 if (debug) printk("read_byte returns %02x\n", ret);
567 return ret;
568}
569
570static void doc2001plus_writebuf(struct mtd_info *mtd,
571 const u_char *buf, int len)
572{
573 struct nand_chip *this = mtd->priv;
574 struct doc_priv *doc = this->priv;
575 void __iomem *docptr = doc->virtadr;
576 int i;
577
578 if (debug)printk("writebuf of %d bytes: ", len);
579 for (i=0; i < len; i++) {
580 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
581 if (debug && i < 16)
582 printk("%02x ", buf[i]);
583 }
584 if (debug) printk("\n");
585}
586
587static void doc2001plus_readbuf(struct mtd_info *mtd,
588 u_char *buf, int len)
589{
590 struct nand_chip *this = mtd->priv;
591 struct doc_priv *doc = this->priv;
592 void __iomem *docptr = doc->virtadr;
593 int i;
594
595 if (debug)printk("readbuf of %d bytes: ", len);
596
597 /* Start read pipeline */
598 ReadDOC(docptr, Mplus_ReadPipeInit);
599 ReadDOC(docptr, Mplus_ReadPipeInit);
600
601 for (i=0; i < len-2; i++) {
602 buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
603 if (debug && i < 16)
604 printk("%02x ", buf[i]);
605 }
606
607 /* Terminate read pipeline */
608 buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
609 if (debug && i < 16)
610 printk("%02x ", buf[len-2]);
611 buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
612 if (debug && i < 16)
613 printk("%02x ", buf[len-1]);
614 if (debug) printk("\n");
615}
616
617static int doc2001plus_verifybuf(struct mtd_info *mtd,
618 const u_char *buf, int len)
619{
620 struct nand_chip *this = mtd->priv;
621 struct doc_priv *doc = this->priv;
622 void __iomem *docptr = doc->virtadr;
623 int i;
624
625 if (debug)printk("verifybuf of %d bytes: ", len);
626
627 /* Start read pipeline */
628 ReadDOC(docptr, Mplus_ReadPipeInit);
629 ReadDOC(docptr, Mplus_ReadPipeInit);
630
631 for (i=0; i < len-2; i++)
632 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
633 ReadDOC(docptr, Mplus_LastDataRead);
634 ReadDOC(docptr, Mplus_LastDataRead);
635 return i;
636 }
637 if (buf[len-2] != ReadDOC(docptr, Mplus_LastDataRead))
638 return len-2;
639 if (buf[len-1] != ReadDOC(docptr, Mplus_LastDataRead))
640 return len-1;
641 return 0;
642}
643
644static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
645{
646 struct nand_chip *this = mtd->priv;
647 struct doc_priv *doc = this->priv;
648 void __iomem *docptr = doc->virtadr;
649 int floor = 0;
650
651 if(debug)printk("select chip (%d)\n", chip);
652
653 if (chip == -1) {
654 /* Disable flash internally */
655 WriteDOC(0, docptr, Mplus_FlashSelect);
656 return;
657 }
658
659 floor = chip / doc->chips_per_floor;
660 chip -= (floor * doc->chips_per_floor);
661
662 /* Assert ChipEnable and deassert WriteProtect */
663 WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
664 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
665
666 doc->curchip = chip;
667 doc->curfloor = floor;
668}
669
670static void doc200x_select_chip(struct mtd_info *mtd, int chip)
671{
672 struct nand_chip *this = mtd->priv;
673 struct doc_priv *doc = this->priv;
674 void __iomem *docptr = doc->virtadr;
675 int floor = 0;
676
677 if(debug)printk("select chip (%d)\n", chip);
678
679 if (chip == -1)
680 return;
681
682 floor = chip / doc->chips_per_floor;
683 chip -= (floor * doc->chips_per_floor);
684
685 /* 11.4.4 -- deassert CE before changing chip */
686 doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE);
687
688 WriteDOC(floor, docptr, FloorSelect);
689 WriteDOC(chip, docptr, CDSNDeviceSelect);
690
691 doc200x_hwcontrol(mtd, NAND_CTL_SETNCE);
692
693 doc->curchip = chip;
694 doc->curfloor = floor;
695}
696
697static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd)
698{
699 struct nand_chip *this = mtd->priv;
700 struct doc_priv *doc = this->priv;
701 void __iomem *docptr = doc->virtadr;
702
703 switch(cmd) {
704 case NAND_CTL_SETNCE:
705 doc->CDSNControl |= CDSN_CTRL_CE;
706 break;
707 case NAND_CTL_CLRNCE:
708 doc->CDSNControl &= ~CDSN_CTRL_CE;
709 break;
710 case NAND_CTL_SETCLE:
711 doc->CDSNControl |= CDSN_CTRL_CLE;
712 break;
713 case NAND_CTL_CLRCLE:
714 doc->CDSNControl &= ~CDSN_CTRL_CLE;
715 break;
716 case NAND_CTL_SETALE:
717 doc->CDSNControl |= CDSN_CTRL_ALE;
718 break;
719 case NAND_CTL_CLRALE:
720 doc->CDSNControl &= ~CDSN_CTRL_ALE;
721 break;
722 case NAND_CTL_SETWP:
723 doc->CDSNControl |= CDSN_CTRL_WP;
724 break;
725 case NAND_CTL_CLRWP:
726 doc->CDSNControl &= ~CDSN_CTRL_WP;
727 break;
728 }
729 if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
730 WriteDOC(doc->CDSNControl, docptr, CDSNControl);
731	/* 11.4.3 -- 4 NOPs after CDSNControl write */
732 DoC_Delay(doc, 4);
733}
734
735static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
736{
737 struct nand_chip *this = mtd->priv;
738 struct doc_priv *doc = this->priv;
739 void __iomem *docptr = doc->virtadr;
740
741 /*
742 * Must terminate write pipeline before sending any commands
743 * to the device.
744 */
745 if (command == NAND_CMD_PAGEPROG) {
746 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
747 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
748 }
749
750 /*
751 * Write out the command to the device.
752 */
753 if (command == NAND_CMD_SEQIN) {
754 int readcmd;
755
756 if (column >= mtd->oobblock) {
757 /* OOB area */
758 column -= mtd->oobblock;
759 readcmd = NAND_CMD_READOOB;
760 } else if (column < 256) {
761 /* First 256 bytes --> READ0 */
762 readcmd = NAND_CMD_READ0;
763 } else {
764 column -= 256;
765 readcmd = NAND_CMD_READ1;
766 }
767 WriteDOC(readcmd, docptr, Mplus_FlashCmd);
768 }
769 WriteDOC(command, docptr, Mplus_FlashCmd);
770 WriteDOC(0, docptr, Mplus_WritePipeTerm);
771 WriteDOC(0, docptr, Mplus_WritePipeTerm);
772
773 if (column != -1 || page_addr != -1) {
774 /* Serially input address */
775 if (column != -1) {
776 /* Adjust columns for 16 bit buswidth */
777 if (this->options & NAND_BUSWIDTH_16)
778 column >>= 1;
779 WriteDOC(column, docptr, Mplus_FlashAddress);
780 }
781 if (page_addr != -1) {
782 WriteDOC((unsigned char) (page_addr & 0xff), docptr, Mplus_FlashAddress);
783 WriteDOC((unsigned char) ((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
784 /* One more address cycle for higher density devices */
785 if (this->chipsize & 0x0c000000) {
786 WriteDOC((unsigned char) ((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
787 printk("high density\n");
788 }
789 }
790 WriteDOC(0, docptr, Mplus_WritePipeTerm);
791 WriteDOC(0, docptr, Mplus_WritePipeTerm);
792 /* deassert ALE */
793 if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID)
794 WriteDOC(0, docptr, Mplus_FlashControl);
795 }
796
797 /*
798 * program and erase have their own busy handlers
799 * status and sequential in needs no delay
800 */
801 switch (command) {
802
803 case NAND_CMD_PAGEPROG:
804 case NAND_CMD_ERASE1:
805 case NAND_CMD_ERASE2:
806 case NAND_CMD_SEQIN:
807 case NAND_CMD_STATUS:
808 return;
809
810 case NAND_CMD_RESET:
811 if (this->dev_ready)
812 break;
813 udelay(this->chip_delay);
814 WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
815 WriteDOC(0, docptr, Mplus_WritePipeTerm);
816 WriteDOC(0, docptr, Mplus_WritePipeTerm);
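		/* poll the status byte until the ready bit (0x40) is set */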
817 while ( !(this->read_byte(mtd) & 0x40));
818 return;
819
820 /* This applies to read commands */
821 default:
822 /*
823 * If we don't have access to the busy pin, we apply the given
824 * command delay
825 */
826 if (!this->dev_ready) {
827 udelay (this->chip_delay);
828 return;
829 }
830 }
831
832 /* Apply this short delay always to ensure that we do wait tWB in
833 * any case on any machine. */
834 ndelay (100);
835 /* wait until command is processed */
836 while (!this->dev_ready(mtd));
837}
838
839static int doc200x_dev_ready(struct mtd_info *mtd)
840{
841 struct nand_chip *this = mtd->priv;
842 struct doc_priv *doc = this->priv;
843 void __iomem *docptr = doc->virtadr;
844
845 if (DoC_is_MillenniumPlus(doc)) {
846 /* 11.4.2 -- must NOP four times before checking FR/B# */
847 DoC_Delay(doc, 4);
848 if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
849 if(debug)
850 printk("not ready\n");
851 return 0;
852 }
853 if (debug)printk("was ready\n");
854 return 1;
855 } else {
856 /* 11.4.2 -- must NOP four times before checking FR/B# */
857 DoC_Delay(doc, 4);
858 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
859 if(debug)
860 printk("not ready\n");
861 return 0;
862 }
863 /* 11.4.2 -- Must NOP twice if it's ready */
864 DoC_Delay(doc, 2);
865 if (debug)printk("was ready\n");
866 return 1;
867 }
868}
869
870static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
871{
872 /* This is our last resort if we couldn't find or create a BBT. Just
873 pretend all blocks are good. */
874 return 0;
875}
876
877static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
878{
879 struct nand_chip *this = mtd->priv;
880 struct doc_priv *doc = this->priv;
881 void __iomem *docptr = doc->virtadr;
882
883 /* Prime the ECC engine */
884 switch(mode) {
885 case NAND_ECC_READ:
886 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
887 WriteDOC(DOC_ECC_EN, docptr, ECCConf);
888 break;
889 case NAND_ECC_WRITE:
890 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
891 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
892 break;
893 }
894}
895
896static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
897{
898 struct nand_chip *this = mtd->priv;
899 struct doc_priv *doc = this->priv;
900 void __iomem *docptr = doc->virtadr;
901
902 /* Prime the ECC engine */
903 switch(mode) {
904 case NAND_ECC_READ:
905 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
906 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
907 break;
908 case NAND_ECC_WRITE:
909 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
910 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
911 break;
912 }
913}
914
915/* This code is only called on write */
916static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
917 unsigned char *ecc_code)
918{
919 struct nand_chip *this = mtd->priv;
920 struct doc_priv *doc = this->priv;
921 void __iomem *docptr = doc->virtadr;
922 int i;
923 int emptymatch = 1;
924
925 /* flush the pipeline */
926 if (DoC_is_2000(doc)) {
927 WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
928 WriteDOC(0, docptr, 2k_CDSN_IO);
929 WriteDOC(0, docptr, 2k_CDSN_IO);
930 WriteDOC(0, docptr, 2k_CDSN_IO);
931 WriteDOC(doc->CDSNControl, docptr, CDSNControl);
932 } else if (DoC_is_MillenniumPlus(doc)) {
933 WriteDOC(0, docptr, Mplus_NOP);
934 WriteDOC(0, docptr, Mplus_NOP);
935 WriteDOC(0, docptr, Mplus_NOP);
936 } else {
937 WriteDOC(0, docptr, NOP);
938 WriteDOC(0, docptr, NOP);
939 WriteDOC(0, docptr, NOP);
940 }
941
942 for (i = 0; i < 6; i++) {
943 if (DoC_is_MillenniumPlus(doc))
944 ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
945 else
946 ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
947 if (ecc_code[i] != empty_write_ecc[i])
948 emptymatch = 0;
949 }
950 if (DoC_is_MillenniumPlus(doc))
951 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
952 else
953 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
954#if 0
955 /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
956 if (emptymatch) {
957 /* Note: this somewhat expensive test should not be triggered
958 often. It could be optimized away by examining the data in
959 the writebuf routine, and remembering the result. */
960 for (i = 0; i < 512; i++) {
961 if (dat[i] == 0xff) continue;
962 emptymatch = 0;
963 break;
964 }
965 }
966 /* If emptymatch still =1, we do have an all-0xff data buffer.
967 Return all-0xff ecc value instead of the computed one, so
968 it'll look just like a freshly-erased page. */
969 if (emptymatch) memset(ecc_code, 0xff, 6);
970#endif
971 return 0;
972}
973
974static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
975{
976 int i, ret = 0;
977 struct nand_chip *this = mtd->priv;
978 struct doc_priv *doc = this->priv;
979 void __iomem *docptr = doc->virtadr;
980 volatile u_char dummy;
981 int emptymatch = 1;
982
983 /* flush the pipeline */
984 if (DoC_is_2000(doc)) {
985 dummy = ReadDOC(docptr, 2k_ECCStatus);
986 dummy = ReadDOC(docptr, 2k_ECCStatus);
987 dummy = ReadDOC(docptr, 2k_ECCStatus);
988 } else if (DoC_is_MillenniumPlus(doc)) {
989 dummy = ReadDOC(docptr, Mplus_ECCConf);
990 dummy = ReadDOC(docptr, Mplus_ECCConf);
991 dummy = ReadDOC(docptr, Mplus_ECCConf);
992 } else {
993 dummy = ReadDOC(docptr, ECCConf);
994 dummy = ReadDOC(docptr, ECCConf);
995 dummy = ReadDOC(docptr, ECCConf);
996 }
997
998	/* Error occurred? */
999 if (dummy & 0x80) {
1000 for (i = 0; i < 6; i++) {
1001 if (DoC_is_MillenniumPlus(doc))
1002 calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
1003 else
1004 calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
1005 if (calc_ecc[i] != empty_read_syndrome[i])
1006 emptymatch = 0;
1007 }
1008 /* If emptymatch=1, the read syndrome is consistent with an
1009 all-0xff data and stored ecc block. Check the stored ecc. */
1010 if (emptymatch) {
1011 for (i = 0; i < 6; i++) {
1012 if (read_ecc[i] == 0xff) continue;
1013 emptymatch = 0;
1014 break;
1015 }
1016 }
1017 /* If emptymatch still =1, check the data block. */
1018 if (emptymatch) {
1019 /* Note: this somewhat expensive test should not be triggered
1020 often. It could be optimized away by examining the data in
1021 the readbuf routine, and remembering the result. */
1022 for (i = 0; i < 512; i++) {
1023 if (dat[i] == 0xff) continue;
1024 emptymatch = 0;
1025 break;
1026 }
1027 }
1028 /* If emptymatch still =1, this is almost certainly a freshly-
1029 erased block, in which case the ECC will not come out right.
1030 We'll suppress the error and tell the caller everything's
1031 OK. Because it is. */
1032 if (!emptymatch) ret = doc_ecc_decode (rs_decoder, dat, calc_ecc);
1033 if (ret > 0)
1034 printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
1035 }
1036 if (DoC_is_MillenniumPlus(doc))
1037 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
1038 else
1039 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
1040 if (no_ecc_failures && (ret == -1)) {
1041 printk(KERN_ERR "suppressing ECC failure\n");
1042 ret = 0;
1043 }
1044 return ret;
1045}
1046
1047//u_char mydatabuf[528];
1048
1049static struct nand_oobinfo doc200x_oobinfo = {
1050 .useecc = MTD_NANDECC_AUTOPLACE,
1051 .eccbytes = 6,
1052 .eccpos = {0, 1, 2, 3, 4, 5},
1053 .oobfree = { {8, 8} }
1054};
1055
1056/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
1057   On successful return, buf will contain a copy of the media header for
1058 further processing. id is the string to scan for, and will presumably be
1059 either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
1060 header. The page #s of the found media headers are placed in mh0_page and
1061 mh1_page in the DOC private structure. */
1062static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
1063 const char *id, int findmirror)
1064{
1065 struct nand_chip *this = mtd->priv;
1066 struct doc_priv *doc = this->priv;
1067 unsigned offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
1068 int ret;
1069 size_t retlen;
1070
1071 end = min(end, mtd->size); // paranoia
1072 for (offs = 0; offs < end; offs += mtd->erasesize) {
1073 ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
1074 if (retlen != mtd->oobblock) continue;
1075 if (ret) {
1076 printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n",
1077 offs);
1078 }
1079 if (memcmp(buf, id, 6)) continue;
1080 printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
1081 if (doc->mh0_page == -1) {
1082 doc->mh0_page = offs >> this->page_shift;
1083 if (!findmirror) return 1;
1084 continue;
1085 }
1086 doc->mh1_page = offs >> this->page_shift;
1087 return 2;
1088 }
1089 if (doc->mh0_page == -1) {
1090 printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
1091 return 0;
1092 }
1093 /* Only one mediaheader was found. We want buf to contain a
1094 mediaheader on return, so we'll have to re-read the one we found. */
1095 offs = doc->mh0_page << this->page_shift;
1096 ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
1097 if (retlen != mtd->oobblock) {
1098 /* Insanity. Give up. */
1099 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
1100 return 0;
1101 }
1102 return 1;
1103}
1104
1105static inline int __init nftl_partscan(struct mtd_info *mtd,
1106 struct mtd_partition *parts)
1107{
1108 struct nand_chip *this = mtd->priv;
1109 struct doc_priv *doc = this->priv;
1110 int ret = 0;
1111 u_char *buf;
1112 struct NFTLMediaHeader *mh;
1113 const unsigned psize = 1 << this->page_shift;
1114 unsigned blocks, maxblocks;
1115 int offs, numheaders;
1116
1117 buf = kmalloc(mtd->oobblock, GFP_KERNEL);
1118 if (!buf) {
1119 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
1120 return 0;
1121 }
1122 if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) goto out;
1123 mh = (struct NFTLMediaHeader *) buf;
1124
1125//#ifdef CONFIG_MTD_DEBUG_VERBOSE
1126// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
1127 printk(KERN_INFO " DataOrgID = %s\n"
1128 " NumEraseUnits = %d\n"
1129 " FirstPhysicalEUN = %d\n"
1130 " FormattedSize = %d\n"
1131 " UnitSizeFactor = %d\n",
1132 mh->DataOrgID, mh->NumEraseUnits,
1133 mh->FirstPhysicalEUN, mh->FormattedSize,
1134 mh->UnitSizeFactor);
1135//#endif
1136
1137 blocks = mtd->size >> this->phys_erase_shift;
1138 maxblocks = min(32768U, mtd->erasesize - psize);
1139
1140 if (mh->UnitSizeFactor == 0x00) {
1141 /* Auto-determine UnitSizeFactor. The constraints are:
1142 - There can be at most 32768 virtual blocks.
1143 - There can be at most (virtual block size - page size)
1144 virtual blocks (because MediaHeader+BBT must fit in 1).
1145 */
1146 mh->UnitSizeFactor = 0xff;
1147 while (blocks > maxblocks) {
1148 blocks >>= 1;
1149 maxblocks = min(32768U, (maxblocks << 1) + psize);
1150 mh->UnitSizeFactor--;
1151 }
1152 printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
1153 }
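	/*
	 * Worked example (hypothetical numbers): a 256 MiB device with 8 KiB
	 * physical eraseblocks and 512 byte pages starts with blocks = 32768
	 * and maxblocks = 7680; two passes through the loop above leave
	 * UnitSizeFactor at 0xfd, i.e. a 32 KiB virtual eraseblock.
	 */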
1154
1155 /* NOTE: The lines below modify internal variables of the NAND and MTD
1156	   layers; variables which have already been configured by nand_scan.
1157	   Unfortunately, we didn't know before this point what these values
1158	   should be. Thus, this code is somewhat dependent on the exact
1159 implementation of the NAND layer. */
1160 if (mh->UnitSizeFactor != 0xff) {
1161 this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
1162 mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
1163 printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
1164 blocks = mtd->size >> this->bbt_erase_shift;
1165 maxblocks = min(32768U, mtd->erasesize - psize);
1166 }
1167
1168 if (blocks > maxblocks) {
1169 printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
1170 goto out;
1171 }
1172
1173 /* Skip past the media headers. */
1174 offs = max(doc->mh0_page, doc->mh1_page);
1175 offs <<= this->page_shift;
1176 offs += mtd->erasesize;
1177
1178 //parts[0].name = " DiskOnChip Boot / Media Header partition";
1179 //parts[0].offset = 0;
1180 //parts[0].size = offs;
1181
1182 parts[0].name = " DiskOnChip BDTL partition";
1183 parts[0].offset = offs;
1184 parts[0].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
1185
1186 offs += parts[0].size;
1187 if (offs < mtd->size) {
1188 parts[1].name = " DiskOnChip Remainder partition";
1189 parts[1].offset = offs;
1190 parts[1].size = mtd->size - offs;
1191 ret = 2;
1192 goto out;
1193 }
1194 ret = 1;
1195out:
1196 kfree(buf);
1197 return ret;
1198}
1199
1200/* This is a stripped-down copy of the code in inftlmount.c */
1201static inline int __init inftl_partscan(struct mtd_info *mtd,
1202 struct mtd_partition *parts)
1203{
1204 struct nand_chip *this = mtd->priv;
1205 struct doc_priv *doc = this->priv;
1206 int ret = 0;
1207 u_char *buf;
1208 struct INFTLMediaHeader *mh;
1209 struct INFTLPartition *ip;
1210 int numparts = 0;
1211 int blocks;
1212 int vshift, lastvunit = 0;
1213 int i;
1214 int end = mtd->size;
1215
1216 if (inftl_bbt_write)
1217 end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
1218
1219 buf = kmalloc(mtd->oobblock, GFP_KERNEL);
1220 if (!buf) {
1221 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
1222 return 0;
1223 }
1224
1225 if (!find_media_headers(mtd, buf, "BNAND", 0)) goto out;
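	/* the mirror media header is assumed to sit 4KiB after the primary one */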
1226 doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
1227 mh = (struct INFTLMediaHeader *) buf;
1228
1229 mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
1230 mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
1231 mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
1232 mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
1233 mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
1234 mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
1235
1236//#ifdef CONFIG_MTD_DEBUG_VERBOSE
1237// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
1238 printk(KERN_INFO " bootRecordID = %s\n"
1239 " NoOfBootImageBlocks = %d\n"
1240 " NoOfBinaryPartitions = %d\n"
1241 " NoOfBDTLPartitions = %d\n"
1242	" BlockMultiplierBits = %d\n"
1243	" FormatFlags = %d\n"
1244 " OsakVersion = %d.%d.%d.%d\n"
1245 " PercentUsed = %d\n",
1246 mh->bootRecordID, mh->NoOfBootImageBlocks,
1247 mh->NoOfBinaryPartitions,
1248 mh->NoOfBDTLPartitions,
1249 mh->BlockMultiplierBits, mh->FormatFlags,
1250 ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
1251 ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
1252 ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
1253 ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
1254 mh->PercentUsed);
1255//#endif
1256
1257 vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
1258
1259 blocks = mtd->size >> vshift;
1260 if (blocks > 32768) {
1261 printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
1262 goto out;
1263 }
1264
1265 blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
1266 if (inftl_bbt_write && (blocks > mtd->erasesize)) {
1267 printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
1268 goto out;
1269 }
1270
1271 /* Scan the partitions */
1272 for (i = 0; (i < 4); i++) {
1273 ip = &(mh->Partitions[i]);
1274 ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
1275 ip->firstUnit = le32_to_cpu(ip->firstUnit);
1276 ip->lastUnit = le32_to_cpu(ip->lastUnit);
1277 ip->flags = le32_to_cpu(ip->flags);
1278 ip->spareUnits = le32_to_cpu(ip->spareUnits);
1279 ip->Reserved0 = le32_to_cpu(ip->Reserved0);
1280
1281//#ifdef CONFIG_MTD_DEBUG_VERBOSE
1282// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
1283 printk(KERN_INFO " PARTITION[%d] ->\n"
1284 " virtualUnits = %d\n"
1285 " firstUnit = %d\n"
1286 " lastUnit = %d\n"
1287 " flags = 0x%x\n"
1288 " spareUnits = %d\n",
1289 i, ip->virtualUnits, ip->firstUnit,
1290 ip->lastUnit, ip->flags,
1291 ip->spareUnits);
1292//#endif
1293
1294/*
1295 if ((i == 0) && (ip->firstUnit > 0)) {
1296 parts[0].name = " DiskOnChip IPL / Media Header partition";
1297 parts[0].offset = 0;
1298 parts[0].size = mtd->erasesize * ip->firstUnit;
1299 numparts = 1;
1300 }
1301*/
1302
1303 if (ip->flags & INFTL_BINARY)
1304 parts[numparts].name = " DiskOnChip BDK partition";
1305 else
1306 parts[numparts].name = " DiskOnChip BDTL partition";
1307 parts[numparts].offset = ip->firstUnit << vshift;
1308 parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
1309 numparts++;
1310 if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit;
1311 if (ip->flags & INFTL_LAST) break;
1312 }
1313 lastvunit++;
1314 if ((lastvunit << vshift) < end) {
1315 parts[numparts].name = " DiskOnChip Remainder partition";
1316 parts[numparts].offset = lastvunit << vshift;
1317 parts[numparts].size = end - parts[numparts].offset;
1318 numparts++;
1319 }
1320 ret = numparts;
1321out:
1322 kfree(buf);
1323 return ret;
1324}
1325
1326static int __init nftl_scan_bbt(struct mtd_info *mtd)
1327{
1328 int ret, numparts;
1329 struct nand_chip *this = mtd->priv;
1330 struct doc_priv *doc = this->priv;
1331 struct mtd_partition parts[2];
1332
1333 memset((char *) parts, 0, sizeof(parts));
1334 /* On NFTL, we have to find the media headers before we can read the
1335 BBTs, since they're stored in the media header eraseblocks. */
1336 numparts = nftl_partscan(mtd, parts);
1337 if (!numparts) return -EIO;
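	/* Absolute-page BBT descriptors: the table lives in the page right
	   after each media header found above. */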
1338 this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
1339 NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
1340 NAND_BBT_VERSION;
1341 this->bbt_td->veroffs = 7;
1342 this->bbt_td->pages[0] = doc->mh0_page + 1;
1343 if (doc->mh1_page != -1) {
1344 this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
1345 NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
1346 NAND_BBT_VERSION;
1347 this->bbt_md->veroffs = 7;
1348 this->bbt_md->pages[0] = doc->mh1_page + 1;
1349 } else {
1350 this->bbt_md = NULL;
1351 }
1352
1353 /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
1354 At least as nand_bbt.c is currently written. */
1355 if ((ret = nand_scan_bbt(mtd, NULL)))
1356 return ret;
1357 add_mtd_device(mtd);
1358#ifdef CONFIG_MTD_PARTITIONS
1359 if (!no_autopart)
1360 add_mtd_partitions(mtd, parts, numparts);
1361#endif
1362 return 0;
1363}
1364
1365static int __init inftl_scan_bbt(struct mtd_info *mtd)
1366{
1367 int ret, numparts;
1368 struct nand_chip *this = mtd->priv;
1369 struct doc_priv *doc = this->priv;
1370 struct mtd_partition parts[5];
1371
1372 if (this->numchips > doc->chips_per_floor) {
1373 printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
1374 return -EIO;
1375 }
1376
1377 if (DoC_is_MillenniumPlus(doc)) {
1378 this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
1379 if (inftl_bbt_write)
1380 this->bbt_td->options |= NAND_BBT_WRITE;
1381 this->bbt_td->pages[0] = 2;
1382 this->bbt_md = NULL;
1383 } else {
1384 this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
1385 NAND_BBT_VERSION;
1386 if (inftl_bbt_write)
1387 this->bbt_td->options |= NAND_BBT_WRITE;
1388 this->bbt_td->offs = 8;
1389 this->bbt_td->len = 8;
1390 this->bbt_td->veroffs = 7;
1391 this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
1392 this->bbt_td->reserved_block_code = 0x01;
1393 this->bbt_td->pattern = "MSYS_BBT";
1394
1395 this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
1396 NAND_BBT_VERSION;
1397 if (inftl_bbt_write)
1398 this->bbt_md->options |= NAND_BBT_WRITE;
1399 this->bbt_md->offs = 8;
1400 this->bbt_md->len = 8;
1401 this->bbt_md->veroffs = 7;
1402 this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
1403 this->bbt_md->reserved_block_code = 0x01;
1404 this->bbt_md->pattern = "TBB_SYSM";
1405 }
1406
1407 /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
1408 At least as nand_bbt.c is currently written. */
1409 if ((ret = nand_scan_bbt(mtd, NULL)))
1410 return ret;
1411 memset((char *) parts, 0, sizeof(parts));
1412 numparts = inftl_partscan(mtd, parts);
1413 /* At least for now, require the INFTL Media Header. We could probably
1414 do without it for non-INFTL use, since all it gives us is
1415 autopartitioning, but I want to give it more thought. */
1416 if (!numparts) return -EIO;
1417 add_mtd_device(mtd);
1418#ifdef CONFIG_MTD_PARTITIONS
1419 if (!no_autopart)
1420 add_mtd_partitions(mtd, parts, numparts);
1421#endif
1422 return 0;
1423}
1424
1425static inline int __init doc2000_init(struct mtd_info *mtd)
1426{
1427 struct nand_chip *this = mtd->priv;
1428 struct doc_priv *doc = this->priv;
1429
1430 this->write_byte = doc2000_write_byte;
1431 this->read_byte = doc2000_read_byte;
1432 this->write_buf = doc2000_writebuf;
1433 this->read_buf = doc2000_readbuf;
1434 this->verify_buf = doc2000_verifybuf;
1435 this->scan_bbt = nftl_scan_bbt;
1436
1437 doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
1438 doc2000_count_chips(mtd);
1439 mtd->name = "DiskOnChip 2000 (NFTL Model)";
1440 return (4 * doc->chips_per_floor);
1441}
1442
1443static inline int __init doc2001_init(struct mtd_info *mtd)
1444{
1445 struct nand_chip *this = mtd->priv;
1446 struct doc_priv *doc = this->priv;
1447
1448 this->write_byte = doc2001_write_byte;
1449 this->read_byte = doc2001_read_byte;
1450 this->write_buf = doc2001_writebuf;
1451 this->read_buf = doc2001_readbuf;
1452 this->verify_buf = doc2001_verifybuf;
1453
1454 ReadDOC(doc->virtadr, ChipID);
1455 ReadDOC(doc->virtadr, ChipID);
1456 ReadDOC(doc->virtadr, ChipID);
1457 if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
1458 /* It's not a Millennium; it's one of the newer
1459 DiskOnChip 2000 units with a similar ASIC.
1460 Treat it like a Millennium, except that it
1461 can have multiple chips. */
1462 doc2000_count_chips(mtd);
1463 mtd->name = "DiskOnChip 2000 (INFTL Model)";
1464 this->scan_bbt = inftl_scan_bbt;
1465 return (4 * doc->chips_per_floor);
1466 } else {
1467 /* Bog-standard Millennium */
1468 doc->chips_per_floor = 1;
1469 mtd->name = "DiskOnChip Millennium";
1470 this->scan_bbt = nftl_scan_bbt;
1471 return 1;
1472 }
1473}
1474
1475static inline int __init doc2001plus_init(struct mtd_info *mtd)
1476{
1477 struct nand_chip *this = mtd->priv;
1478 struct doc_priv *doc = this->priv;
1479
1480 this->write_byte = NULL;
1481 this->read_byte = doc2001plus_read_byte;
1482 this->write_buf = doc2001plus_writebuf;
1483 this->read_buf = doc2001plus_readbuf;
1484 this->verify_buf = doc2001plus_verifybuf;
1485 this->scan_bbt = inftl_scan_bbt;
1486 this->hwcontrol = NULL;
1487 this->select_chip = doc2001plus_select_chip;
1488 this->cmdfunc = doc2001plus_command;
1489 this->enable_hwecc = doc2001plus_enable_hwecc;
1490
1491 doc->chips_per_floor = 1;
1492 mtd->name = "DiskOnChip Millennium Plus";
1493
1494 return 1;
1495}
1496
1497static inline int __init doc_probe(unsigned long physadr)
1498{
1499 unsigned char ChipID;
1500 struct mtd_info *mtd;
1501 struct nand_chip *nand;
1502 struct doc_priv *doc;
1503 void __iomem *virtadr;
1504 unsigned char save_control;
1505 unsigned char tmp, tmpb, tmpc;
1506 int reg, len, numchips;
1507 int ret = 0;
1508
1509 virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
1510 if (!virtadr) {
1511 printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
1512 return -EIO;
1513 }
1514
1515 /* It's not possible to cleanly detect the DiskOnChip - the
1516 * bootup procedure will put the device into reset mode, and
1517 * it's not possible to talk to it without actually writing
1518 * to the DOCControl register. So we store the current contents
1519 * of the DOCControl register's location, in case we later decide
1520 * that it's not a DiskOnChip, and want to put it back how we
1521 * found it.
1522 */
1523 save_control = ReadDOC(virtadr, DOCControl);
1524
1525 /* Reset the DiskOnChip ASIC */
1526 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
1527 virtadr, DOCControl);
1528 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
1529 virtadr, DOCControl);
1530
1531 /* Enable the DiskOnChip ASIC */
1532 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
1533 virtadr, DOCControl);
1534 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
1535 virtadr, DOCControl);
1536
1537 ChipID = ReadDOC(virtadr, ChipID);
1538
1539 switch(ChipID) {
1540 case DOC_ChipID_Doc2k:
1541 reg = DoC_2k_ECCStatus;
1542 break;
1543 case DOC_ChipID_DocMil:
1544 reg = DoC_ECCConf;
1545 break;
1546 case DOC_ChipID_DocMilPlus16:
1547 case DOC_ChipID_DocMilPlus32:
1548 case 0:
1549 /* Possible Millennium Plus, need to do more checks */
1550 /* Possibly release from power down mode */
1551 for (tmp = 0; (tmp < 4); tmp++)
1552 ReadDOC(virtadr, Mplus_Power);
1553
1554 /* Reset the Millennium Plus ASIC */
1555 tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
1556 DOC_MODE_BDECT;
1557 WriteDOC(tmp, virtadr, Mplus_DOCControl);
1558 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
1559
1560 mdelay(1);
1561 /* Enable the Millennium Plus ASIC */
1562 tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
1563 DOC_MODE_BDECT;
1564 WriteDOC(tmp, virtadr, Mplus_DOCControl);
1565 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
1566 mdelay(1);
1567
1568 ChipID = ReadDOC(virtadr, ChipID);
1569
1570 switch (ChipID) {
1571 case DOC_ChipID_DocMilPlus16:
1572 reg = DoC_Mplus_Toggle;
1573 break;
1574 case DOC_ChipID_DocMilPlus32:
1575 printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
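			/* fall through */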
1576 default:
1577 ret = -ENODEV;
1578 goto notfound;
1579 }
1580 break;
1581
1582 default:
1583 ret = -ENODEV;
1584 goto notfound;
1585 }
1586 /* Check the TOGGLE bit in the ECC register */
1587 tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
1588 tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
1589 tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
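	/* The toggle bit must alternate on consecutive reads: samples one and
	   two must differ, samples one and three must match. */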
1590 if ((tmp == tmpb) || (tmp != tmpc)) {
1591 printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
1592 ret = -ENODEV;
1593 goto notfound;
1594 }
1595
1596 for (mtd = doclist; mtd; mtd = doc->nextdoc) {
1597 unsigned char oldval;
1598 unsigned char newval;
1599 nand = mtd->priv;
1600 doc = nand->priv;
1601 /* Use the alias resolution register to determine if this is
1602 in fact the same DOC aliased to a new address. If writes
1603 to one chip's alias resolution register change the value on
1604 the other chip, they're the same chip. */
1605 if (ChipID == DOC_ChipID_DocMilPlus16) {
1606 oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
1607 newval = ReadDOC(virtadr, Mplus_AliasResolution);
1608 } else {
1609 oldval = ReadDOC(doc->virtadr, AliasResolution);
1610 newval = ReadDOC(virtadr, AliasResolution);
1611 }
1612 if (oldval != newval)
1613 continue;
1614 if (ChipID == DOC_ChipID_DocMilPlus16) {
1615 WriteDOC(~newval, virtadr, Mplus_AliasResolution);
1616 oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
1617 WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
1618 } else {
1619 WriteDOC(~newval, virtadr, AliasResolution);
1620 oldval = ReadDOC(doc->virtadr, AliasResolution);
1621 WriteDOC(newval, virtadr, AliasResolution); // restore it
1622 }
1623 newval = ~newval;
1624 if (oldval == newval) {
1625 printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
1626 goto notfound;
1627 }
1628 }
1629
1630 printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
1631
1632 len = sizeof(struct mtd_info) +
1633 sizeof(struct nand_chip) +
1634 sizeof(struct doc_priv) +
1635 (2 * sizeof(struct nand_bbt_descr));
1636 mtd = kmalloc(len, GFP_KERNEL);
1637 if (!mtd) {
1638 printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
1639 ret = -ENOMEM;
1640 goto fail;
1641 }
1642 memset(mtd, 0, len);
1643
1644 nand = (struct nand_chip *) (mtd + 1);
1645 doc = (struct doc_priv *) (nand + 1);
1646 nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
1647 nand->bbt_md = nand->bbt_td + 1;
1648
1649 mtd->priv = nand;
1650 mtd->owner = THIS_MODULE;
1651
1652 nand->priv = doc;
1653 nand->select_chip = doc200x_select_chip;
1654 nand->hwcontrol = doc200x_hwcontrol;
1655 nand->dev_ready = doc200x_dev_ready;
1656 nand->waitfunc = doc200x_wait;
1657 nand->block_bad = doc200x_block_bad;
1658 nand->enable_hwecc = doc200x_enable_hwecc;
1659 nand->calculate_ecc = doc200x_calculate_ecc;
1660 nand->correct_data = doc200x_correct_data;
1661
1662 nand->autooob = &doc200x_oobinfo;
1663 nand->eccmode = NAND_ECC_HW6_512;
1664 nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME;
1665
1666 doc->physadr = physadr;
1667 doc->virtadr = virtadr;
1668 doc->ChipID = ChipID;
1669 doc->curfloor = -1;
1670 doc->curchip = -1;
1671 doc->mh0_page = -1;
1672 doc->mh1_page = -1;
1673 doc->nextdoc = doclist;
1674
1675 if (ChipID == DOC_ChipID_Doc2k)
1676 numchips = doc2000_init(mtd);
1677 else if (ChipID == DOC_ChipID_DocMilPlus16)
1678 numchips = doc2001plus_init(mtd);
1679 else
1680 numchips = doc2001_init(mtd);
1681
1682 if ((ret = nand_scan(mtd, numchips))) {
1683		/* DBB note: I believe nand_release is necessary here, as
1684 buffers may have been allocated in nand_base. Check with
1685 Thomas. FIX ME! */
1686 /* nand_release will call del_mtd_device, but we haven't yet
1687 added it. This is handled without incident by
1688 del_mtd_device, as far as I can tell. */
1689 nand_release(mtd);
1690 kfree(mtd);
1691 goto fail;
1692 }
1693
1694 /* Success! */
1695 doclist = mtd;
1696 return 0;
1697
1698notfound:
1699 /* Put back the contents of the DOCControl register, in case it's not
1700 actually a DiskOnChip. */
1701 WriteDOC(save_control, virtadr, DOCControl);
1702fail:
1703 iounmap(virtadr);
1704 return ret;
1705}
1706
1707static void release_nanddoc(void)
1708{
1709 struct mtd_info *mtd, *nextmtd;
1710 struct nand_chip *nand;
1711 struct doc_priv *doc;
1712
1713 for (mtd = doclist; mtd; mtd = nextmtd) {
1714 nand = mtd->priv;
1715 doc = nand->priv;
1716
1717 nextmtd = doc->nextdoc;
1718 nand_release(mtd);
1719 iounmap(doc->virtadr);
1720 kfree(mtd);
1721 }
1722}
1723
1724static int __init init_nanddoc(void)
1725{
1726 int i, ret = 0;
1727
1728 /* We could create the decoder on demand, if memory is a concern.
1729	 * This way we have it handy if an error happens.
1730	 *
1731	 * Symbol size is 10 (bits)
1732	 * Primitive polynomial is x^10+x^3+1
1733	 * first consecutive root is 510
1734	 * primitive element to generate roots = 1
1735	 * generator polynomial degree = 4
1736 */
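	/* 0x409 = x^10 + x^3 + 1 (bits 10, 3 and 0 set) */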
1737 rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
1738 if (!rs_decoder) {
1739 printk (KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
1740 return -ENOMEM;
1741 }
1742
1743 if (doc_config_location) {
1744 printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
1745 ret = doc_probe(doc_config_location);
1746 if (ret < 0)
1747 goto outerr;
1748 } else {
1749 for (i=0; (doc_locations[i] != 0xffffffff); i++) {
1750 doc_probe(doc_locations[i]);
1751 }
1752 }
1753 /* No banner message any more. Print a message if no DiskOnChip
1754 found, so the user knows we at least tried. */
1755 if (!doclist) {
1756 printk(KERN_INFO "No valid DiskOnChip devices found\n");
1757 ret = -ENODEV;
1758 goto outerr;
1759 }
1760 return 0;
1761outerr:
1762 free_rs(rs_decoder);
1763 return ret;
1764}
1765
1766static void __exit cleanup_nanddoc(void)
1767{
1768 /* Cleanup the nand/DoC resources */
1769 release_nanddoc();
1770
1771 /* Free the reed solomon resources */
1772 if (rs_decoder) {
1773 free_rs(rs_decoder);
1774 }
1775}
1776
1777module_init(init_nanddoc);
1778module_exit(cleanup_nanddoc);
1779
1780MODULE_LICENSE("GPL");
1781MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1782MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
new file mode 100644
index 000000000000..5549681ccdce
--- /dev/null
+++ b/drivers/mtd/nand/edb7312.c
@@ -0,0 +1,218 @@
1/*
2 * drivers/mtd/nand/edb7312.c
3 *
4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
5 *
6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
8 *
9 * $Id: edb7312.c,v 1.11 2004/11/04 12:53:10 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Overview:
16 * This is a device driver for the NAND flash device found on the
17 * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
18 * a 64Mibit (8MiB x 8 bits) NAND flash device.
19 */
20
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h>
27#include <asm/io.h>
28#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
29#include <asm/sizes.h>
30#include <asm/hardware/clps7111.h>
31
32/*
33 * MTD structure for EDB7312 board
34 */
35static struct mtd_info *ep7312_mtd = NULL;
36
37/*
38 * Values specific to the EDB7312 board (used with EP7312 processor)
39 */
40#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
41#define EP7312_PXDR 0x0001 /*
42 * IO offset to Port B data register
43 * where the CLE, ALE and NCE pins
44 * are wired to.
45 */
46#define EP7312_PXDDR 0x0041 /*
47 * IO offset to Port B data direction
48 * register so we can control the IO
49 * lines.
50 */
51
52/*
53 * Module stuff
54 */
55
56static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
57static void __iomem * ep7312_pxdr = (void __iomem *) EP7312_PXDR;
58static void __iomem * ep7312_pxddr = (void __iomem *) EP7312_PXDDR;
59
60#ifdef CONFIG_MTD_PARTITIONS
61/*
62 * Define static partitions for flash device
63 */
64static struct mtd_partition partition_info[] = {
65 { .name = "EP7312 Nand Flash",
66 .offset = 0,
67 .size = 8*1024*1024 }
68};
69#define NUM_PARTITIONS 1
70
71#endif
72
73
74/*
75 * hardware specific access to control-lines
76 */
77static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd)
78{
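	/*
	 * Note: judging by the masks used below, CLE appears to be wired to
	 * Port B bit 4, ALE to bit 5 and the (active low) nCE line to bit 6,
	 * while bit 7 is kept driven high.
	 */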
79 switch(cmd) {
80
81 case NAND_CTL_SETCLE:
82 clps_writeb(clps_readb(ep7312_pxdr) | 0x10, ep7312_pxdr);
83 break;
84 case NAND_CTL_CLRCLE:
85 clps_writeb(clps_readb(ep7312_pxdr) & ~0x10, ep7312_pxdr);
86 break;
87
88 case NAND_CTL_SETALE:
89 clps_writeb(clps_readb(ep7312_pxdr) | 0x20, ep7312_pxdr);
90 break;
91 case NAND_CTL_CLRALE:
92 clps_writeb(clps_readb(ep7312_pxdr) & ~0x20, ep7312_pxdr);
93 break;
94
95 case NAND_CTL_SETNCE:
96 clps_writeb((clps_readb(ep7312_pxdr) | 0x80) & ~0x40, ep7312_pxdr);
97 break;
98 case NAND_CTL_CLRNCE:
99 clps_writeb((clps_readb(ep7312_pxdr) | 0x80) | 0x40, ep7312_pxdr);
100 break;
101 }
102}
103
104/*
105 * read device ready pin
106 */
107static int ep7312_device_ready(struct mtd_info *mtd)
108{
109 return 1;
110}
111#ifdef CONFIG_MTD_PARTITIONS
112const char *part_probes[] = { "cmdlinepart", NULL };
113#endif
114
115/*
116 * Main initialization routine
117 */
118static int __init ep7312_init (void)
119{
120 struct nand_chip *this;
121 const char *part_type = 0;
122 int mtd_parts_nb = 0;
123 struct mtd_partition *mtd_parts = 0;
124 void __iomem * ep7312_fio_base;
125
126 /* Allocate memory for MTD device structure and private data */
127 ep7312_mtd = kmalloc(sizeof(struct mtd_info) +
128 sizeof(struct nand_chip),
129 GFP_KERNEL);
130 if (!ep7312_mtd) {
131 printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
132 return -ENOMEM;
133 }
134
135	/* map physical address */
136 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
137 if(!ep7312_fio_base) {
138 printk("ioremap EDB7312 NAND flash failed\n");
139 kfree(ep7312_mtd);
140 return -EIO;
141 }
142
143 /* Get pointer to private data */
144 this = (struct nand_chip *) (&ep7312_mtd[1]);
145
146 /* Initialize structures */
147 memset((char *) ep7312_mtd, 0, sizeof(struct mtd_info));
148 memset((char *) this, 0, sizeof(struct nand_chip));
149
150 /* Link the private data with the MTD structure */
151 ep7312_mtd->priv = this;
152
153 /*
154 * Set GPIO Port B control register so that the pins are configured
155 * to be outputs for controlling the NAND flash.
156 */
157 clps_writeb(0xf0, ep7312_pxddr);
158
159 /* insert callbacks */
160 this->IO_ADDR_R = ep7312_fio_base;
161 this->IO_ADDR_W = ep7312_fio_base;
162 this->hwcontrol = ep7312_hwcontrol;
163 this->dev_ready = ep7312_device_ready;
164 /* 15 us command delay time */
165 this->chip_delay = 15;
166
167 /* Scan to find existence of the device */
168 if (nand_scan (ep7312_mtd, 1)) {
169 iounmap((void *)ep7312_fio_base);
170 kfree (ep7312_mtd);
171 return -ENXIO;
172 }
173
174#ifdef CONFIG_MTD_PARTITIONS
175 ep7312_mtd->name = "edb7312-nand";
176 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes,
177 &mtd_parts, 0);
178 if (mtd_parts_nb > 0)
179 part_type = "command line";
180 else
181 mtd_parts_nb = 0;
182#endif
183 if (mtd_parts_nb == 0) {
184 mtd_parts = partition_info;
185 mtd_parts_nb = NUM_PARTITIONS;
186 part_type = "static";
187 }
188
189 /* Register the partitions */
190 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
191 add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb);
192
193 /* Return happy */
194 return 0;
195}
196module_init(ep7312_init);
197
198/*
199 * Clean up routine
200 */
201static void __exit ep7312_cleanup (void)
202{
203 struct nand_chip *this = (struct nand_chip *) &ep7312_mtd[1];
204
205 /* Release resources, unregister device */
206	nand_release (ep7312_mtd);
207
208 /* Free internal data buffer */
209 kfree (this->data_buf);
210
211 /* Free the MTD device structure */
212 kfree (ep7312_mtd);
213}
214module_exit(ep7312_cleanup);
215
216MODULE_LICENSE("GPL");
217MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
218MODULE_DESCRIPTION("NAND flash driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
new file mode 100644
index 000000000000..3825a7a0900c
--- /dev/null
+++ b/drivers/mtd/nand/h1910.c
@@ -0,0 +1,208 @@
1/*
2 * drivers/mtd/nand/h1910.c
3 *
4 * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
5 *
6 * Derived from drivers/mtd/nand/edb7312.c
7 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
8 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
9 *
10 * $Id: h1910.c,v 1.5 2004/11/04 12:53:10 gleixner Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 * Overview:
17 * This is a device driver for the NAND flash device found on the
18 * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
19 * a 128Mibit (16MiB x 8 bits) NAND flash device.
20 */
21
22#include <linux/slab.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <asm/io.h>
29#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
30#include <asm/sizes.h>
31#include <asm/arch/h1900-gpio.h>
32#include <asm/arch/ipaq.h>
33
34/*
35 * MTD structure for the h1910 board
36 */
37static struct mtd_info *h1910_nand_mtd = NULL;
38
39/*
40 * Module stuff
41 */
42
43#ifdef CONFIG_MTD_PARTITIONS
44/*
45 * Define static partitions for flash device
46 */
47static struct mtd_partition partition_info[] = {
48	{ .name = "h1910 NAND Flash",
49	  .offset = 0,
50	  .size = 16*1024*1024 }
51};
52#define NUM_PARTITIONS 1
53
54#endif
55
56
57/*
58 * hardware specific access to control-lines
59 */
60static void h1910_hwcontrol(struct mtd_info *mtd, int cmd)
61{
62 struct nand_chip* this = (struct nand_chip *) (mtd->priv);
63
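	/*
	 * Note: CLE and ALE are not separate GPIOs here; judging by the bit
	 * twiddling below they are presumably wired to address lines A2 and
	 * A3, so they are toggled by adjusting IO_ADDR_R/IO_ADDR_W.
	 */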
64 switch(cmd) {
65
66 case NAND_CTL_SETCLE:
67 this->IO_ADDR_R |= (1 << 2);
68 this->IO_ADDR_W |= (1 << 2);
69 break;
70 case NAND_CTL_CLRCLE:
71 this->IO_ADDR_R &= ~(1 << 2);
72 this->IO_ADDR_W &= ~(1 << 2);
73 break;
74
75 case NAND_CTL_SETALE:
76 this->IO_ADDR_R |= (1 << 3);
77 this->IO_ADDR_W |= (1 << 3);
78 break;
79 case NAND_CTL_CLRALE:
80 this->IO_ADDR_R &= ~(1 << 3);
81 this->IO_ADDR_W &= ~(1 << 3);
82 break;
83
84 case NAND_CTL_SETNCE:
85 break;
86 case NAND_CTL_CLRNCE:
87 break;
88 }
89}
90
91/*
92 * read device ready pin
93 */
94#if 0
95static int h1910_device_ready(struct mtd_info *mtd)
96{
97 return (GPLR(55) & GPIO_bit(55));
98}
99#endif
100
101/*
102 * Main initialization routine
103 */
104static int __init h1910_init (void)
105{
106 struct nand_chip *this;
107 const char *part_type = 0;
108 int mtd_parts_nb = 0;
109 struct mtd_partition *mtd_parts = 0;
110 void __iomem *nandaddr;
111
112 if (!machine_is_h1900())
113 return -ENODEV;
114
115 nandaddr = __ioremap(0x08000000, 0x1000, 0, 1);
116 if (!nandaddr) {
117 printk("Failed to ioremap nand flash.\n");
118 return -ENOMEM;
119 }
120
121 /* Allocate memory for MTD device structure and private data */
122 h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) +
123 sizeof(struct nand_chip),
124 GFP_KERNEL);
125 if (!h1910_nand_mtd) {
126 printk("Unable to allocate h1910 NAND MTD device structure.\n");
127 iounmap ((void *) nandaddr);
128 return -ENOMEM;
129 }
130
131 /* Get pointer to private data */
132 this = (struct nand_chip *) (&h1910_nand_mtd[1]);
133
134 /* Initialize structures */
135 memset((char *) h1910_nand_mtd, 0, sizeof(struct mtd_info));
136 memset((char *) this, 0, sizeof(struct nand_chip));
137
138 /* Link the private data with the MTD structure */
139 h1910_nand_mtd->priv = this;
140
141 /*
142 * Enable VPEN
143 */
144 GPSR(37) = GPIO_bit(37);
145
146 /* insert callbacks */
147 this->IO_ADDR_R = nandaddr;
148 this->IO_ADDR_W = nandaddr;
149 this->hwcontrol = h1910_hwcontrol;
150	this->dev_ready = NULL;	/* unknown whether this is correct, so just leave it unset */
151	/* 50 us command delay time */
152	this->chip_delay = 50;
153 this->eccmode = NAND_ECC_SOFT;
154 this->options = NAND_NO_AUTOINCR;
155
156 /* Scan to find existence of the device */
157 if (nand_scan (h1910_nand_mtd, 1)) {
158 printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
159 kfree (h1910_nand_mtd);
160 iounmap ((void *) nandaddr);
161 return -ENXIO;
162 }
163
164#ifdef CONFIG_MTD_CMDLINE_PARTS
165 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts,
166 "h1910-nand");
167 if (mtd_parts_nb > 0)
168 part_type = "command line";
169 else
170 mtd_parts_nb = 0;
171#endif
172 if (mtd_parts_nb == 0)
173 {
174 mtd_parts = partition_info;
175 mtd_parts_nb = NUM_PARTITIONS;
176 part_type = "static";
177 }
178
179 /* Register the partitions */
180 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
181 add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
182
183 /* Return happy */
184 return 0;
185}
186module_init(h1910_init);
187
188/*
189 * Clean up routine
190 */
191static void __exit h1910_cleanup (void)
192{
193 struct nand_chip *this = (struct nand_chip *) &h1910_nand_mtd[1];
194
195 /* Release resources, unregister device */
196 nand_release (h1910_nand_mtd);
197
198 /* Release io resource */
199 iounmap ((void *) this->IO_ADDR_W);
200
201 /* Free the MTD device structure */
202 kfree (h1910_nand_mtd);
203}
204module_exit(h1910_cleanup);
205
206MODULE_LICENSE("GPL");
207MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
208MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
new file mode 100644
index 000000000000..44d5b128911f
--- /dev/null
+++ b/drivers/mtd/nand/nand_base.c
@@ -0,0 +1,2563 @@
1/*
2 * drivers/mtd/nand.c
3 *
4 * Overview:
5 * This is the generic MTD driver for NAND flash devices. It should be
6 * capable of working with almost all NAND chips currently available.
7 * Basic support for AG-AND chips is provided.
8 *
9 * Additional technical information is available on
10 * http://www.linux-mtd.infradead.org/tech/nand.html
11 *
12 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
13 * 2002 Thomas Gleixner (tglx@linutronix.de)
14 *
15 * 02-08-2004 tglx: support for strange chips, which cannot auto increment
16 * pages on read / read_oob
17 *
18 * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes
19 * pointed this out, as he marked an auto increment capable chip
20 * as NOAUTOINCR in the board driver.
21 * Make reads over block boundaries work too
22 *
23 * 04-14-2004 tglx: first working version for 2k page size chips
24 *
25 * 05-19-2004 tglx: Basic support for Renesas AG-AND chips
26 *
27 * 09-24-2004 tglx: add support for hardware controllers (e.g. ECC) shared
28 *		among multiple independent devices. Suggestions and initial patch
29 * from Ben Dooks <ben-mtd@fluff.org>
30 *
31 * Credits:
32 * David Woodhouse for adding multichip support
33 *
34 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
35 * rework for 2K page size chips
36 *
37 * TODO:
38 * Enable cached programming for 2k page size chips
39 * Check, if mtd->ecctype should be set to MTD_ECC_HW
40 * if we have HW ecc support.
41 * The AG-AND chips have nice features for speed improvement,
42 * which are not supported yet. Read / program 4 pages in one go.
43 *
44 * $Id: nand_base.c,v 1.126 2004/12/13 11:22:25 lavinen Exp $
45 *
46 * This program is free software; you can redistribute it and/or modify
47 * it under the terms of the GNU General Public License version 2 as
48 * published by the Free Software Foundation.
49 *
50 */
51
52#include <linux/delay.h>
53#include <linux/errno.h>
54#include <linux/sched.h>
55#include <linux/slab.h>
56#include <linux/types.h>
57#include <linux/mtd/mtd.h>
58#include <linux/mtd/nand.h>
59#include <linux/mtd/nand_ecc.h>
60#include <linux/mtd/compatmac.h>
61#include <linux/interrupt.h>
62#include <linux/bitops.h>
63#include <asm/io.h>
64
65#ifdef CONFIG_MTD_PARTITIONS
66#include <linux/mtd/partitions.h>
67#endif
68
69/* Define default oob placement schemes for large and small page devices */
70static struct nand_oobinfo nand_oob_8 = {
71 .useecc = MTD_NANDECC_AUTOPLACE,
72 .eccbytes = 3,
73 .eccpos = {0, 1, 2},
74 .oobfree = { {3, 2}, {6, 2} }
75};
76
77static struct nand_oobinfo nand_oob_16 = {
78 .useecc = MTD_NANDECC_AUTOPLACE,
79 .eccbytes = 6,
80 .eccpos = {0, 1, 2, 3, 6, 7},
81 .oobfree = { {8, 8} }
82};
83
84static struct nand_oobinfo nand_oob_64 = {
85 .useecc = MTD_NANDECC_AUTOPLACE,
86 .eccbytes = 24,
87 .eccpos = {
88 40, 41, 42, 43, 44, 45, 46, 47,
89 48, 49, 50, 51, 52, 53, 54, 55,
90 56, 57, 58, 59, 60, 61, 62, 63},
91 .oobfree = { {2, 38} }
92};
93
94/* This is used for padding purposes in nand_write_oob */
95static u_char ffchars[] = {
96 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
97 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
98 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
99 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
100 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
101 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
102 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
103 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
104};
105
106/*
107 * NAND low-level MTD interface functions
108 */
109static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len);
110static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len);
111static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len);
112
113static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
114static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
115 size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
116static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
117static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf);
118static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
119 size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
120static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf);
121static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
122 unsigned long count, loff_t to, size_t * retlen);
123static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
124 unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
125static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
126static void nand_sync (struct mtd_info *mtd);
127
128/* Some internal functions */
129static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf,
130 struct nand_oobinfo *oobsel, int mode);
131#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
132static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
133 u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode);
134#else
135#define nand_verify_pages(...) (0)
136#endif
137
138static void nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state);
139
140/**
141 * nand_release_device - [GENERIC] release chip
142 * @mtd: MTD device structure
143 *
144 * Deselect, release chip lock and wake up anyone waiting on the device
145 */
146static void nand_release_device (struct mtd_info *mtd)
147{
148 struct nand_chip *this = mtd->priv;
149
150 /* De-select the NAND device */
151 this->select_chip(mtd, -1);
152 /* Do we have a hardware controller ? */
153 if (this->controller) {
154 spin_lock(&this->controller->lock);
155 this->controller->active = NULL;
156 spin_unlock(&this->controller->lock);
157 }
158 /* Release the chip */
159 spin_lock (&this->chip_lock);
160 this->state = FL_READY;
161 wake_up (&this->wq);
162 spin_unlock (&this->chip_lock);
163}
164
165/**
166 * nand_read_byte - [DEFAULT] read one byte from the chip
167 * @mtd: MTD device structure
168 *
169 * Default read function for 8bit buswidth
170 */
171static u_char nand_read_byte(struct mtd_info *mtd)
172{
173 struct nand_chip *this = mtd->priv;
174 return readb(this->IO_ADDR_R);
175}
176
177/**
178 * nand_write_byte - [DEFAULT] write one byte to the chip
179 * @mtd: MTD device structure
180 * @byte: pointer to data byte to write
181 *
182 * Default write function for 8bit buswidth
183 */
184static void nand_write_byte(struct mtd_info *mtd, u_char byte)
185{
186 struct nand_chip *this = mtd->priv;
187 writeb(byte, this->IO_ADDR_W);
188}
189
190/**
191 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
192 * @mtd:	MTD device structure
193 *
194 * Default read function for 16bit buswidth with
195 * endianness conversion
196 */
197static u_char nand_read_byte16(struct mtd_info *mtd)
198{
199 struct nand_chip *this = mtd->priv;
200 return (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
201}
202
203/**
204 * nand_write_byte16 - [DEFAULT] write one byte endianness aware to the chip
205 * @mtd:	MTD device structure
206 * @byte:	pointer to data byte to write
207 *
208 * Default write function for 16bit buswidth with
209 * endianness conversion
210 */
211static void nand_write_byte16(struct mtd_info *mtd, u_char byte)
212{
213 struct nand_chip *this = mtd->priv;
214 writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
215}
216
217/**
218 * nand_read_word - [DEFAULT] read one word from the chip
219 * @mtd: MTD device structure
220 *
221 * Default read function for 16bit buswidth without
222 * endianness conversion
223 */
224static u16 nand_read_word(struct mtd_info *mtd)
225{
226 struct nand_chip *this = mtd->priv;
227 return readw(this->IO_ADDR_R);
228}
229
230/**
231 * nand_write_word - [DEFAULT] write one word to the chip
232 * @mtd: MTD device structure
233 * @word: data word to write
234 *
235 * Default write function for 16bit buswidth without
236 * endianness conversion
237 */
238static void nand_write_word(struct mtd_info *mtd, u16 word)
239{
240 struct nand_chip *this = mtd->priv;
241 writew(word, this->IO_ADDR_W);
242}
243
244/**
245 * nand_select_chip - [DEFAULT] control CE line
246 * @mtd: MTD device structure
247 * @chip: chipnumber to select, -1 for deselect
248 *
249 * Default select function for 1 chip devices.
250 */
251static void nand_select_chip(struct mtd_info *mtd, int chip)
252{
253 struct nand_chip *this = mtd->priv;
254 switch(chip) {
255 case -1:
256 this->hwcontrol(mtd, NAND_CTL_CLRNCE);
257 break;
258 case 0:
259 this->hwcontrol(mtd, NAND_CTL_SETNCE);
260 break;
261
262 default:
263 BUG();
264 }
265}
266
267/**
268 * nand_write_buf - [DEFAULT] write buffer to chip
269 * @mtd: MTD device structure
270 * @buf: data buffer
271 * @len: number of bytes to write
272 *
273 * Default write function for 8bit buswidth
274 */
275static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
276{
277 int i;
278 struct nand_chip *this = mtd->priv;
279
280 for (i=0; i<len; i++)
281 writeb(buf[i], this->IO_ADDR_W);
282}
283
284/**
285 * nand_read_buf - [DEFAULT] read chip data into buffer
286 * @mtd: MTD device structure
287 * @buf:	buffer to store data
288 * @len:	number of bytes to read
289 *
290 * Default read function for 8bit buswidth
291 */
292static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
293{
294 int i;
295 struct nand_chip *this = mtd->priv;
296
297 for (i=0; i<len; i++)
298 buf[i] = readb(this->IO_ADDR_R);
299}
300
301/**
302 * nand_verify_buf - [DEFAULT] Verify chip data against buffer
303 * @mtd: MTD device structure
304 * @buf: buffer containing the data to compare
305 * @len: number of bytes to compare
306 *
307 * Default verify function for 8bit buswidth
308 */
309static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
310{
311 int i;
312 struct nand_chip *this = mtd->priv;
313
314 for (i=0; i<len; i++)
315 if (buf[i] != readb(this->IO_ADDR_R))
316 return -EFAULT;
317
318 return 0;
319}
320
321/**
322 * nand_write_buf16 - [DEFAULT] write buffer to chip
323 * @mtd: MTD device structure
324 * @buf: data buffer
325 * @len: number of bytes to write
326 *
327 * Default write function for 16bit buswidth
328 */
329static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
330{
331 int i;
332 struct nand_chip *this = mtd->priv;
333 u16 *p = (u16 *) buf;
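	/* len is given in bytes; convert to a 16 bit word count */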
334 len >>= 1;
335
336 for (i=0; i<len; i++)
337 writew(p[i], this->IO_ADDR_W);
338
339}
340
341/**
342 * nand_read_buf16 - [DEFAULT] read chip data into buffer
343 * @mtd: MTD device structure
344 * @buf:	buffer to store data
345 * @len:	number of bytes to read
346 *
347 * Default read function for 16bit buswidth
348 */
349static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
350{
351 int i;
352 struct nand_chip *this = mtd->priv;
353 u16 *p = (u16 *) buf;
354 len >>= 1;
355
356 for (i=0; i<len; i++)
357 p[i] = readw(this->IO_ADDR_R);
358}
359
360/**
361 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
362 * @mtd: MTD device structure
363 * @buf: buffer containing the data to compare
364 * @len: number of bytes to compare
365 *
366 * Default verify function for 16bit buswidth
367 */
368static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
369{
370 int i;
371 struct nand_chip *this = mtd->priv;
372 u16 *p = (u16 *) buf;
373 len >>= 1;
374
375 for (i=0; i<len; i++)
376 if (p[i] != readw(this->IO_ADDR_R))
377 return -EFAULT;
378
379 return 0;
380}
381
382/**
383 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
384 * @mtd: MTD device structure
385 * @ofs: offset from device start
386 * @getchip: 0, if the chip is already selected
387 *
388 * Check, if the block is bad.
389 */
390static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
391{
392 int page, chipnr, res = 0;
393 struct nand_chip *this = mtd->priv;
394 u16 bad;
395
396 if (getchip) {
397 page = (int)(ofs >> this->page_shift);
398 chipnr = (int)(ofs >> this->chip_shift);
399
400 /* Grab the lock and see if the device is available */
401 nand_get_device (this, mtd, FL_READING);
402
403 /* Select the NAND device */
404 this->select_chip(mtd, chipnr);
405 } else
406 page = (int) ofs;
407
408 if (this->options & NAND_BUSWIDTH_16) {
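		/* 16 bit devices: read the word containing the marker and
		 * pick the relevant byte, depending on whether badblockpos
		 * is even or odd */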
409 this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask);
410 bad = cpu_to_le16(this->read_word(mtd));
411 if (this->badblockpos & 0x1)
412 bad >>= 1;
413 if ((bad & 0xFF) != 0xff)
414 res = 1;
415 } else {
416 this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask);
417 if (this->read_byte(mtd) != 0xff)
418 res = 1;
419 }
420
421 if (getchip) {
422 /* Deselect and wake up anyone waiting on the device */
423 nand_release_device(mtd);
424 }
425
426 return res;
427}
428
429/**
430 * nand_default_block_markbad - [DEFAULT] mark a block bad
431 * @mtd: MTD device structure
432 * @ofs: offset from device start
433 *
434 * This is the default implementation, which can be overridden by
435 * a hardware specific driver.
436*/
437static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
438{
439 struct nand_chip *this = mtd->priv;
440 u_char buf[2] = {0, 0};
441 size_t retlen;
442 int block;
443
444 /* Get block number */
445 block = ((int) ofs) >> this->bbt_erase_shift;
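	/* Mark the block in the in-memory bad block table: 2 bits per block */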
446 this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
447
448 /* Do we have a flash based bad block table ? */
449 if (this->options & NAND_USE_FLASH_BBT)
450 return nand_update_bbt (mtd, ofs);
451
452	/* We write two bytes, so we don't have to mess with 16 bit access */
453 ofs += mtd->oobsize + (this->badblockpos & ~0x01);
454 return nand_write_oob (mtd, ofs , 2, &retlen, buf);
455}
456
457/**
458 * nand_check_wp - [GENERIC] check if the chip is write protected
459 * @mtd: MTD device structure
460 * Check, if the device is write protected
461 *
462 * The function expects, that the device is already selected
463 */
464static int nand_check_wp (struct mtd_info *mtd)
465{
466 struct nand_chip *this = mtd->priv;
467 /* Check the WP bit */
468 this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
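	/* Status bit 0x80 set means the device is not write protected */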
469 return (this->read_byte(mtd) & 0x80) ? 0 : 1;
470}
471
472/**
473 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
474 * @mtd: MTD device structure
475 * @ofs: offset from device start
476 * @getchip: 0, if the chip is already selected
477 * @allowbbt:	1, if it is allowed to access the bbt area
478 *
479 * Check, if the block is bad. Either by reading the bad block table or
480 * by calling the scan function.
481 */
482static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt)
483{
484 struct nand_chip *this = mtd->priv;
485
486 if (!this->bbt)
487 return this->block_bad(mtd, ofs, getchip);
488
489 /* Return info from the table */
490 return nand_isbad_bbt (mtd, ofs, allowbbt);
491}
492
493/**
494 * nand_command - [DEFAULT] Send command to NAND device
495 * @mtd: MTD device structure
496 * @command: the command to be sent
497 * @column: the column address for this command, -1 if none
498 * @page_addr: the page address for this command, -1 if none
499 *
500 * Send command to NAND device. This function is used for small page
501 * devices (256/512 Bytes per page)
502 */
503static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
504{
505 register struct nand_chip *this = mtd->priv;
506
507 /* Begin command latch cycle */
508 this->hwcontrol(mtd, NAND_CTL_SETCLE);
509 /*
510 * Write out the command to the device.
511 */
512 if (command == NAND_CMD_SEQIN) {
513 int readcmd;
514
515 if (column >= mtd->oobblock) {
516 /* OOB area */
517 column -= mtd->oobblock;
518 readcmd = NAND_CMD_READOOB;
519 } else if (column < 256) {
520 /* First 256 bytes --> READ0 */
521 readcmd = NAND_CMD_READ0;
522 } else {
523 column -= 256;
524 readcmd = NAND_CMD_READ1;
525 }
526 this->write_byte(mtd, readcmd);
527 }
528 this->write_byte(mtd, command);
529
530 /* Set ALE and clear CLE to start address cycle */
531 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
532
533 if (column != -1 || page_addr != -1) {
534 this->hwcontrol(mtd, NAND_CTL_SETALE);
535
536 /* Serially input address */
537 if (column != -1) {
538 /* Adjust columns for 16 bit buswidth */
539 if (this->options & NAND_BUSWIDTH_16)
540 column >>= 1;
541 this->write_byte(mtd, column);
542 }
543 if (page_addr != -1) {
544 this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
545 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
546 /* One more address cycle for devices > 32MiB */
547 if (this->chipsize > (32 << 20))
548 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
549 }
550 /* Latch in address */
551 this->hwcontrol(mtd, NAND_CTL_CLRALE);
552 }
553
554 /*
555 * program and erase have their own busy handlers
556 * status and sequential in needs no delay
557 */
558 switch (command) {
559
560 case NAND_CMD_PAGEPROG:
561 case NAND_CMD_ERASE1:
562 case NAND_CMD_ERASE2:
563 case NAND_CMD_SEQIN:
564 case NAND_CMD_STATUS:
565 return;
566
567 case NAND_CMD_RESET:
568 if (this->dev_ready)
569 break;
570 udelay(this->chip_delay);
571 this->hwcontrol(mtd, NAND_CTL_SETCLE);
572 this->write_byte(mtd, NAND_CMD_STATUS);
573 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
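		/* Poll the status register until the ready bit (0x40) is set */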
574 while ( !(this->read_byte(mtd) & 0x40));
575 return;
576
577 /* This applies to read commands */
578 default:
579 /*
580 * If we don't have access to the busy pin, we apply the given
581 * command delay
582 */
583 if (!this->dev_ready) {
584 udelay (this->chip_delay);
585 return;
586 }
587 }
588
589 /* Apply this short delay always to ensure that we do wait tWB in
590 * any case on any machine. */
591 ndelay (100);
592 /* wait until command is processed */
593 while (!this->dev_ready(mtd));
594}
595
596/**
597 * nand_command_lp - [DEFAULT] Send command to NAND large page device
598 * @mtd: MTD device structure
599 * @command: the command to be sent
600 * @column: the column address for this command, -1 if none
601 * @page_addr: the page address for this command, -1 if none
602 *
603 * Send command to NAND device. This is the version for the new large page devices
604 * We don't have the separate regions that we have in the small page devices.
605 * We must emulate NAND_CMD_READOOB to keep the code compatible.
606 *
607 */
608static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr)
609{
610 register struct nand_chip *this = mtd->priv;
611
612 /* Emulate NAND_CMD_READOOB */
613 if (command == NAND_CMD_READOOB) {
614 column += mtd->oobblock;
615 command = NAND_CMD_READ0;
616 }
617
618
619 /* Begin command latch cycle */
620 this->hwcontrol(mtd, NAND_CTL_SETCLE);
621 /* Write out the command to the device. */
622 this->write_byte(mtd, command);
623 /* End command latch cycle */
624 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
625
626 if (column != -1 || page_addr != -1) {
627 this->hwcontrol(mtd, NAND_CTL_SETALE);
628
629 /* Serially input address */
630 if (column != -1) {
631 /* Adjust columns for 16 bit buswidth */
632 if (this->options & NAND_BUSWIDTH_16)
633 column >>= 1;
634 this->write_byte(mtd, column & 0xff);
635 this->write_byte(mtd, column >> 8);
636 }
637 if (page_addr != -1) {
638 this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
639 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
640 /* One more address cycle for devices > 128MiB */
641 if (this->chipsize > (128 << 20))
642 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff));
643 }
644 /* Latch in address */
645 this->hwcontrol(mtd, NAND_CTL_CLRALE);
646 }
647
648 /*
649 * program and erase have their own busy handlers
650 * status and sequential in needs no delay
651 */
652 switch (command) {
653
654 case NAND_CMD_CACHEDPROG:
655 case NAND_CMD_PAGEPROG:
656 case NAND_CMD_ERASE1:
657 case NAND_CMD_ERASE2:
658 case NAND_CMD_SEQIN:
659 case NAND_CMD_STATUS:
660 return;
661
662
663 case NAND_CMD_RESET:
664 if (this->dev_ready)
665 break;
666 udelay(this->chip_delay);
667 this->hwcontrol(mtd, NAND_CTL_SETCLE);
668 this->write_byte(mtd, NAND_CMD_STATUS);
669 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
670 while ( !(this->read_byte(mtd) & 0x40));
671 return;
672
673 case NAND_CMD_READ0:
674 /* Begin command latch cycle */
675 this->hwcontrol(mtd, NAND_CTL_SETCLE);
676 /* Write out the start read command */
677 this->write_byte(mtd, NAND_CMD_READSTART);
678 /* End command latch cycle */
679 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
680 /* Fall through into ready check */
681
682 /* This applies to read commands */
683 default:
684 /*
685 * If we don't have access to the busy pin, we apply the given
686 * command delay
687 */
688 if (!this->dev_ready) {
689 udelay (this->chip_delay);
690 return;
691 }
692 }
693
694 /* Apply this short delay always to ensure that we do wait tWB in
695 * any case on any machine. */
696 ndelay (100);
697 /* wait until command is processed */
698 while (!this->dev_ready(mtd));
699}
700
701/**
702 * nand_get_device - [GENERIC] Get chip for selected access
703 * @this: the nand chip descriptor
704 * @mtd: MTD device structure
705 * @new_state: the state which is requested
706 *
707 * Get the device and lock it for exclusive access
708 */
709static void nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state)
710{
711 struct nand_chip *active = this;
712
713 DECLARE_WAITQUEUE (wait, current);
714
715 /*
716 * Grab the lock and see if the device is available
717 */
718retry:
719	/* Hardware controller shared among independent devices */
720 if (this->controller) {
721 spin_lock (&this->controller->lock);
722 if (this->controller->active)
723 active = this->controller->active;
724 else
725 this->controller->active = this;
726 spin_unlock (&this->controller->lock);
727 }
728
729 if (active == this) {
730 spin_lock (&this->chip_lock);
731 if (this->state == FL_READY) {
732 this->state = new_state;
733 spin_unlock (&this->chip_lock);
734 return;
735 }
736 }
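	/* Chip or shared controller is busy: sleep on the owner's wait
	 * queue until nand_release_device() wakes us, then retry */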
737 set_current_state (TASK_UNINTERRUPTIBLE);
738 add_wait_queue (&active->wq, &wait);
739 spin_unlock (&active->chip_lock);
740 schedule ();
741 remove_wait_queue (&active->wq, &wait);
742 goto retry;
743}
744
745/**
746 * nand_wait - [DEFAULT] wait until the command is done
747 * @mtd: MTD device structure
748 * @this: NAND chip structure
749 * @state: state to select the max. timeout value
750 *
751 * Wait for command done. This applies to erase and program only
752 * Erase can take up to 400ms and program up to 20ms according to
753 * general NAND and SmartMedia specs
754 *
755*/
756static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
757{
758
759 unsigned long timeo = jiffies;
760 int status;
761
762 if (state == FL_ERASING)
763 timeo += (HZ * 400) / 1000;
764 else
765 timeo += (HZ * 20) / 1000;
766
767 /* Apply this short delay always to ensure that we do wait tWB in
768 * any case on any machine. */
769 ndelay (100);
770
771 if ((state == FL_ERASING) && (this->options & NAND_IS_AND))
772 this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1);
773 else
774 this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
775
776 while (time_before(jiffies, timeo)) {
777 /* Check, if we were interrupted */
778 if (this->state != state)
779 return 0;
780
781 if (this->dev_ready) {
782 if (this->dev_ready(mtd))
783 break;
784 } else {
785 if (this->read_byte(mtd) & NAND_STATUS_READY)
786 break;
787 }
788 yield ();
789 }
790 status = (int) this->read_byte(mtd);
791 return status;
792}
793
794/**
795 * nand_write_page - [GENERIC] write one page
796 * @mtd: MTD device structure
797 * @this: NAND chip structure
798 * @page: startpage inside the chip, must be called with (page & this->pagemask)
799 * @oob_buf: out of band data buffer
800 * @oobsel:	out of band selection structure
801 * @cached: 1 = enable cached programming if supported by chip
802 *
803 * Nand_page_program function is used for write and writev !
804 * This function will always program a full page of data
805 * If you call it with a non page aligned buffer, you're lost :)
806 *
807 * Cached programming is not supported yet.
808 */
809static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page,
810 u_char *oob_buf, struct nand_oobinfo *oobsel, int cached)
811{
812 int i, status;
813 u_char ecc_code[32];
814 int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
815 int *oob_config = oobsel->eccpos;
816 int datidx = 0, eccidx = 0, eccsteps = this->eccsteps;
817 int eccbytes = 0;
818
819 /* FIXME: Enable cached programming */
820 cached = 0;
821
822 /* Send command to begin auto page programming */
823 this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page);
824
825 /* Write out complete page of data, take care of eccmode */
826 switch (eccmode) {
827 /* No ecc, write all */
828 case NAND_ECC_NONE:
829 printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n");
830 this->write_buf(mtd, this->data_poi, mtd->oobblock);
831 break;
832
833 /* Software ecc 3/256, write all */
834 case NAND_ECC_SOFT:
835 for (; eccsteps; eccsteps--) {
836 this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
837 for (i = 0; i < 3; i++, eccidx++)
838 oob_buf[oob_config[eccidx]] = ecc_code[i];
839 datidx += this->eccsize;
840 }
841 this->write_buf(mtd, this->data_poi, mtd->oobblock);
842 break;
843 default:
844 eccbytes = this->eccbytes;
845 for (; eccsteps; eccsteps--) {
846 /* enable hardware ecc logic for write */
847 this->enable_hwecc(mtd, NAND_ECC_WRITE);
848 this->write_buf(mtd, &this->data_poi[datidx], this->eccsize);
849 this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
850 for (i = 0; i < eccbytes; i++, eccidx++)
851 oob_buf[oob_config[eccidx]] = ecc_code[i];
852 /* If the hardware ecc provides syndromes then
853		 * the ecc code must be written immediately after
854 * the data bytes (words) */
855 if (this->options & NAND_HWECC_SYNDROME)
856 this->write_buf(mtd, ecc_code, eccbytes);
857 datidx += this->eccsize;
858 }
859 break;
860 }
861
862 /* Write out OOB data */
863 if (this->options & NAND_HWECC_SYNDROME)
864 this->write_buf(mtd, &oob_buf[oobsel->eccbytes], mtd->oobsize - oobsel->eccbytes);
865 else
866 this->write_buf(mtd, oob_buf, mtd->oobsize);
867
868 /* Send command to actually program the data */
869 this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1);
870
871 if (!cached) {
872 /* call wait ready function */
873 status = this->waitfunc (mtd, this, FL_WRITING);
874 /* See if device thinks it succeeded */
875 if (status & 0x01) {
876 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page);
877 return -EIO;
878 }
879 } else {
880 /* FIXME: Implement cached programming ! */
881 /* wait until cache is ready*/
882 // status = this->waitfunc (mtd, this, FL_CACHEDRPG);
883 }
884 return 0;
885}
886
887#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
888/**
889 * nand_verify_pages - [GENERIC] verify the chip contents after a write
890 * @mtd: MTD device structure
891 * @this: NAND chip structure
892 * @page: startpage inside the chip, must be called with (page & this->pagemask)
893 * @numpages: number of pages to verify
894 * @oob_buf: out of band data buffer
895 * @oobsel:	out of band selection structure
896 * @chipnr: number of the current chip
897 * @oobmode: 1 = full buffer verify, 0 = ecc only
898 *
899 * The NAND device assumes that it is always writing to a cleanly erased page.
900 * Hence, it performs its internal write verification only on bits that
901 * transitioned from 1 to 0. The device does NOT verify the whole page on a
902 * byte by byte basis. It is possible that the page was not completely erased
903 * or the page is becoming unusable due to wear. The read with ECC would catch
904 * the error later when the ECC page check fails, but we would rather catch
905 * it early in the page write stage. Better to write no data than invalid data.
906 */
907static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
908 u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode)
909{
910 int i, j, datidx = 0, oobofs = 0, res = -EIO;
911 int eccsteps = this->eccsteps;
912 int hweccbytes;
913 u_char oobdata[64];
914
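	/* With syndrome style hardware ECC the ecc bytes are interleaved
	 * with the data, so account for them per ecc step */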
915 hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0;
916
917 /* Send command to read back the first page */
918 this->cmdfunc (mtd, NAND_CMD_READ0, 0, page);
919
920 for(;;) {
921 for (j = 0; j < eccsteps; j++) {
922 /* Loop through and verify the data */
923 if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) {
924 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
925 goto out;
926 }
927 datidx += mtd->eccsize;
928 /* Have we a hw generator layout ? */
929 if (!hweccbytes)
930 continue;
931 if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) {
932 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
933 goto out;
934 }
935 oobofs += hweccbytes;
936 }
937
938 /* check, if we must compare all data or if we just have to
939 * compare the ecc bytes
940 */
941 if (oobmode) {
942 if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) {
943 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
944 goto out;
945 }
946 } else {
947 /* Read always, else autoincrement fails */
948 this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps);
949
950 if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) {
951 int ecccnt = oobsel->eccbytes;
952
953 for (i = 0; i < ecccnt; i++) {
954 int idx = oobsel->eccpos[i];
955 if (oobdata[idx] != oob_buf[oobofs + idx] ) {
956 DEBUG (MTD_DEBUG_LEVEL0,
957 "%s: Failed ECC write "
958					 "verify, page 0x%08x, " "%6i bytes were successful\n", __FUNCTION__, page, i);
959 goto out;
960 }
961 }
962 }
963 }
964 oobofs += mtd->oobsize - hweccbytes * eccsteps;
965 page++;
966 numpages--;
967
968 /* Apply delay or wait for ready/busy pin
969 * Do this before the AUTOINCR check, so no problems
970 * arise if a chip which does auto increment
971 * is marked as NOAUTOINCR by the board driver.
972 * Do this also before returning, so the chip is
973 * ready for the next command.
974 */
975 if (!this->dev_ready)
976 udelay (this->chip_delay);
977 else
978 while (!this->dev_ready(mtd));
979
980 /* All done, return happy */
981 if (!numpages)
982 return 0;
983
984
985 /* Check, if the chip supports auto page increment */
986 if (!NAND_CANAUTOINCR(this))
987 this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
988 }
989 /*
990 * Terminate the read command. We come here in case of an error
991 * So we must issue a reset command.
992 */
993out:
994 this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1);
995 return res;
996}
997#endif
998
999/**
1000 * nand_read - [MTD Interface] MTD compatibility function for nand_read_ecc
1001 * @mtd: MTD device structure
1002 * @from: offset to read from
1003 * @len: number of bytes to read
1004 * @retlen: pointer to variable to store the number of read bytes
1005 * @buf: the databuffer to put data
1006 *
1007 * This function simply calls nand_read_ecc with oob buffer and oobsel = NULL
1008*/
1009static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
1010{
1011 return nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL);
1012}
1013
1014
1015/**
1016 * nand_read_ecc - [MTD Interface] Read data with ECC
1017 * @mtd: MTD device structure
1018 * @from: offset to read from
1019 * @len: number of bytes to read
1020 * @retlen: pointer to variable to store the number of read bytes
1021 * @buf: the databuffer to put data
1022 * @oob_buf: filesystem supplied oob data buffer
1023 * @oobsel: oob selection structure
1024 *
1025 * NAND read with ECC
1026 */
1027static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
1028 size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel)
1029{
1030 int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1;
1031 int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0;
1032 struct nand_chip *this = mtd->priv;
1033 u_char *data_poi, *oob_data = oob_buf;
1034 u_char ecc_calc[32];
1035 u_char ecc_code[32];
1036 int eccmode, eccsteps;
1037 int *oob_config, datidx;
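	/* Pages per erase block minus one, used as a mask to detect block boundaries */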
1038 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1039 int eccbytes;
1040 int compareecc = 1;
1041 int oobreadlen;
1042
1043
1044 DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
1045
1046 /* Do not allow reads past end of device */
1047 if ((from + len) > mtd->size) {
1048 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n");
1049 *retlen = 0;
1050 return -EINVAL;
1051 }
1052
1053 /* Grab the lock and see if the device is available */
1054 nand_get_device (this, mtd ,FL_READING);
1055
1056 /* use userspace supplied oobinfo, if zero */
1057 if (oobsel == NULL)
1058 oobsel = &mtd->oobinfo;
1059
1060 /* Autoplace of oob data ? Use the default placement scheme */
1061 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE)
1062 oobsel = this->autooob;
1063
1064 eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
1065 oob_config = oobsel->eccpos;
1066
1067 /* Select the NAND device */
1068 chipnr = (int)(from >> this->chip_shift);
1069 this->select_chip(mtd, chipnr);
1070
1071 /* First we calculate the starting page */
1072 realpage = (int) (from >> this->page_shift);
1073 page = realpage & this->pagemask;
1074
1075 /* Get raw starting column */
1076 col = from & (mtd->oobblock - 1);
1077
1078 end = mtd->oobblock;
1079 ecc = this->eccsize;
1080 eccbytes = this->eccbytes;
1081
1082 if ((eccmode == NAND_ECC_NONE) || (this->options & NAND_HWECC_SYNDROME))
1083 compareecc = 0;
1084
1085 oobreadlen = mtd->oobsize;
1086 if (this->options & NAND_HWECC_SYNDROME)
1087 oobreadlen -= oobsel->eccbytes;
1088
1089 /* Loop until all data read */
1090 while (read < len) {
1091
1092 int aligned = (!col && (len - read) >= end);
1093 /*
1094 * If the read is not page aligned, we have to read into data buffer
1095 * due to ecc, else we read into return buffer direct
1096 */
1097 if (aligned)
1098 data_poi = &buf[read];
1099 else
1100 data_poi = this->data_buf;
1101
1102 /* Check, if we have this page in the buffer
1103 *
1104 * FIXME: Make it work when we must provide oob data too,
1105 * check the usage of data_buf oob field
1106 */
1107 if (realpage == this->pagebuf && !oob_buf) {
1108 /* aligned read ? */
1109 if (aligned)
1110 memcpy (data_poi, this->data_buf, end);
1111 goto readdata;
1112 }
1113
1114 /* Check, if we must send the read command */
1115 if (sndcmd) {
1116 this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
1117 sndcmd = 0;
1118 }
1119
1120 /* get oob area, if we have no oob buffer from fs-driver */
1121 if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE)
1122 oob_data = &this->data_buf[end];
1123
1124 eccsteps = this->eccsteps;
1125
1126 switch (eccmode) {
1127 case NAND_ECC_NONE: { /* No ECC, Read in a page */
1128 static unsigned long lastwhinge = 0;
1129 if ((lastwhinge / HZ) != (jiffies / HZ)) {
1130 printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n");
1131 lastwhinge = jiffies;
1132 }
1133 this->read_buf(mtd, data_poi, end);
1134 break;
1135 }
1136
1137 case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */
1138 this->read_buf(mtd, data_poi, end);
1139 for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc)
1140 this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
1141 break;
1142
1143 default:
1144 for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=eccbytes, datidx += ecc) {
1145 this->enable_hwecc(mtd, NAND_ECC_READ);
1146 this->read_buf(mtd, &data_poi[datidx], ecc);
1147
1148 /* HW ecc with syndrome calculation must read the
1149			 * syndrome from flash immediately after the data */
1150 if (!compareecc) {
1151 /* Some hw ecc generators need to know when the
1152 * syndrome is read from flash */
1153 this->enable_hwecc(mtd, NAND_ECC_READSYN);
1154 this->read_buf(mtd, &oob_data[i], eccbytes);
1155 /* We calc error correction directly, it checks the hw
1156 * generator for an error, reads back the syndrome and
1157 * does the error correction on the fly */
1158 if (this->correct_data(mtd, &data_poi[datidx], &oob_data[i], &ecc_code[i]) == -1) {
1159 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: "
1160 "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr);
1161 ecc_failed++;
1162 }
1163 } else {
1164 this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
1165 }
1166 }
1167 break;
1168 }
1169
1170 /* read oobdata */
1171 this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen);
1172
1173 /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */
1174 if (!compareecc)
1175 goto readoob;
1176
1177 /* Pick the ECC bytes out of the oob data */
1178 for (j = 0; j < oobsel->eccbytes; j++)
1179 ecc_code[j] = oob_data[oob_config[j]];
1180
1181		/* correct data, if necessary */
1182 for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) {
1183 ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]);
1184
1185 /* Get next chunk of ecc bytes */
1186 j += eccbytes;
1187
1188 /* Check, if we have a fs supplied oob-buffer,
1189 * This is the legacy mode. Used by YAFFS1
1190 * Should go away some day
1191 */
1192 if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) {
1193 int *p = (int *)(&oob_data[mtd->oobsize]);
1194 p[i] = ecc_status;
1195 }
1196
1197 if (ecc_status == -1) {
1198 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page);
1199 ecc_failed++;
1200 }
1201 }
1202
1203 readoob:
1204 /* check, if we have a fs supplied oob-buffer */
1205 if (oob_buf) {
1206 /* without autoplace. Legacy mode used by YAFFS1 */
1207 switch(oobsel->useecc) {
1208 case MTD_NANDECC_AUTOPLACE:
1209 /* Walk through the autoplace chunks */
1210 for (i = 0, j = 0; j < mtd->oobavail; i++) {
1211 int from = oobsel->oobfree[i][0];
1212 int num = oobsel->oobfree[i][1];
1213 memcpy(&oob_buf[oob], &oob_data[from], num);
1214 j+= num;
1215 }
1216 oob += mtd->oobavail;
1217 break;
1218 case MTD_NANDECC_PLACE:
1219 /* YAFFS1 legacy mode */
1220 oob_data += this->eccsteps * sizeof (int);
1221 default:
1222 oob_data += mtd->oobsize;
1223 }
1224 }
1225 readdata:
1226 /* Partial page read, transfer data into fs buffer */
1227 if (!aligned) {
1228 for (j = col; j < end && read < len; j++)
1229 buf[read++] = data_poi[j];
1230 this->pagebuf = realpage;
1231 } else
1232 read += mtd->oobblock;
1233
1234 /* Apply delay or wait for ready/busy pin
1235 * Do this before the AUTOINCR check, so no problems
1236 * arise if a chip which does auto increment
1237 * is marked as NOAUTOINCR by the board driver.
1238 */
1239 if (!this->dev_ready)
1240 udelay (this->chip_delay);
1241 else
1242 while (!this->dev_ready(mtd));
1243
1244 if (read == len)
1245 break;
1246
1247 /* For subsequent reads align to page boundary. */
1248 col = 0;
1249 /* Increment page address */
1250 realpage++;
1251
1252 page = realpage & this->pagemask;
1253 /* Check, if we cross a chip boundary */
1254 if (!page) {
1255 chipnr++;
1256 this->select_chip(mtd, -1);
1257 this->select_chip(mtd, chipnr);
1258 }
1259 /* Check, if the chip supports auto page increment
1260 * or if we have hit a block boundary.
1261 */
1262 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
1263 sndcmd = 1;
1264 }
1265
1266 /* Deselect and wake up anyone waiting on the device */
1267 nand_release_device(mtd);
1268
1269 /*
1270 * Return success, if no ECC failures, else -EBADMSG
1271 * fs driver will take care of that, because
1272 * retlen == desired len and result == -EBADMSG
1273 */
1274 *retlen = read;
1275 return ecc_failed ? -EBADMSG : 0;
1276}
1277
1278/**
1279 * nand_read_oob - [MTD Interface] NAND read out-of-band
1280 * @mtd: MTD device structure
1281 * @from: offset to read from
1282 * @len: number of bytes to read
1283 * @retlen: pointer to variable to store the number of read bytes
1284 * @buf: the databuffer to put data
1285 *
1286 * NAND read out-of-band data from the spare area
1287 */
1288static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
1289{
1290 int i, col, page, chipnr;
1291 struct nand_chip *this = mtd->priv;
1292 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1293
1294 DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
1295
1296 /* Shift to get page */
1297 page = (int)(from >> this->page_shift);
1298 chipnr = (int)(from >> this->chip_shift);
1299
1300 /* Mask to get column */
1301 col = from & (mtd->oobsize - 1);
1302
1303 /* Initialize return length value */
1304 *retlen = 0;
1305
1306 /* Do not allow reads past end of device */
1307 if ((from + len) > mtd->size) {
1308 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n");
1309 *retlen = 0;
1310 return -EINVAL;
1311 }
1312
1313 /* Grab the lock and see if the device is available */
1314 nand_get_device (this, mtd , FL_READING);
1315
1316 /* Select the NAND device */
1317 this->select_chip(mtd, chipnr);
1318
1319 /* Send the read command */
1320 this->cmdfunc (mtd, NAND_CMD_READOOB, col, page & this->pagemask);
1321 /*
1322 * Read the data, if we read more than one page
1323 * oob data, let the device transfer the data !
1324 */
1325 i = 0;
1326 while (i < len) {
1327 int thislen = mtd->oobsize - col;
1328 thislen = min_t(int, thislen, len);
1329 this->read_buf(mtd, &buf[i], thislen);
1330 i += thislen;
1331
1332 /* Apply delay or wait for ready/busy pin
1333 * Do this before the AUTOINCR check, so no problems
1334 * arise if a chip which does auto increment
1335 * is marked as NOAUTOINCR by the board driver.
1336 */
1337 if (!this->dev_ready)
1338 udelay (this->chip_delay);
1339 else
1340 while (!this->dev_ready(mtd));
1341
1342 /* Read more ? */
1343 if (i < len) {
1344 page++;
1345 col = 0;
1346
1347 /* Check, if we cross a chip boundary */
1348 if (!(page & this->pagemask)) {
1349 chipnr++;
1350 this->select_chip(mtd, -1);
1351 this->select_chip(mtd, chipnr);
1352 }
1353
1354 /* Check, if the chip supports auto page increment
1355 * or if we have hit a block boundary.
1356 */
1357 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) {
1358 /* For subsequent page reads set offset to 0 */
1359 this->cmdfunc (mtd, NAND_CMD_READOOB, 0x0, page & this->pagemask);
1360 }
1361 }
1362 }
1363
1364 /* Deselect and wake up anyone waiting on the device */
1365 nand_release_device(mtd);
1366
1367 /* Return happy */
1368 *retlen = len;
1369 return 0;
1370}
1371
1372/**
1373 * nand_read_raw - [GENERIC] Read raw data including oob into buffer
1374 * @mtd: MTD device structure
1375 * @buf: temporary buffer
1376 * @from: offset to read from
1377 * @len: number of bytes to read
1378 * @ooblen: number of oob data bytes to read
1379 *
1380 * Read raw data including oob into buffer
1381 */
1382int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen)
1383{
1384 struct nand_chip *this = mtd->priv;
1385 int page = (int) (from >> this->page_shift);
1386 int chip = (int) (from >> this->chip_shift);
1387 int sndcmd = 1;
1388 int cnt = 0;
1389 int pagesize = mtd->oobblock + mtd->oobsize;
1390 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1391
1392 /* Do not allow reads past end of device */
1393 if ((from + len) > mtd->size) {
1394 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n");
1395 return -EINVAL;
1396 }
1397
1398 /* Grab the lock and see if the device is available */
1399 nand_get_device (this, mtd , FL_READING);
1400
1401 this->select_chip (mtd, chip);
1402
1403 /* Add requested oob length */
1404 len += ooblen;
1405
1406 while (len) {
1407 if (sndcmd)
1408 this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask);
1409 sndcmd = 0;
1410
1411 this->read_buf (mtd, &buf[cnt], pagesize);
1412
1413 len -= pagesize;
1414 cnt += pagesize;
1415 page++;
1416
1417 if (!this->dev_ready)
1418 udelay (this->chip_delay);
1419 else
1420 while (!this->dev_ready(mtd));
1421
1422 /* Check, if the chip supports auto page increment */
1423 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
1424 sndcmd = 1;
1425 }
1426
1427 /* Deselect and wake up anyone waiting on the device */
1428 nand_release_device(mtd);
1429 return 0;
1430}
1431
1432
1433/**
1434 * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer
1435 * @mtd: MTD device structure
1436 * @fsbuf: buffer given by fs driver
1437 * @oobsel:	out of band selection structure
1438 * @autoplace: 1 = place given buffer into the oob bytes
1439 * @numpages: number of pages to prepare
1440 *
1441 * Return:
1442 * 1. Filesystem buffer available and autoplacement is off,
1443 * return filesystem buffer
1444 *	2. No filesystem buffer given, return the internal
1445 *	   buffer
1446 * 3. Filesystem buffer is given and autoplace selected
1447 * put data from fs buffer into internal buffer and
1448 *	   return internal buffer
1449 *
1450 * Note: The internal buffer is filled with 0xff. This must
1451 * be done only once, when no autoplacement happens
1452 * Autoplacement sets the buffer dirty flag, which
1453 * forces the 0xff fill before using the buffer again.
1454 *
1455*/
1456static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel,
1457 int autoplace, int numpages)
1458{
1459 struct nand_chip *this = mtd->priv;
1460 int i, len, ofs;
1461
1462 /* Zero copy fs supplied buffer */
1463 if (fsbuf && !autoplace)
1464 return fsbuf;
1465
1466 /* Check, if the buffer must be filled with ff again */
1467 if (this->oobdirty) {
1468 memset (this->oob_buf, 0xff,
1469 mtd->oobsize << (this->phys_erase_shift - this->page_shift));
1470 this->oobdirty = 0;
1471 }
1472
1473 /* If we have no autoplacement or no fs buffer use the internal one */
1474 if (!autoplace || !fsbuf)
1475 return this->oob_buf;
1476
1477 /* Walk through the pages and place the data */
1478 this->oobdirty = 1;
1479 ofs = 0;
1480 while (numpages--) {
1481 for (i = 0, len = 0; len < mtd->oobavail; i++) {
1482 int to = ofs + oobsel->oobfree[i][0];
1483 int num = oobsel->oobfree[i][1];
1484 memcpy (&this->oob_buf[to], fsbuf, num);
1485 len += num;
1486 fsbuf += num;
1487 }
1488 ofs += mtd->oobavail;
1489 }
1490 return this->oob_buf;
1491}
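/*
 * Editor's note: a minimal illustration of the placement loop above,
 * under the assumption of a hypothetical layout with mtd->oobavail = 8
 * and oobsel->oobfree = { {0, 4}, {6, 4} } ({offset, length} pairs).
 * For the first page (ofs == 0) the loop performs:
 *
 *   memcpy(&oob_buf[0], fsbuf,     4);   - bytes 0..3 of the free area
 *   memcpy(&oob_buf[6], fsbuf + 4, 4);   - bytes 6..9 of the free area
 *
 * i.e. the filesystem bytes are scattered into the "free" oob slots
 * while the positions reserved for ECC and bad block markers are left
 * untouched (still 0xff). ofs is then advanced for the next page.
 */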
1492
1493#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0
1494
1495/**
1496 * nand_write - [MTD Interface] compatibility function for nand_write_ecc
1497 * @mtd: MTD device structure
1498 * @to: offset to write to
1499 * @len: number of bytes to write
1500 * @retlen: pointer to variable to store the number of written bytes
1501 * @buf: the data to write
1502 *
1503 * This function simply calls nand_write_ecc with eccbuf and oobsel set to NULL
1504 *
1505*/
1506static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
1507{
1508 return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL));
1509}
1510
1511/**
1512 * nand_write_ecc - [MTD Interface] NAND write with ECC
1513 * @mtd: MTD device structure
1514 * @to: offset to write to
1515 * @len: number of bytes to write
1516 * @retlen: pointer to variable to store the number of written bytes
1517 * @buf: the data to write
1518 * @eccbuf: filesystem supplied oob data buffer
1519 * @oobsel: oob selection structure
1520 *
1521 * NAND write with ECC
1522 */
1523static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
1524 size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
1525{
1526 int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr;
1527 int autoplace = 0, numpages, totalpages;
1528 struct nand_chip *this = mtd->priv;
1529 u_char *oobbuf, *bufstart;
1530 int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
1531
1532 DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
1533
1534 /* Initialize retlen, in case of early exit */
1535 *retlen = 0;
1536
1537 /* Do not allow write past end of device */
1538 if ((to + len) > mtd->size) {
1539		DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of device\n");
1540 return -EINVAL;
1541 }
1542
1543 /* reject writes, which are not page aligned */
1544 if (NOTALIGNED (to) || NOTALIGNED(len)) {
1545 printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
1546 return -EINVAL;
1547 }
1548
1549 /* Grab the lock and see if the device is available */
1550 nand_get_device (this, mtd, FL_WRITING);
1551
1552 /* Calculate chipnr */
1553 chipnr = (int)(to >> this->chip_shift);
1554 /* Select the NAND device */
1555 this->select_chip(mtd, chipnr);
1556
1557 /* Check, if it is write protected */
1558 if (nand_check_wp(mtd))
1559 goto out;
1560
1561 /* if oobsel is NULL, use chip defaults */
1562 if (oobsel == NULL)
1563 oobsel = &mtd->oobinfo;
1564
1565 /* Autoplace of oob data ? Use the default placement scheme */
1566 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
1567 oobsel = this->autooob;
1568 autoplace = 1;
1569 }
1570
1571 /* Setup variables and oob buffer */
1572 totalpages = len >> this->page_shift;
1573 page = (int) (to >> this->page_shift);
1574 /* Invalidate the page cache, if we write to the cached page */
1575 if (page <= this->pagebuf && this->pagebuf < (page + totalpages))
1576 this->pagebuf = -1;
1577
1578 /* Set it relative to chip */
1579 page &= this->pagemask;
1580 startpage = page;
1581 /* Calc number of pages we can write in one go */
1582 numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages);
1583 oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages);
1584 bufstart = (u_char *)buf;
1585
1586 /* Loop until all data is written */
1587 while (written < len) {
1588
1589 this->data_poi = (u_char*) &buf[written];
1590 /* Write one page. If this is the last page to write
1591 * or the last page in this block, then use the
1592 * real pageprogram command, else select cached programming
1593 * if supported by the chip.
1594 */
1595 ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0));
1596 if (ret) {
1597 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret);
1598 goto out;
1599 }
1600 /* Next oob page */
1601 oob += mtd->oobsize;
1602 /* Update written bytes count */
1603 written += mtd->oobblock;
1604 if (written == len)
1605 goto cmp;
1606
1607 /* Increment page address */
1608 page++;
1609
1610 /* Have we hit a block boundary ? Then we have to verify and
1611 * if verify is ok, we have to setup the oob buffer for
1612 * the next pages.
1613 */
1614 if (!(page & (ppblock - 1))){
1615 int ofs;
1616 this->data_poi = bufstart;
1617 ret = nand_verify_pages (mtd, this, startpage,
1618 page - startpage,
1619 oobbuf, oobsel, chipnr, (eccbuf != NULL));
1620 if (ret) {
1621 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
1622 goto out;
1623 }
1624 *retlen = written;
1625
1626 ofs = autoplace ? mtd->oobavail : mtd->oobsize;
1627 if (eccbuf)
1628 eccbuf += (page - startpage) * ofs;
1629 totalpages -= page - startpage;
1630 numpages = min (totalpages, ppblock);
1631 page &= this->pagemask;
1632 startpage = page;
1633 oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel,
1634 autoplace, numpages);
1635 /* Check, if we cross a chip boundary */
1636 if (!page) {
1637 chipnr++;
1638 this->select_chip(mtd, -1);
1639 this->select_chip(mtd, chipnr);
1640 }
1641 }
1642 }
1643 /* Verify the remaining pages */
1644cmp:
1645 this->data_poi = bufstart;
1646 ret = nand_verify_pages (mtd, this, startpage, totalpages,
1647 oobbuf, oobsel, chipnr, (eccbuf != NULL));
1648 if (!ret)
1649 *retlen = written;
1650 else
1651 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
1652
1653out:
1654 /* Deselect and wake up anyone waiting on the device */
1655 nand_release_device(mtd);
1656
1657 return ret;
1658}
1659
1660
1661/**
1662 * nand_write_oob - [MTD Interface] NAND write out-of-band
1663 * @mtd: MTD device structure
1664 * @to: offset to write to
1665 * @len: number of bytes to write
1666 * @retlen: pointer to variable to store the number of written bytes
1667 * @buf: the data to write
1668 *
1669 * NAND write out-of-band
1670 */
1671static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
1672{
1673 int column, page, status, ret = -EIO, chipnr;
1674 struct nand_chip *this = mtd->priv;
1675
1676 DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
1677
1678 /* Shift to get page */
1679 page = (int) (to >> this->page_shift);
1680 chipnr = (int) (to >> this->chip_shift);
1681
1682 /* Mask to get column */
1683 column = to & (mtd->oobsize - 1);
1684
1685 /* Initialize return length value */
1686 *retlen = 0;
1687
1688 /* Do not allow write past end of page */
1689 if ((column + len) > mtd->oobsize) {
1690 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n");
1691 return -EINVAL;
1692 }
1693
1694 /* Grab the lock and see if the device is available */
1695 nand_get_device (this, mtd, FL_WRITING);
1696
1697 /* Select the NAND device */
1698 this->select_chip(mtd, chipnr);
1699
1700 /* Reset the chip. Some chips (like the Toshiba TC5832DC found
1701 in one of my DiskOnChip 2000 test units) will clear the whole
1702 data page too if we don't do this. I have no clue why, but
1703 I seem to have 'fixed' it in the doc2000 driver in
1704 August 1999. dwmw2. */
1705 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1706
1707 /* Check, if it is write protected */
1708 if (nand_check_wp(mtd))
1709 goto out;
1710
1711 /* Invalidate the page cache, if we write to the cached page */
1712 if (page == this->pagebuf)
1713 this->pagebuf = -1;
1714
1715 if (NAND_MUST_PAD(this)) {
1716 /* Write out desired data */
1717 this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask);
1718 /* prepad 0xff for partial programming */
1719 this->write_buf(mtd, ffchars, column);
1720 /* write data */
1721 this->write_buf(mtd, buf, len);
1722 /* postpad 0xff for partial programming */
1723 this->write_buf(mtd, ffchars, mtd->oobsize - (len+column));
1724 } else {
1725 /* Write out desired data */
1726 this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask);
1727 /* write data */
1728 this->write_buf(mtd, buf, len);
1729 }
1730 /* Send command to program the OOB data */
1731 this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
1732
1733 status = this->waitfunc (mtd, this, FL_WRITING);
1734
1735 /* See if device thinks it succeeded */
1736 if (status & 0x01) {
1737 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page);
1738 ret = -EIO;
1739 goto out;
1740 }
1741 /* Return happy */
1742 *retlen = len;
1743
1744#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
1745 /* Send command to read back the data */
1746 this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask);
1747
1748 if (this->verify_buf(mtd, buf, len)) {
1749 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page);
1750 ret = -EIO;
1751 goto out;
1752 }
1753#endif
1754 ret = 0;
1755out:
1756 /* Deselect and wake up anyone waiting on the device */
1757 nand_release_device(mtd);
1758
1759 return ret;
1760}
1761
1762
1763/**
1764 * nand_writev - [MTD Interface] compatibility function for nand_writev_ecc
1765 * @mtd: MTD device structure
1766 * @vecs: the iovectors to write
1767 * @count: number of vectors
1768 * @to: offset to write to
1769 * @retlen: pointer to variable to store the number of written bytes
1770 *
1771 * NAND write with kvec. This just calls the ecc function
1772 */
1773static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
1774 loff_t to, size_t * retlen)
1775{
1776 return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL));
1777}
1778
1779/**
1780 * nand_writev_ecc - [MTD Interface] write with iovec with ecc
1781 * @mtd: MTD device structure
1782 * @vecs: the iovectors to write
1783 * @count: number of vectors
1784 * @to: offset to write to
1785 * @retlen: pointer to variable to store the number of written bytes
1786 * @eccbuf: filesystem supplied oob data buffer
1787 * @oobsel: oob selection structure
1788 *
1789 * NAND write with iovec with ecc
1790 */
1791static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
1792 loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel)
1793{
1794 int i, page, len, total_len, ret = -EIO, written = 0, chipnr;
1795 int oob, numpages, autoplace = 0, startpage;
1796 struct nand_chip *this = mtd->priv;
1797 int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
1798 u_char *oobbuf, *bufstart;
1799
1800 /* Preset written len for early exit */
1801 *retlen = 0;
1802
1803 /* Calculate total length of data */
1804 total_len = 0;
1805 for (i = 0; i < count; i++)
1806 total_len += (int) vecs[i].iov_len;
1807
1808 DEBUG (MTD_DEBUG_LEVEL3,
1809 "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count);
1810
1811	/* Do not allow write past end of device */
1812 if ((to + total_len) > mtd->size) {
1813 DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n");
1814 return -EINVAL;
1815 }
1816
1817 /* reject writes, which are not page aligned */
1818 if (NOTALIGNED (to) || NOTALIGNED(total_len)) {
1819		printk (KERN_NOTICE "nand_writev_ecc: Attempt to write not page aligned data\n");
1820 return -EINVAL;
1821 }
1822
1823 /* Grab the lock and see if the device is available */
1824 nand_get_device (this, mtd, FL_WRITING);
1825
1826 /* Get the current chip-nr */
1827 chipnr = (int) (to >> this->chip_shift);
1828 /* Select the NAND device */
1829 this->select_chip(mtd, chipnr);
1830
1831 /* Check, if it is write protected */
1832 if (nand_check_wp(mtd))
1833 goto out;
1834
1835 /* if oobsel is NULL, use chip defaults */
1836 if (oobsel == NULL)
1837 oobsel = &mtd->oobinfo;
1838
1839 /* Autoplace of oob data ? Use the default placement scheme */
1840 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
1841 oobsel = this->autooob;
1842 autoplace = 1;
1843 }
1844
1845 /* Setup start page */
1846 page = (int) (to >> this->page_shift);
1847 /* Invalidate the page cache, if we write to the cached page */
1848 if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift))
1849 this->pagebuf = -1;
1850
1851 startpage = page & this->pagemask;
1852
1853	/* Loop until all kvec data has been written */
1854 len = 0;
1855 while (count) {
1856 /* If the given tuple is >= pagesize then
1857 * write it out from the iov
1858 */
1859 if ((vecs->iov_len - len) >= mtd->oobblock) {
1860 /* Calc number of pages we can write
1861 * out of this iov in one go */
1862 numpages = (vecs->iov_len - len) >> this->page_shift;
1863 /* Do not cross block boundaries */
1864 numpages = min (ppblock - (startpage & (ppblock - 1)), numpages);
1865 oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
1866 bufstart = (u_char *)vecs->iov_base;
1867 bufstart += len;
1868 this->data_poi = bufstart;
1869 oob = 0;
1870 for (i = 1; i <= numpages; i++) {
1871 /* Write one page. If this is the last page to write
1872 * then use the real pageprogram command, else select
1873 * cached programming if supported by the chip.
1874 */
1875 ret = nand_write_page (mtd, this, page & this->pagemask,
1876 &oobbuf[oob], oobsel, i != numpages);
1877 if (ret)
1878 goto out;
1879 this->data_poi += mtd->oobblock;
1880 len += mtd->oobblock;
1881 oob += mtd->oobsize;
1882 page++;
1883 }
1884 /* Check, if we have to switch to the next tuple */
1885 if (len >= (int) vecs->iov_len) {
1886 vecs++;
1887 len = 0;
1888 count--;
1889 }
1890 } else {
1891 /* We must use the internal buffer, read data out of each
1892 * tuple until we have a full page to write
1893 */
1894 int cnt = 0;
1895 while (cnt < mtd->oobblock) {
1896 if (vecs->iov_base != NULL && vecs->iov_len)
1897 this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++];
1898 /* Check, if we have to switch to the next tuple */
1899 if (len >= (int) vecs->iov_len) {
1900 vecs++;
1901 len = 0;
1902 count--;
1903 }
1904 }
1905 this->pagebuf = page;
1906 this->data_poi = this->data_buf;
1907 bufstart = this->data_poi;
1908 numpages = 1;
1909 oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
1910 ret = nand_write_page (mtd, this, page & this->pagemask,
1911 oobbuf, oobsel, 0);
1912 if (ret)
1913 goto out;
1914 page++;
1915 }
1916
1917 this->data_poi = bufstart;
1918 ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0);
1919 if (ret)
1920 goto out;
1921
1922 written += mtd->oobblock * numpages;
1923 /* All done ? */
1924 if (!count)
1925 break;
1926
1927 startpage = page & this->pagemask;
1928 /* Check, if we cross a chip boundary */
1929 if (!startpage) {
1930 chipnr++;
1931 this->select_chip(mtd, -1);
1932 this->select_chip(mtd, chipnr);
1933 }
1934 }
1935 ret = 0;
1936out:
1937 /* Deselect and wake up anyone waiting on the device */
1938 nand_release_device(mtd);
1939
1940 *retlen = written;
1941 return ret;
1942}
1943
1944/**
1945 * single_erase_cmd - [GENERIC] NAND standard block erase command function
1946 * @mtd: MTD device structure
1947 * @page: the page address of the block which will be erased
1948 *
1949 * Standard erase command for NAND chips
1950 */
1951static void single_erase_cmd (struct mtd_info *mtd, int page)
1952{
1953 struct nand_chip *this = mtd->priv;
1954 /* Send commands to erase a block */
1955 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
1956 this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
1957}
1958
1959/**
1960 * multi_erase_cmd - [GENERIC] AND specific block erase command function
1961 * @mtd: MTD device structure
1962 * @page: the page address of the block which will be erased
1963 *
1964 * AND multi block erase command function
1965 * Erase 4 consecutive blocks
1966 */
1967static void multi_erase_cmd (struct mtd_info *mtd, int page)
1968{
1969 struct nand_chip *this = mtd->priv;
1970 /* Send commands to erase a block */
1971 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
1972 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
1973 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
1974 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
1975 this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
1976}
1977
1978/**
1979 * nand_erase - [MTD Interface] erase block(s)
1980 * @mtd: MTD device structure
1981 * @instr: erase instruction
1982 *
1983 * Erase one or more blocks
1984 */
1985static int nand_erase (struct mtd_info *mtd, struct erase_info *instr)
1986{
1987 return nand_erase_nand (mtd, instr, 0);
1988}
1989
1990/**
1991 * nand_erase_nand - [NAND Interface] erase block(s)
1992 * @mtd: MTD device structure
1993 * @instr: erase instruction
1994 * @allowbbt: allow erasing the bbt area
1995 *
1996 * Erase one or more blocks
1997 */
1998int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt)
1999{
2000 int page, len, status, pages_per_block, ret, chipnr;
2001 struct nand_chip *this = mtd->priv;
2002
2003 DEBUG (MTD_DEBUG_LEVEL3,
2004 "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
2005
2006 /* Start address must align on block boundary */
2007 if (instr->addr & ((1 << this->phys_erase_shift) - 1)) {
2008 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
2009 return -EINVAL;
2010 }
2011
2012 /* Length must align on block boundary */
2013 if (instr->len & ((1 << this->phys_erase_shift) - 1)) {
2014 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n");
2015 return -EINVAL;
2016 }
2017
2018 /* Do not allow erase past end of device */
2019 if ((instr->len + instr->addr) > mtd->size) {
2020 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n");
2021 return -EINVAL;
2022 }
2023
2024 instr->fail_addr = 0xffffffff;
2025
2026 /* Grab the lock and see if the device is available */
2027 nand_get_device (this, mtd, FL_ERASING);
2028
2029 /* Shift to get first page */
2030 page = (int) (instr->addr >> this->page_shift);
2031 chipnr = (int) (instr->addr >> this->chip_shift);
2032
2033 /* Calculate pages in each block */
2034 pages_per_block = 1 << (this->phys_erase_shift - this->page_shift);
2035
2036 /* Select the NAND device */
2037 this->select_chip(mtd, chipnr);
2038
2039 /* Check the WP bit */
2040 /* Check, if it is write protected */
2041 if (nand_check_wp(mtd)) {
2042 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n");
2043 instr->state = MTD_ERASE_FAILED;
2044 goto erase_exit;
2045 }
2046
2047 /* Loop through the pages */
2048 len = instr->len;
2049
2050 instr->state = MTD_ERASING;
2051
2052 while (len) {
2053 /* Check if we have a bad block, we do not erase bad blocks ! */
2054 if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) {
2055 printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page);
2056 instr->state = MTD_ERASE_FAILED;
2057 goto erase_exit;
2058 }
2059
2060 /* Invalidate the page cache, if we erase the block which contains
2061 the current cached page */
2062 if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block))
2063 this->pagebuf = -1;
2064
2065 this->erase_cmd (mtd, page & this->pagemask);
2066
2067 status = this->waitfunc (mtd, this, FL_ERASING);
2068
2069 /* See if block erase succeeded */
2070 if (status & 0x01) {
2071 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page);
2072 instr->state = MTD_ERASE_FAILED;
2073 instr->fail_addr = (page << this->page_shift);
2074 goto erase_exit;
2075 }
2076
2077 /* Increment page address and decrement length */
2078 len -= (1 << this->phys_erase_shift);
2079 page += pages_per_block;
2080
2081 /* Check, if we cross a chip boundary */
2082 if (len && !(page & this->pagemask)) {
2083 chipnr++;
2084 this->select_chip(mtd, -1);
2085 this->select_chip(mtd, chipnr);
2086 }
2087 }
2088 instr->state = MTD_ERASE_DONE;
2089
2090erase_exit:
2091
2092 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2093 /* Do call back function */
2094 if (!ret)
2095 mtd_erase_callback(instr);
2096
2097 /* Deselect and wake up anyone waiting on the device */
2098 nand_release_device(mtd);
2099
2100 /* Return more or less happy */
2101 return ret;
2102}
2103
2104/**
2105 * nand_sync - [MTD Interface] sync
2106 * @mtd: MTD device structure
2107 *
2108 * Sync is actually a wait for chip ready function
2109 */
2110static void nand_sync (struct mtd_info *mtd)
2111{
2112 struct nand_chip *this = mtd->priv;
2113
2114 DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n");
2115
2116 /* Grab the lock and see if the device is available */
2117 nand_get_device (this, mtd, FL_SYNCING);
2118 /* Release it and go back */
2119 nand_release_device (mtd);
2120}
2121
2122
2123/**
2124 * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
2125 * @mtd: MTD device structure
2126 * @ofs: offset relative to mtd start
2127 */
2128static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs)
2129{
2130 /* Check for invalid offset */
2131 if (ofs > mtd->size)
2132 return -EINVAL;
2133
2134 return nand_block_checkbad (mtd, ofs, 1, 0);
2135}
2136
2137/**
2138 * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
2139 * @mtd: MTD device structure
2140 * @ofs: offset relative to mtd start
2141 */
2142static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs)
2143{
2144 struct nand_chip *this = mtd->priv;
2145 int ret;
2146
2147 if ((ret = nand_block_isbad(mtd, ofs))) {
2148 /* If it was bad already, return success and do nothing. */
2149 if (ret > 0)
2150 return 0;
2151 return ret;
2152 }
2153
2154 return this->block_markbad(mtd, ofs);
2155}
2156
2157/**
2158 * nand_scan - [NAND Interface] Scan for the NAND device
2159 * @mtd: MTD device structure
2160 * @maxchips: Number of chips to scan for
2161 *
2162 * This fills out all the uninitialized function pointers
2163 * with the defaults.
2164 * The flash ID is read and the mtd/chip structures are
2165 * filled with the appropriate values. Buffers are allocated if
2166 * they are not provided by the board driver
2167 *
2168 */
2169int nand_scan (struct mtd_info *mtd, int maxchips)
2170{
2171 int i, j, nand_maf_id, nand_dev_id, busw;
2172 struct nand_chip *this = mtd->priv;
2173
2174 /* Get buswidth to select the correct functions*/
2175 busw = this->options & NAND_BUSWIDTH_16;
2176
2177 /* check for proper chip_delay setup, set 20us if not */
2178 if (!this->chip_delay)
2179 this->chip_delay = 20;
2180
2181	/* check, if a user supplied command function was given */
2182 if (this->cmdfunc == NULL)
2183 this->cmdfunc = nand_command;
2184
2185	/* check, if a user supplied wait function was given */
2186 if (this->waitfunc == NULL)
2187 this->waitfunc = nand_wait;
2188
2189 if (!this->select_chip)
2190 this->select_chip = nand_select_chip;
2191 if (!this->write_byte)
2192 this->write_byte = busw ? nand_write_byte16 : nand_write_byte;
2193 if (!this->read_byte)
2194 this->read_byte = busw ? nand_read_byte16 : nand_read_byte;
2195 if (!this->write_word)
2196 this->write_word = nand_write_word;
2197 if (!this->read_word)
2198 this->read_word = nand_read_word;
2199 if (!this->block_bad)
2200 this->block_bad = nand_block_bad;
2201 if (!this->block_markbad)
2202 this->block_markbad = nand_default_block_markbad;
2203 if (!this->write_buf)
2204 this->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2205 if (!this->read_buf)
2206 this->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2207 if (!this->verify_buf)
2208 this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
2209 if (!this->scan_bbt)
2210 this->scan_bbt = nand_default_bbt;
2211
2212 /* Select the device */
2213 this->select_chip(mtd, 0);
2214
2215 /* Send the command for reading device ID */
2216 this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
2217
2218 /* Read manufacturer and device IDs */
2219 nand_maf_id = this->read_byte(mtd);
2220 nand_dev_id = this->read_byte(mtd);
2221
2222 /* Print and store flash device information */
2223 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
2224
2225 if (nand_dev_id != nand_flash_ids[i].id)
2226 continue;
2227
2228 if (!mtd->name) mtd->name = nand_flash_ids[i].name;
2229 this->chipsize = nand_flash_ids[i].chipsize << 20;
2230
2231 /* New devices have all the information in additional id bytes */
2232 if (!nand_flash_ids[i].pagesize) {
2233 int extid;
2234			/* The 3rd id byte contains no relevant data at the moment */
2235 extid = this->read_byte(mtd);
2236 /* The 4th id byte is the important one */
2237 extid = this->read_byte(mtd);
2238 /* Calc pagesize */
2239 mtd->oobblock = 1024 << (extid & 0x3);
2240 extid >>= 2;
2241 /* Calc oobsize */
2242 mtd->oobsize = (8 << (extid & 0x03)) * (mtd->oobblock / 512);
2243 extid >>= 2;
2244 /* Calc blocksize. Blocksize is multiples of 64KiB */
2245 mtd->erasesize = (64 * 1024) << (extid & 0x03);
2246 extid >>= 2;
2247 /* Get buswidth information */
2248 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
2249
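			/*
			 * Editor's note (illustrative, not from the original
			 * source): a worked example of the decode above for a
			 * hypothetical 4th id byte of 0x15:
			 *   pagesize  : 1024 << (0x15 & 0x3)       = 2048
			 *   oobsize   : (8 << 0x1) * (2048 / 512)   = 64
			 *   erasesize : (64 * 1024) << 0x1           = 128 KiB
			 *   buswidth  : remaining bit is 0           -> 8 bit
			 */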
2250 } else {
2251 /* Old devices have this data hardcoded in the
2252 * device id table */
2253 mtd->erasesize = nand_flash_ids[i].erasesize;
2254 mtd->oobblock = nand_flash_ids[i].pagesize;
2255 mtd->oobsize = mtd->oobblock / 32;
2256 busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16;
2257 }
2258
2259 /* Check, if buswidth is correct. Hardware drivers should set
2260		 * this correctly ! */
2261 if (busw != (this->options & NAND_BUSWIDTH_16)) {
2262 printk (KERN_INFO "NAND device: Manufacturer ID:"
2263 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
2264 nand_manuf_ids[i].name , mtd->name);
2265 printk (KERN_WARNING
2266				"NAND bus width %d bit instead of %d bit\n",
2267 (this->options & NAND_BUSWIDTH_16) ? 16 : 8,
2268 busw ? 16 : 8);
2269 this->select_chip(mtd, -1);
2270 return 1;
2271 }
2272
2273 /* Calculate the address shift from the page size */
2274 this->page_shift = ffs(mtd->oobblock) - 1;
2275 this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1;
2276 this->chip_shift = ffs(this->chipsize) - 1;
2277
2278 /* Set the bad block position */
2279 this->badblockpos = mtd->oobblock > 512 ?
2280 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2281
2282 /* Get chip options, preserve non chip based options */
2283 this->options &= ~NAND_CHIPOPTIONS_MSK;
2284 this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK;
2285		/* Set this as a default. Board drivers can override it, if necessary */
2286 this->options |= NAND_NO_AUTOINCR;
2287		/* Check if this is not a Samsung device. Do not clear the options
2288		 * for chips which do not have an extended id.
2289		 */
2290 if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize)
2291 this->options &= ~NAND_SAMSUNG_LP_OPTIONS;
2292
2293 /* Check for AND chips with 4 page planes */
2294 if (this->options & NAND_4PAGE_ARRAY)
2295 this->erase_cmd = multi_erase_cmd;
2296 else
2297 this->erase_cmd = single_erase_cmd;
2298
2299 /* Do not replace user supplied command function ! */
2300 if (mtd->oobblock > 512 && this->cmdfunc == nand_command)
2301 this->cmdfunc = nand_command_lp;
2302
2303 /* Try to identify manufacturer */
2304 for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
2305 if (nand_manuf_ids[j].id == nand_maf_id)
2306 break;
2307 }
2308 printk (KERN_INFO "NAND device: Manufacturer ID:"
2309 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
2310 nand_manuf_ids[j].name , nand_flash_ids[i].name);
2311 break;
2312 }
2313
2314 if (!nand_flash_ids[i].name) {
2315 printk (KERN_WARNING "No NAND device found!!!\n");
2316 this->select_chip(mtd, -1);
2317 return 1;
2318 }
2319
2320 for (i=1; i < maxchips; i++) {
2321 this->select_chip(mtd, i);
2322
2323 /* Send the command for reading device ID */
2324 this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
2325
2326 /* Read manufacturer and device IDs */
2327 if (nand_maf_id != this->read_byte(mtd) ||
2328 nand_dev_id != this->read_byte(mtd))
2329 break;
2330 }
2331 if (i > 1)
2332 printk(KERN_INFO "%d NAND chips detected\n", i);
2333
2334	/* Allocate buffers, if necessary */
2335 if (!this->oob_buf) {
2336 size_t len;
2337 len = mtd->oobsize << (this->phys_erase_shift - this->page_shift);
2338 this->oob_buf = kmalloc (len, GFP_KERNEL);
2339 if (!this->oob_buf) {
2340 printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n");
2341 return -ENOMEM;
2342 }
2343 this->options |= NAND_OOBBUF_ALLOC;
2344 }
2345
2346 if (!this->data_buf) {
2347 size_t len;
2348 len = mtd->oobblock + mtd->oobsize;
2349 this->data_buf = kmalloc (len, GFP_KERNEL);
2350 if (!this->data_buf) {
2351 if (this->options & NAND_OOBBUF_ALLOC)
2352 kfree (this->oob_buf);
2353 printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n");
2354 return -ENOMEM;
2355 }
2356 this->options |= NAND_DATABUF_ALLOC;
2357 }
2358
2359 /* Store the number of chips and calc total size for mtd */
2360 this->numchips = i;
2361 mtd->size = i * this->chipsize;
2362 /* Convert chipsize to number of pages per chip -1. */
2363 this->pagemask = (this->chipsize >> this->page_shift) - 1;
2364 /* Preset the internal oob buffer */
2365 memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift));
2366
2367 /* If no default placement scheme is given, select an
2368 * appropriate one */
2369 if (!this->autooob) {
2370 /* Select the appropriate default oob placement scheme for
2371 * placement agnostic filesystems */
2372 switch (mtd->oobsize) {
2373 case 8:
2374 this->autooob = &nand_oob_8;
2375 break;
2376 case 16:
2377 this->autooob = &nand_oob_16;
2378 break;
2379 case 64:
2380 this->autooob = &nand_oob_64;
2381 break;
2382 default:
2383 printk (KERN_WARNING "No oob scheme defined for oobsize %d\n",
2384 mtd->oobsize);
2385 BUG();
2386 }
2387 }
2388
2389	/* The number of bytes available for the filesystem to place fs dependent
2390	 * oob data */
2391 if (this->options & NAND_BUSWIDTH_16) {
2392 mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 2);
2393 if (this->autooob->eccbytes & 0x01)
2394 mtd->oobavail--;
2395 } else
2396 mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 1);
2397
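	/*
	 * Editor's note (illustrative only): with a 16 byte oob area and
	 * 6 ecc bytes - values assumed here to match the common small
	 * page layout - the calculation above gives
	 *   8 bit bus : 16 - (6 + 1) = 9 bytes available to the fs
	 *   16 bit bus: 16 - (6 + 2) = 8 bytes available to the fs
	 * The extra byte reserved on 16 bit devices presumably accounts
	 * for the wider bad block marker; an odd number of ecc bytes
	 * would cost one more byte there as well.
	 */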
2398	/*
2399	 * check ECC mode, default to software.
2400	 * If 3byte/512byte hardware ECC is selected and we have a 256 byte pagesize,
2401	 * fall back to software ECC
2402	 */
2403 this->eccsize = 256; /* set default eccsize */
2404 this->eccbytes = 3;
2405
2406 switch (this->eccmode) {
2407 case NAND_ECC_HW12_2048:
2408 if (mtd->oobblock < 2048) {
2409 printk(KERN_WARNING "2048 byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
2410 mtd->oobblock);
2411 this->eccmode = NAND_ECC_SOFT;
2412 this->calculate_ecc = nand_calculate_ecc;
2413 this->correct_data = nand_correct_data;
2414 } else
2415 this->eccsize = 2048;
2416 break;
2417
2418 case NAND_ECC_HW3_512:
2419 case NAND_ECC_HW6_512:
2420 case NAND_ECC_HW8_512:
2421 if (mtd->oobblock == 256) {
2422 printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n");
2423 this->eccmode = NAND_ECC_SOFT;
2424 this->calculate_ecc = nand_calculate_ecc;
2425 this->correct_data = nand_correct_data;
2426 } else
2427 this->eccsize = 512; /* set eccsize to 512 */
2428 break;
2429
2430 case NAND_ECC_HW3_256:
2431 break;
2432
2433 case NAND_ECC_NONE:
2434 printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. This is not recommended !!\n");
2435 this->eccmode = NAND_ECC_NONE;
2436 break;
2437
2438 case NAND_ECC_SOFT:
2439 this->calculate_ecc = nand_calculate_ecc;
2440 this->correct_data = nand_correct_data;
2441 break;
2442
2443 default:
2444 printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
2445 BUG();
2446 }
2447
2448 /* Check hardware ecc function availability and adjust number of ecc bytes per
2449 * calculation step
2450 */
2451 switch (this->eccmode) {
2452 case NAND_ECC_HW12_2048:
2453 this->eccbytes += 4;
2454 case NAND_ECC_HW8_512:
2455 this->eccbytes += 2;
2456 case NAND_ECC_HW6_512:
2457 this->eccbytes += 3;
2458 case NAND_ECC_HW3_512:
2459 case NAND_ECC_HW3_256:
2460 if (this->calculate_ecc && this->correct_data && this->enable_hwecc)
2461 break;
2462 printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n");
2463 BUG();
2464 }
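	/*
	 * Editor's note: the switch above relies on deliberate
	 * fall-through, so the ecc byte count accumulates on top of the
	 * default of 3 set further up:
	 *   NAND_ECC_HW12_2048 : 3 + 4 + 2 + 3 = 12 bytes per step
	 *   NAND_ECC_HW8_512   : 3 + 2 + 3     =  8 bytes per step
	 *   NAND_ECC_HW6_512   : 3 + 3         =  6 bytes per step
	 *   NAND_ECC_HW3_512 / HW3_256         =  3 bytes per step
	 */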
2465
2466 mtd->eccsize = this->eccsize;
2467
2468 /* Set the number of read / write steps for one page to ensure ECC generation */
2469 switch (this->eccmode) {
2470 case NAND_ECC_HW12_2048:
2471 this->eccsteps = mtd->oobblock / 2048;
2472 break;
2473 case NAND_ECC_HW3_512:
2474 case NAND_ECC_HW6_512:
2475 case NAND_ECC_HW8_512:
2476 this->eccsteps = mtd->oobblock / 512;
2477 break;
2478 case NAND_ECC_HW3_256:
2479 case NAND_ECC_SOFT:
2480 this->eccsteps = mtd->oobblock / 256;
2481 break;
2482
2483 case NAND_ECC_NONE:
2484 this->eccsteps = 1;
2485 break;
2486 }
2487
2488 /* Initialize state, waitqueue and spinlock */
2489 this->state = FL_READY;
2490 init_waitqueue_head (&this->wq);
2491 spin_lock_init (&this->chip_lock);
2492
2493 /* De-select the device */
2494 this->select_chip(mtd, -1);
2495
2496 /* Invalidate the pagebuffer reference */
2497 this->pagebuf = -1;
2498
2499 /* Fill in remaining MTD driver data */
2500 mtd->type = MTD_NANDFLASH;
2501 mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC;
2502 mtd->ecctype = MTD_ECC_SW;
2503 mtd->erase = nand_erase;
2504 mtd->point = NULL;
2505 mtd->unpoint = NULL;
2506 mtd->read = nand_read;
2507 mtd->write = nand_write;
2508 mtd->read_ecc = nand_read_ecc;
2509 mtd->write_ecc = nand_write_ecc;
2510 mtd->read_oob = nand_read_oob;
2511 mtd->write_oob = nand_write_oob;
2512 mtd->readv = NULL;
2513 mtd->writev = nand_writev;
2514 mtd->writev_ecc = nand_writev_ecc;
2515 mtd->sync = nand_sync;
2516 mtd->lock = NULL;
2517 mtd->unlock = NULL;
2518 mtd->suspend = NULL;
2519 mtd->resume = NULL;
2520 mtd->block_isbad = nand_block_isbad;
2521 mtd->block_markbad = nand_block_markbad;
2522
2523 /* and make the autooob the default one */
2524 memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo));
2525
2526 mtd->owner = THIS_MODULE;
2527
2528 /* Build bad block table */
2529 return this->scan_bbt (mtd);
2530}
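/*
 * Editor's note: a minimal, hypothetical board driver sketch showing the
 * intended call order around nand_scan()/nand_release(). Names such as
 * my_mtd, my_chip, my_io_base and my_hwcontrol are placeholders and not
 * part of this file; error handling is reduced to the bare minimum.
 *
 *	static struct mtd_info my_mtd;
 *	static struct nand_chip my_chip;
 *
 *	static int __init my_board_init (void)
 *	{
 *		my_mtd.priv = &my_chip;
 *		my_chip.IO_ADDR_R  = my_io_base;	- board specific I/O window
 *		my_chip.IO_ADDR_W  = my_io_base;
 *		my_chip.hwcontrol  = my_hwcontrol;	- drives CLE/ALE/nCE
 *		my_chip.eccmode    = NAND_ECC_SOFT;
 *		my_chip.chip_delay = 20;
 *
 *		if (nand_scan (&my_mtd, 1))		- probe a single chip
 *			return -ENXIO;
 *		return add_mtd_device (&my_mtd);
 *	}
 *
 *	static void __exit my_board_exit (void)
 *	{
 *		nand_release (&my_mtd);	- also deregisters the mtd device
 *	}
 */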
2531
2532/**
2533 * nand_release - [NAND Interface] Free resources held by the NAND device
2534 * @mtd: MTD device structure
2535*/
2536void nand_release (struct mtd_info *mtd)
2537{
2538 struct nand_chip *this = mtd->priv;
2539
2540#ifdef CONFIG_MTD_PARTITIONS
2541 /* Deregister partitions */
2542 del_mtd_partitions (mtd);
2543#endif
2544 /* Deregister the device */
2545 del_mtd_device (mtd);
2546
2547 /* Free bad block table memory, if allocated */
2548 if (this->bbt)
2549 kfree (this->bbt);
2550 /* Buffer allocated by nand_scan ? */
2551 if (this->options & NAND_OOBBUF_ALLOC)
2552 kfree (this->oob_buf);
2553 /* Buffer allocated by nand_scan ? */
2554 if (this->options & NAND_DATABUF_ALLOC)
2555 kfree (this->data_buf);
2556}
2557
2558EXPORT_SYMBOL (nand_scan);
2559EXPORT_SYMBOL (nand_release);
2560
2561MODULE_LICENSE ("GPL");
2562MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
2563MODULE_DESCRIPTION ("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
new file mode 100644
index 000000000000..9a1949751c1f
--- /dev/null
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -0,0 +1,1056 @@
1/*
2 * drivers/mtd/nand_bbt.c
3 *
4 * Overview:
5 * Bad block table support for the NAND driver
6 *
7 * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
8 *
9 * $Id: nand_bbt.c,v 1.28 2004/11/13 10:19:09 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Description:
16 *
17 * When nand_scan_bbt is called, it tries to find the bad block table
18 * depending on the options in the bbt descriptor(s). If a bbt is found
19 * then the contents are read and the memory based bbt is created. If a
20 * mirrored bbt is selected then the mirror is searched too and the
21 * versions are compared. If the mirror has a greater version number
22 * then the mirror bbt is used to build the memory based bbt.
23 * If the tables are not versioned, then we "or" the bad block information.
24 * If one of the bbts is out of date or does not exist, it is (re)created.
25 * If no bbt exists at all then the device is scanned for factory marked
26 * good / bad blocks and the bad block tables are created.
27 *
28 * For manufacturer created bbts like the one found on M-SYS DOC devices
29 * the bbt is searched and read but never created
30 *
31 * The autogenerated bad block table is located in the last good blocks
32 * of the device. The table is mirrored, so it can be updated when required.
33 * The table is marked in the oob area with an ident pattern and a version
34 * number which indicates which of both tables is more up to date.
35 *
36 * The table uses 2 bits per block
37 * 11b: block is good
38 * 00b: block is factory marked bad
39 * 01b, 10b: block is marked bad due to wear
40 *
41 * The memory bad block table uses the following scheme:
42 * 00b: block is good
43 * 01b: block is marked bad due to wear
44 * 10b: block is reserved (to protect the bbt area)
45 * 11b: block is factory marked bad
46 *
47 * Multichip devices like DOC store the bad block info per floor.
48 *
49 * Following assumptions are made:
50 * - bbts start at a page boundary, if autolocated on a block boundary
51 * - the space necessary for a bbt in FLASH does not exceed a block boundary
52 *
53 */
54
55#include <linux/slab.h>
56#include <linux/types.h>
57#include <linux/mtd/mtd.h>
58#include <linux/mtd/nand.h>
59#include <linux/mtd/nand_ecc.h>
60#include <linux/mtd/compatmac.h>
61#include <linux/bitops.h>
62#include <linux/delay.h>
63
64
65/**
66 * check_pattern - [GENERIC] check if a pattern is in the buffer
67 * @buf: the buffer to search
68 * @len: the length of buffer to search
69 * @paglen: the pagelength
70 * @td: search pattern descriptor
71 *
72 * Check for a pattern at the given place. Used to search bad block
73 * tables and good / bad block identifiers.
74 * If the SCAN_EMPTY option is set then check, if all bytes except the
75 * pattern area contain 0xff
76 *
77*/
78static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
79{
80 int i, end;
81 uint8_t *p = buf;
82
83 end = paglen + td->offs;
84 if (td->options & NAND_BBT_SCANEMPTY) {
85 for (i = 0; i < end; i++) {
86 if (p[i] != 0xff)
87 return -1;
88 }
89 }
90 p += end;
91
92 /* Compare the pattern */
93 for (i = 0; i < td->len; i++) {
94 if (p[i] != td->pattern[i])
95 return -1;
96 }
97
98 p += td->len;
99 end += td->len;
100 if (td->options & NAND_BBT_SCANEMPTY) {
101 for (i = end; i < len; i++) {
102 if (*p++ != 0xff)
103 return -1;
104 }
105 }
106 return 0;
107}
108
109/**
110 * read_bbt - [GENERIC] Read the bad block table starting from page
111 * @mtd: MTD device structure
112 * @buf: temporary buffer
113 * @page: the starting page
114 * @num: the number of bbt descriptors to read
115 * @bits: number of bits per block
116 * @offs: offset in the memory table
117 * @reserved_block_code: Pattern to identify reserved blocks
118 *
119 * Read the bad block table starting from page.
120 *
121 */
122static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
123 int bits, int offs, int reserved_block_code)
124{
125 int res, i, j, act = 0;
126 struct nand_chip *this = mtd->priv;
127 size_t retlen, len, totlen;
128 loff_t from;
129 uint8_t msk = (uint8_t) ((1 << bits) - 1);
130
131 totlen = (num * bits) >> 3;
132 from = ((loff_t)page) << this->page_shift;
133
134 while (totlen) {
135 len = min (totlen, (size_t) (1 << this->bbt_erase_shift));
136 res = mtd->read_ecc (mtd, from, len, &retlen, buf, NULL, this->autooob);
137 if (res < 0) {
138 if (retlen != len) {
139 printk (KERN_INFO "nand_bbt: Error reading bad block table\n");
140 return res;
141 }
142 printk (KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
143 }
144
145 /* Analyse data */
146 for (i = 0; i < len; i++) {
147 uint8_t dat = buf[i];
148 for (j = 0; j < 8; j += bits, act += 2) {
149 uint8_t tmp = (dat >> j) & msk;
150 if (tmp == msk)
151 continue;
152 if (reserved_block_code &&
153 (tmp == reserved_block_code)) {
154 printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
155 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
156 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
157 continue;
158 }
159				/* Leave it for now, if it has matured we can move this
160 * message to MTD_DEBUG_LEVEL0 */
161 printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
162 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
163 /* Factory marked bad or worn out ? */
164 if (tmp == 0)
165 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
166 else
167 this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
168 }
169 }
170 totlen -= len;
171 from += len;
172 }
173 return 0;
174}
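/*
 * Editor's note: an illustrative decode of a single flash bbt byte by
 * the loop above, assuming the usual 2 bits per block (msk = 0x03) and
 * no reserved_block_code. A hypothetical byte 0xf4 (binary 11 11 01 00,
 * read from bit 0 upwards) describes four blocks:
 *   bits 1:0 = 00 -> factory marked bad, memory table gets 11b
 *   bits 3:2 = 01 -> marked bad (worn),  memory table gets 01b
 *   bits 5:4 = 11 -> good,               memory table stays 00b
 *   bits 7:6 = 11 -> good,               memory table stays 00b
 */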
175
176/**
177 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
178 * @mtd: MTD device structure
179 * @buf: temporary buffer
180 * @td: descriptor for the bad block table
181 * @chip: read the table for a specific chip, -1 read all chips.
182 * Applies only if NAND_BBT_PERCHIP option is set
183 *
184 * Read the bad block table for all chips starting at a given page
185 * We assume that the bbt bits are in consecutive order.
186*/
187static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
188{
189 struct nand_chip *this = mtd->priv;
190 int res = 0, i;
191 int bits;
192
193 bits = td->options & NAND_BBT_NRBITS_MSK;
194 if (td->options & NAND_BBT_PERCHIP) {
195 int offs = 0;
196 for (i = 0; i < this->numchips; i++) {
197 if (chip == -1 || chip == i)
198 res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
199 if (res)
200 return res;
201 offs += this->chipsize >> (this->bbt_erase_shift + 2);
202 }
203 } else {
204 res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
205 if (res)
206 return res;
207 }
208 return 0;
209}
210
211/**
212 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
213 * @mtd: MTD device structure
214 * @buf: temporary buffer
215 * @td: descriptor for the bad block table
216 * @md: descriptor for the bad block table mirror
217 *
218 * Read the bad block table(s) for all chips starting at a given page
219 * We assume that the bbt bits are in consecutive order.
220 *
221*/
222static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td,
223 struct nand_bbt_descr *md)
224{
225 struct nand_chip *this = mtd->priv;
226
227 /* Read the primary version, if available */
228 if (td->options & NAND_BBT_VERSION) {
229 nand_read_raw (mtd, buf, td->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
230 td->version[0] = buf[mtd->oobblock + td->veroffs];
231 printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]);
232 }
233
234 /* Read the mirror version, if available */
235 if (md && (md->options & NAND_BBT_VERSION)) {
236 nand_read_raw (mtd, buf, md->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
237 md->version[0] = buf[mtd->oobblock + md->veroffs];
238 printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]);
239 }
240
241 return 1;
242}
243
244/**
245 * create_bbt - [GENERIC] Create a bad block table by scanning the device
246 * @mtd: MTD device structure
247 * @buf: temporary buffer
248 * @bd: descriptor for the good/bad block search pattern
249 * @chip: create the table for a specific chip, -1 read all chips.
250 * Applies only if NAND_BBT_PERCHIP option is set
251 *
252 * Create a bad block table by scanning the device
253 * for the given good/bad block identify pattern
254 */
255static void create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
256{
257 struct nand_chip *this = mtd->priv;
258 int i, j, numblocks, len, scanlen;
259 int startblock;
260 loff_t from;
261 size_t readlen, ooblen;
262
263 printk (KERN_INFO "Scanning device for bad blocks\n");
264
265 if (bd->options & NAND_BBT_SCANALLPAGES)
266 len = 1 << (this->bbt_erase_shift - this->page_shift);
267 else {
268 if (bd->options & NAND_BBT_SCAN2NDPAGE)
269 len = 2;
270 else
271 len = 1;
272 }
273 scanlen = mtd->oobblock + mtd->oobsize;
274 readlen = len * mtd->oobblock;
275 ooblen = len * mtd->oobsize;
276
277 if (chip == -1) {
278 /* Note that numblocks is 2 * (real numblocks) here, see i+=2 below as it
279 * makes shifting and masking less painful */
280 numblocks = mtd->size >> (this->bbt_erase_shift - 1);
281 startblock = 0;
282 from = 0;
283 } else {
284 if (chip >= this->numchips) {
285 printk (KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
286 chip + 1, this->numchips);
287 return;
288 }
289 numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
290 startblock = chip * numblocks;
291 numblocks += startblock;
292 from = startblock << (this->bbt_erase_shift - 1);
293 }
294
295 for (i = startblock; i < numblocks;) {
296 nand_read_raw (mtd, buf, from, readlen, ooblen);
297 for (j = 0; j < len; j++) {
298 if (check_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
299 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
300 printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
301 i >> 1, (unsigned int) from);
302 break;
303 }
304 }
305 i += 2;
306 from += (1 << this->bbt_erase_shift);
307 }
308}
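/*
 * Editor's note on the doubled block counter used above: the memory bbt
 * stores 2 bits per block, so counting in steps of two lets the same
 * index select both the byte and the bit position:
 *   byte  : this->bbt[i >> 3]   (four blocks per byte)
 *   shift : i & 0x6             (0, 2, 4 or 6)
 * For example block number 5 (i = 10) lands in byte 1, bits 3:2.
 */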
309
310/**
311 * search_bbt - [GENERIC] scan the device for a specific bad block table
312 * @mtd: MTD device structure
313 * @buf: temporary buffer
314 * @td: descriptor for the bad block table
315 *
316 * Read the bad block table by searching for a given ident pattern.
317 * Search is performed either from the beginning up or from the end of
318 * the device downwards. The search always starts at the start of a
319 * block.
320 * If the option NAND_BBT_PERCHIP is given, each chip is searched
321 * for a bbt, which contains the bad block information of this chip.
322 * This is necessary to provide support for certain DOC devices.
323 *
324 * The bbt ident pattern resides in the oob area of the first page
325 * in a block.
326 */
327static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
328{
329 struct nand_chip *this = mtd->priv;
330 int i, chips;
331 int bits, startblock, block, dir;
332 int scanlen = mtd->oobblock + mtd->oobsize;
333 int bbtblocks;
334
335 /* Search direction top -> down ? */
336 if (td->options & NAND_BBT_LASTBLOCK) {
337 startblock = (mtd->size >> this->bbt_erase_shift) -1;
338 dir = -1;
339 } else {
340 startblock = 0;
341 dir = 1;
342 }
343
344 /* Do we have a bbt per chip ? */
345 if (td->options & NAND_BBT_PERCHIP) {
346 chips = this->numchips;
347 bbtblocks = this->chipsize >> this->bbt_erase_shift;
348 startblock &= bbtblocks - 1;
349 } else {
350 chips = 1;
351 bbtblocks = mtd->size >> this->bbt_erase_shift;
352 }
353
354 /* Number of bits for each erase block in the bbt */
355 bits = td->options & NAND_BBT_NRBITS_MSK;
356
357 for (i = 0; i < chips; i++) {
358 /* Reset version information */
359 td->version[i] = 0;
360 td->pages[i] = -1;
361 /* Scan the maximum number of blocks */
362 for (block = 0; block < td->maxblocks; block++) {
363 int actblock = startblock + dir * block;
364 /* Read first page */
365 nand_read_raw (mtd, buf, actblock << this->bbt_erase_shift, mtd->oobblock, mtd->oobsize);
366 if (!check_pattern(buf, scanlen, mtd->oobblock, td)) {
367 td->pages[i] = actblock << (this->bbt_erase_shift - this->page_shift);
368 if (td->options & NAND_BBT_VERSION) {
369 td->version[i] = buf[mtd->oobblock + td->veroffs];
370 }
371 break;
372 }
373 }
374 startblock += this->chipsize >> this->bbt_erase_shift;
375 }
376 /* Check, if we found a bbt for each requested chip */
377 for (i = 0; i < chips; i++) {
378 if (td->pages[i] == -1)
379 printk (KERN_WARNING "Bad block table not found for chip %d\n", i);
380 else
381 printk (KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], td->version[i]);
382 }
383 return 0;
384}
385
386/**
387 * search_read_bbts - [GENERIC] scan the device for bad block table(s)
388 * @mtd: MTD device structure
389 * @buf: temporary buffer
390 * @td: descriptor for the bad block table
391 * @md: descriptor for the bad block table mirror
392 *
393 * Search and read the bad block table(s)
394*/
395static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf,
396 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
397{
398 /* Search the primary table */
399 search_bbt (mtd, buf, td);
400
401 /* Search the mirror table */
402 if (md)
403 search_bbt (mtd, buf, md);
404
405 /* Force result check */
406 return 1;
407}
408
409
410/**
411 * write_bbt - [GENERIC] (Re)write the bad block table
412 *
413 * @mtd: MTD device structure
414 * @buf: temporary buffer
415 * @td: descriptor for the bad block table
416 * @md: descriptor for the bad block table mirror
417 * @chipsel: selector for a specific chip, -1 for all
418 *
419 * (Re)write the bad block table
420 *
421*/
422static int write_bbt (struct mtd_info *mtd, uint8_t *buf,
423 struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel)
424{
425 struct nand_chip *this = mtd->priv;
426 struct nand_oobinfo oobinfo;
427 struct erase_info einfo;
428 int i, j, res, chip = 0;
429 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
430 int nrchips, bbtoffs, pageoffs;
431 uint8_t msk[4];
432 uint8_t rcode = td->reserved_block_code;
433 size_t retlen, len = 0;
434 loff_t to;
435
436 if (!rcode)
437 rcode = 0xff;
438 /* Write bad block table per chip rather than per device ? */
439 if (td->options & NAND_BBT_PERCHIP) {
440 numblocks = (int) (this->chipsize >> this->bbt_erase_shift);
441 /* Full device write or specific chip ? */
442 if (chipsel == -1) {
443 nrchips = this->numchips;
444 } else {
445 nrchips = chipsel + 1;
446 chip = chipsel;
447 }
448 } else {
449 numblocks = (int) (mtd->size >> this->bbt_erase_shift);
450 nrchips = 1;
451 }
452
453 /* Loop through the chips */
454 for (; chip < nrchips; chip++) {
455
456 /* There was already a version of the table, reuse the page
457 * This applies for absolute placement too, as we have the
458 * page nr. in td->pages.
459 */
460 if (td->pages[chip] != -1) {
461 page = td->pages[chip];
462 goto write;
463 }
464
465 /* Automatic placement of the bad block table */
466 /* Search direction top -> down ? */
467 if (td->options & NAND_BBT_LASTBLOCK) {
468 startblock = numblocks * (chip + 1) - 1;
469 dir = -1;
470 } else {
471 startblock = chip * numblocks;
472 dir = 1;
473 }
474
475 for (i = 0; i < td->maxblocks; i++) {
476 int block = startblock + dir * i;
477 /* Check, if the block is bad */
478 switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) {
479 case 0x01:
480 case 0x03:
481 continue;
482 }
483 page = block << (this->bbt_erase_shift - this->page_shift);
484 /* Check, if the block is used by the mirror table */
485 if (!md || md->pages[chip] != page)
486 goto write;
487 }
488 printk (KERN_ERR "No space left to write bad block table\n");
489 return -ENOSPC;
490write:
491
492 /* Set up shift count and masks for the flash table */
493 bits = td->options & NAND_BBT_NRBITS_MSK;
494 switch (bits) {
495 case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x01; break;
496 case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x03; break;
497 case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[2] = ~rcode; msk[3] = 0x0f; break;
498 case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[2] = ~rcode; msk[3] = 0xff; break;
499 default: return -EINVAL;
500 }
501
502 bbtoffs = chip * (numblocks >> 2);
503
504 to = ((loff_t) page) << this->page_shift;
505
506 memcpy (&oobinfo, this->autooob, sizeof(oobinfo));
507 oobinfo.useecc = MTD_NANDECC_PLACEONLY;
508
509 /* Must we save the block contents ? */
510 if (td->options & NAND_BBT_SAVECONTENT) {
511 /* Make it block aligned */
512 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
513 len = 1 << this->bbt_erase_shift;
514 res = mtd->read_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
515 if (res < 0) {
516 if (retlen != len) {
517 printk (KERN_INFO "nand_bbt: Error reading block for writing the bad block table\n");
518 return res;
519 }
520 printk (KERN_WARNING "nand_bbt: ECC error while reading block for writing bad block table\n");
521 }
522 /* Calc the byte offset in the buffer */
523 pageoffs = page - (int)(to >> this->page_shift);
524 offs = pageoffs << this->page_shift;
525 /* Preset the bbt area with 0xff */
526 memset (&buf[offs], 0xff, (size_t)(numblocks >> sft));
527 /* Preset the bbt's oob area with 0xff */
528 memset (&buf[len + pageoffs * mtd->oobsize], 0xff,
529 ((len >> this->page_shift) - pageoffs) * mtd->oobsize);
530 if (td->options & NAND_BBT_VERSION) {
531 buf[len + (pageoffs * mtd->oobsize) + td->veroffs] = td->version[chip];
532 }
533 } else {
534 /* Calc length */
535 len = (size_t) (numblocks >> sft);
536 /* Make it page aligned ! */
537 len = (len + (mtd->oobblock-1)) & ~(mtd->oobblock-1);
538 /* Preset the buffer with 0xff */
539 memset (buf, 0xff, len + (len >> this->page_shift) * mtd->oobsize);
540 offs = 0;
541 /* Pattern is located in oob area of first page */
542 memcpy (&buf[len + td->offs], td->pattern, td->len);
543 if (td->options & NAND_BBT_VERSION) {
544 buf[len + td->veroffs] = td->version[chip];
545 }
546 }
547
548 /* walk through the memory table */
549 for (i = 0; i < numblocks; ) {
550 uint8_t dat;
551 dat = this->bbt[bbtoffs + (i >> 2)];
552 for (j = 0; j < 4; j++ , i++) {
553 int sftcnt = (i << (3 - sft)) & sftmsk;
554 /* Do not store the reserved bbt blocks ! */
555 buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
556 dat >>= 2;
557 }
558 }
559
560 memset (&einfo, 0, sizeof (einfo));
561 einfo.mtd = mtd;
562 einfo.addr = (unsigned long) to;
563 einfo.len = 1 << this->bbt_erase_shift;
564 res = nand_erase_nand (mtd, &einfo, 1);
565 if (res < 0) {
566 printk (KERN_WARNING "nand_bbt: Error during block erase: %d\n", res);
567 return res;
568 }
569
570 res = mtd->write_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
571 if (res < 0) {
572 printk (KERN_WARNING "nand_bbt: Error while writing bad block table %d\n", res);
573 return res;
574 }
575 printk (KERN_DEBUG "Bad block table written to 0x%08x, version 0x%02X\n",
576 (unsigned int) to, td->version[chip]);
577
578 /* Mark it as used */
579 td->pages[chip] = page;
580 }
581 return 0;
582}
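/*
 * Editor's note: a short worked example for the shift table above, for
 * the common 2 bits-per-block case (bits = 2, sft = 2, sftmsk = 0x06).
 * In the "walk through the memory table" loop each flash byte then
 * holds four blocks:
 *   i = 0 -> buf[offs + 0], sftcnt = (0 << 1) & 0x06 = 0
 *   i = 1 -> buf[offs + 0], sftcnt = (1 << 1) & 0x06 = 2
 *   i = 2 -> buf[offs + 0], sftcnt = (2 << 1) & 0x06 = 4
 *   i = 3 -> buf[offs + 0], sftcnt = (3 << 1) & 0x06 = 6
 *   i = 4 -> buf[offs + 1], sftcnt = 0 again
 * Good blocks keep 11b (the buffer is preset to 0xff and msk[0] = 0x00
 * clears nothing); bad blocks get their bits cleared via msk[1]/msk[3].
 */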
583
584/**
585 * nand_memory_bbt - [GENERIC] create a memory based bad block table
586 * @mtd: MTD device structure
587 * @bd: descriptor for the good/bad block search pattern
588 *
589 * The function creates a memory based bbt by scanning the device
590 * for manufacturer / software marked good / bad blocks
591*/
592static int nand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
593{
594 struct nand_chip *this = mtd->priv;
595
596 /* Ensure that we only scan for the pattern and nothing else */
597 bd->options = 0;
598 create_bbt (mtd, this->data_buf, bd, -1);
599 return 0;
600}
601
602/**
603 * check_create - [GENERIC] create and write bbt(s) if necessary
604 * @mtd: MTD device structure
605 * @buf: temporary buffer
606 * @bd: descriptor for the good/bad block search pattern
607 *
608 * The function checks the results of the previous call to read_bbt
609 * and creates / updates the bbt(s) if necessary
610 * Creation is necessary if no bbt was found for the chip/device
611 * Update is necessary if one of the tables is missing or the
612 * version number of one table is less than the other
613*/
614static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
615{
616 int i, chips, writeops, chipsel, res;
617 struct nand_chip *this = mtd->priv;
618 struct nand_bbt_descr *td = this->bbt_td;
619 struct nand_bbt_descr *md = this->bbt_md;
620 struct nand_bbt_descr *rd, *rd2;
621
622 /* Do we have a bbt per chip ? */
623 if (td->options & NAND_BBT_PERCHIP)
624 chips = this->numchips;
625 else
626 chips = 1;
627
628 for (i = 0; i < chips; i++) {
629 writeops = 0;
630 rd = NULL;
631 rd2 = NULL;
632 /* Per chip or per device ? */
633 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
634		/* Mirrored table available ? */
635 if (md) {
636 if (td->pages[i] == -1 && md->pages[i] == -1) {
637 writeops = 0x03;
638 goto create;
639 }
640
641 if (td->pages[i] == -1) {
642 rd = md;
643 td->version[i] = md->version[i];
644 writeops = 1;
645 goto writecheck;
646 }
647
648 if (md->pages[i] == -1) {
649 rd = td;
650 md->version[i] = td->version[i];
651 writeops = 2;
652 goto writecheck;
653 }
654
655 if (td->version[i] == md->version[i]) {
656 rd = td;
657 if (!(td->options & NAND_BBT_VERSION))
658 rd2 = md;
659 goto writecheck;
660 }
661
662 if (((int8_t) (td->version[i] - md->version[i])) > 0) {
663 rd = td;
664 md->version[i] = td->version[i];
665 writeops = 2;
666 } else {
667 rd = md;
668 td->version[i] = md->version[i];
669 writeops = 1;
670 }
671
672 goto writecheck;
673
674 } else {
675 if (td->pages[i] == -1) {
676 writeops = 0x01;
677 goto create;
678 }
679 rd = td;
680 goto writecheck;
681 }
682create:
683 /* Create the bad block table by scanning the device ? */
684 if (!(td->options & NAND_BBT_CREATE))
685 continue;
686
687 /* Create the table in memory by scanning the chip(s) */
688 create_bbt (mtd, buf, bd, chipsel);
689
690 td->version[i] = 1;
691 if (md)
692 md->version[i] = 1;
693writecheck:
694 /* read back first ? */
695 if (rd)
696 read_abs_bbt (mtd, buf, rd, chipsel);
697 /* If they weren't versioned, read both. */
698 if (rd2)
699 read_abs_bbt (mtd, buf, rd2, chipsel);
700
701 /* Write the bad block table to the device ? */
702 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
703 res = write_bbt (mtd, buf, td, md, chipsel);
704 if (res < 0)
705 return res;
706 }
707
708 /* Write the mirror bad block table to the device ? */
709 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
710 res = write_bbt (mtd, buf, md, td, chipsel);
711 if (res < 0)
712 return res;
713 }
714 }
715 return 0;
716}
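/*
 * Worked example (illustrative, not part of the driver): the version
 * comparison above uses a signed 8-bit difference so that the one-byte
 * version counter may wrap around.  With td->version[i] == 0x00 and
 * md->version[i] == 0xff, (int8_t)(0x00 - 0xff) == 1 > 0, so the main
 * table is treated as the newer one.  A minimal standalone sketch of the
 * same comparison, using the kernel's fixed-width types:
 */
#if 0	/* illustrative sketch only, not compiled */
/* Nonzero if version a is newer than b; assumes the two counters are
 * within 127 increments of each other. */
static int bbt_version_is_newer(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b) > 0;
}
/* bbt_version_is_newer(0x00, 0xff) == 1, bbt_version_is_newer(0x01, 0x7f) == 0 */
#endif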
717
718/**
719 * mark_bbt_region - [GENERIC] mark the bad block table regions
720 * @mtd: MTD device structure
721 * @td: bad block table descriptor
722 *
723 * The bad block table regions are marked as "bad" to prevent
724 * accidental erasures / writes. The regions are identified by
725 * the mark 0x02.
726*/
727static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
728{
729 struct nand_chip *this = mtd->priv;
730 int i, j, chips, block, nrblocks, update;
731 uint8_t oldval, newval;
732
733 /* Do we have a bbt per chip ? */
734 if (td->options & NAND_BBT_PERCHIP) {
735 chips = this->numchips;
736 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
737 } else {
738 chips = 1;
739 nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
740 }
741
742 for (i = 0; i < chips; i++) {
743 if ((td->options & NAND_BBT_ABSPAGE) ||
744 !(td->options & NAND_BBT_WRITE)) {
745 if (td->pages[i] == -1) continue;
746 block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
747 block <<= 1;
748 oldval = this->bbt[(block >> 3)];
749 newval = oldval | (0x2 << (block & 0x06));
750 this->bbt[(block >> 3)] = newval;
751 if ((oldval != newval) && td->reserved_block_code)
752 nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
753 continue;
754 }
755 update = 0;
756 if (td->options & NAND_BBT_LASTBLOCK)
757 block = ((i + 1) * nrblocks) - td->maxblocks;
758 else
759 block = i * nrblocks;
760 block <<= 1;
761 for (j = 0; j < td->maxblocks; j++) {
762 oldval = this->bbt[(block >> 3)];
763 newval = oldval | (0x2 << (block & 0x06));
764 this->bbt[(block >> 3)] = newval;
765 if (oldval != newval) update = 1;
766 block += 2;
767 }
768 /* If we want reserved blocks to be recorded to flash, and some
769 new ones have been marked, then we need to update the stored
770 bbts. This should only happen once. */
771 if (update && td->reserved_block_code)
772 nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
773 }
774}
775
776/**
777 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
778 * @mtd: MTD device structure
779 * @bd: descriptor for the good/bad block search pattern
780 *
781 * The function checks whether a bad block table is already
782 * available. If not, it scans the device for manufacturer
783 * marked good / bad blocks and writes the bad block table(s) to
784 * the selected place.
785 *
786 * The bad block table memory is allocated here. It must be freed
787 * by calling the nand_free_bbt function.
788 *
789*/
790int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
791{
792 struct nand_chip *this = mtd->priv;
793 int len, res = 0;
794 uint8_t *buf;
795 struct nand_bbt_descr *td = this->bbt_td;
796 struct nand_bbt_descr *md = this->bbt_md;
797
798 len = mtd->size >> (this->bbt_erase_shift + 2);
799 /* Allocate memory (2bit per block) */
800 this->bbt = kmalloc (len, GFP_KERNEL);
801 if (!this->bbt) {
802 printk (KERN_ERR "nand_scan_bbt: Out of memory\n");
803 return -ENOMEM;
804 }
805 /* Clear the memory bad block table */
806 memset (this->bbt, 0x00, len);
807
808	/* If no primary table descriptor is given, scan the device
809 * to build a memory based bad block table
810 */
811 if (!td)
812 return nand_memory_bbt(mtd, bd);
813
814 /* Allocate a temporary buffer for one eraseblock incl. oob */
815 len = (1 << this->bbt_erase_shift);
816 len += (len >> this->page_shift) * mtd->oobsize;
817 buf = kmalloc (len, GFP_KERNEL);
818 if (!buf) {
819 printk (KERN_ERR "nand_bbt: Out of memory\n");
820 kfree (this->bbt);
821 this->bbt = NULL;
822 return -ENOMEM;
823 }
824
825 /* Is the bbt at a given page ? */
826 if (td->options & NAND_BBT_ABSPAGE) {
827 res = read_abs_bbts (mtd, buf, td, md);
828 } else {
829 /* Search the bad block table using a pattern in oob */
830 res = search_read_bbts (mtd, buf, td, md);
831 }
832
833 if (res)
834 res = check_create (mtd, buf, bd);
835
836 /* Prevent the bbt regions from erasing / writing */
837 mark_bbt_region (mtd, td);
838 if (md)
839 mark_bbt_region (mtd, md);
840
841 kfree (buf);
842 return res;
843}
844
845
846/**
847 * nand_update_bbt - [NAND Interface] update bad block table(s)
848 * @mtd: MTD device structure
849 * @offs: the offset of the newly marked block
850 *
851 * The function updates the bad block table(s)
852*/
853int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
854{
855 struct nand_chip *this = mtd->priv;
856 int len, res = 0, writeops = 0;
857 int chip, chipsel;
858 uint8_t *buf;
859 struct nand_bbt_descr *td = this->bbt_td;
860 struct nand_bbt_descr *md = this->bbt_md;
861
862 if (!this->bbt || !td)
863 return -EINVAL;
864
865 len = mtd->size >> (this->bbt_erase_shift + 2);
866 /* Allocate a temporary buffer for one eraseblock incl. oob */
867 len = (1 << this->bbt_erase_shift);
868 len += (len >> this->page_shift) * mtd->oobsize;
869 buf = kmalloc (len, GFP_KERNEL);
870 if (!buf) {
871 printk (KERN_ERR "nand_update_bbt: Out of memory\n");
872 return -ENOMEM;
873 }
874
875 writeops = md != NULL ? 0x03 : 0x01;
876
877 /* Do we have a bbt per chip ? */
878 if (td->options & NAND_BBT_PERCHIP) {
879 chip = (int) (offs >> this->chip_shift);
880 chipsel = chip;
881 } else {
882 chip = 0;
883 chipsel = -1;
884 }
885
886 td->version[chip]++;
887 if (md)
888 md->version[chip]++;
889
890 /* Write the bad block table to the device ? */
891 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
892 res = write_bbt (mtd, buf, td, md, chipsel);
893 if (res < 0)
894 goto out;
895 }
896 /* Write the mirror bad block table to the device ? */
897 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
898 res = write_bbt (mtd, buf, md, td, chipsel);
899 }
900
901out:
902 kfree (buf);
903 return res;
904}
905
906/* Define some generic bad / good block scan patterns which are used
907 * while scanning a device for factory marked good / bad blocks.
908 *
909 * The memory based patterns just differ in the options field.
910 */
911static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
912
913static struct nand_bbt_descr smallpage_memorybased = {
914 .options = 0,
915 .offs = 5,
916 .len = 1,
917 .pattern = scan_ff_pattern
918};
919
920static struct nand_bbt_descr largepage_memorybased = {
921 .options = 0,
922 .offs = 0,
923 .len = 2,
924 .pattern = scan_ff_pattern
925};
926
927static struct nand_bbt_descr smallpage_flashbased = {
928 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
929 .offs = 5,
930 .len = 1,
931 .pattern = scan_ff_pattern
932};
933
934static struct nand_bbt_descr largepage_flashbased = {
935 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
936 .offs = 0,
937 .len = 2,
938 .pattern = scan_ff_pattern
939};
940
941static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
942
943static struct nand_bbt_descr agand_flashbased = {
944 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
945 .offs = 0x20,
946 .len = 6,
947 .pattern = scan_agand_pattern
948};
949
950/* Generic flash bbt descriptors
951*/
952static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
953static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
954
955static struct nand_bbt_descr bbt_main_descr = {
956 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
957 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
958 .offs = 8,
959 .len = 4,
960 .veroffs = 12,
961 .maxblocks = 4,
962 .pattern = bbt_pattern
963};
964
965static struct nand_bbt_descr bbt_mirror_descr = {
966 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
967 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
968 .offs = 8,
969 .len = 4,
970 .veroffs = 12,
971 .maxblocks = 4,
972 .pattern = mirror_pattern
973};
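/*
 * Illustrative OOB layout implied by the two descriptors above (not part
 * of the driver): with .offs = 8, .len = 4 and .veroffs = 12, write_bbt()
 * places the pattern "Bbt0" (or "1tbB" for the mirror) in OOB bytes 8..11
 * of the table block's first page and the table version in OOB byte 12.
 * A hypothetical 16 byte OOB of a version-1 main table would then be:
 */
#if 0	/* sketch only; the 0xff filler follows the memset in write_bbt() */
static const uint8_t example_bbt_oob16[16] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* bytes 0..7		*/
	'B',  'b',  't',  '0',				/* bytes 8..11: pattern	*/
	0x01,						/* byte 12: version	*/
	0xff, 0xff, 0xff				/* bytes 13..15		*/
};
#endif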
974
975/**
976 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
977 * @mtd: MTD device structure
978 *
979 * This function selects the default bad block table
980 * support for the device and calls the nand_scan_bbt function
981 *
982*/
983int nand_default_bbt (struct mtd_info *mtd)
984{
985 struct nand_chip *this = mtd->priv;
986
987 /* Default for AG-AND. We must use a flash based
988 * bad block table as the devices have factory marked
989 * _good_ blocks. Erasing those blocks leads to loss
990 * of the good / bad information, so we _must_ store
991 * this information in a good / bad table during
992 * startup
993 */
994 if (this->options & NAND_IS_AND) {
995 /* Use the default pattern descriptors */
996 if (!this->bbt_td) {
997 this->bbt_td = &bbt_main_descr;
998 this->bbt_md = &bbt_mirror_descr;
999 }
1000 this->options |= NAND_USE_FLASH_BBT;
1001 return nand_scan_bbt (mtd, &agand_flashbased);
1002 }
1003
1004
1005 /* Is a flash based bad block table requested ? */
1006 if (this->options & NAND_USE_FLASH_BBT) {
1007 /* Use the default pattern descriptors */
1008 if (!this->bbt_td) {
1009 this->bbt_td = &bbt_main_descr;
1010 this->bbt_md = &bbt_mirror_descr;
1011 }
1012 if (!this->badblock_pattern) {
1013 this->badblock_pattern = (mtd->oobblock > 512) ?
1014 &largepage_flashbased : &smallpage_flashbased;
1015 }
1016 } else {
1017 this->bbt_td = NULL;
1018 this->bbt_md = NULL;
1019 if (!this->badblock_pattern) {
1020 this->badblock_pattern = (mtd->oobblock > 512) ?
1021 &largepage_memorybased : &smallpage_memorybased;
1022 }
1023 }
1024 return nand_scan_bbt (mtd, this->badblock_pattern);
1025}
1026
1027/**
1028 * nand_isbad_bbt - [NAND Interface] Check if a block is bad
1029 * @mtd: MTD device structure
1030 * @offs: offset in the device
1031 * @allowbbt: allow access to bad block table region
1032 *
1033*/
1034int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt)
1035{
1036 struct nand_chip *this = mtd->priv;
1037 int block;
1038 uint8_t res;
1039
1040 /* Get block number * 2 */
1041 block = (int) (offs >> (this->bbt_erase_shift - 1));
1042 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
1043
1044 DEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
1045		(unsigned int)offs, block >> 1, res);
1046
1047 switch ((int)res) {
1048 case 0x00: return 0;
1049 case 0x01: return 1;
1050 case 0x02: return allowbbt ? 0 : 1;
1051 }
1052 return 1;
1053}
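/*
 * Illustrative sketch (not part of the driver): the in-memory table keeps
 * 2 bits per eraseblock, i.e. four blocks per byte.  nand_isbad_bbt()
 * above works on "block number * 2" derived from the offset; the same
 * lookup expressed in terms of the block number would be:
 */
#if 0	/* standalone sketch, assumes the 2-bit encoding used above
	 * (0x00 good, 0x02 reserved for the bbt area, other values bad) */
static uint8_t bbt_entry(const uint8_t *bbt, int blocknr)
{
	return (bbt[blocknr >> 2] >> ((blocknr & 0x03) << 1)) & 0x03;
}
#endif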
1054
1055EXPORT_SYMBOL (nand_scan_bbt);
1056EXPORT_SYMBOL (nand_default_bbt);
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
new file mode 100644
index 000000000000..2e341b75437a
--- /dev/null
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -0,0 +1,250 @@
1/*
2 * This file contains an ECC algorithm from Toshiba that detects and
3 * corrects 1 bit errors in a 256 byte block of data.
4 *
5 * drivers/mtd/nand/nand_ecc.c
6 *
7 * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
8 * Toshiba America Electronics Components, Inc.
9 *
10 * $Id: nand_ecc.c,v 1.14 2004/06/16 15:34:37 gleixner Exp $
11 *
12 * This file is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 or (at your option) any
15 * later version.
16 *
17 * This file is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this file; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 *
26 * As a special exception, if other files instantiate templates or use
27 * macros or inline functions from these files, or you compile these
28 * files and link them with other works to produce a work based on these
29 * files, these files do not by themselves cause the resulting work to be
30 * covered by the GNU General Public License. However the source code for
31 * these files must still be made available in accordance with section (3)
32 * of the GNU General Public License.
33 *
34 * This exception does not invalidate any other reasons why a work based on
35 * this file might be covered by the GNU General Public License.
36 */
37
38#include <linux/types.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/mtd/nand_ecc.h>
42
43/*
44 * Pre-calculated 256-way 1 byte column parity
45 */
46static const u_char nand_ecc_precalc_table[] = {
47 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
48 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
49 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
50 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
51 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
52 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
53 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
54 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
55 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
56 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
57 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
58 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
59 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
60 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
61 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
62 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
63};
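/*
 * Illustrative sketch (not part of the driver): one way to regenerate the
 * table above.  Bits 0..5 of each entry hold the six column parities used
 * via "idx & 0x3f" below, and bit 6 is the parity of all eight data bits,
 * tested as "idx & 0x40".  The particular column-parity bit assignment in
 * this sketch is an assumption that reproduces the table values (e.g.
 * entry 0x01 == 0x55 and entry 0x80 == 0x6a).
 */
#if 0	/* standalone sketch only, not compiled */
static u_char ecc_column_parity(u_char byte)
{
	u_char idx = 0;
	int bit, set = 0;

	for (bit = 0; bit < 8; bit++) {
		if (!(byte & (1 << bit)))
			continue;
		set++;
		idx ^= (bit & 1) ? 0x02 : 0x01;	/* odd/even bit columns */
		idx ^= (bit & 2) ? 0x08 : 0x04;	/* bit pairs            */
		idx ^= (bit & 4) ? 0x20 : 0x10;	/* byte halves          */
	}
	if (set & 1)
		idx |= 0x40;			/* parity of the whole byte */
	return idx;
}
#endif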
64
65
66/**
67 * nand_trans_result - [GENERIC] create non-inverted ECC
68 * @reg2: line parity reg 2
69 * @reg3: line parity reg 3
70 * @ecc_code: ecc
71 *
72 * Creates non-inverted ECC code from line parity
73 */
74static void nand_trans_result(u_char reg2, u_char reg3,
75 u_char *ecc_code)
76{
77 u_char a, b, i, tmp1, tmp2;
78
79 /* Initialize variables */
80 a = b = 0x80;
81 tmp1 = tmp2 = 0;
82
83 /* Calculate first ECC byte */
84 for (i = 0; i < 4; i++) {
85 if (reg3 & a) /* LP15,13,11,9 --> ecc_code[0] */
86 tmp1 |= b;
87 b >>= 1;
88 if (reg2 & a) /* LP14,12,10,8 --> ecc_code[0] */
89 tmp1 |= b;
90 b >>= 1;
91 a >>= 1;
92 }
93
94 /* Calculate second ECC byte */
95 b = 0x80;
96 for (i = 0; i < 4; i++) {
97 if (reg3 & a) /* LP7,5,3,1 --> ecc_code[1] */
98 tmp2 |= b;
99 b >>= 1;
100 if (reg2 & a) /* LP6,4,2,0 --> ecc_code[1] */
101 tmp2 |= b;
102 b >>= 1;
103 a >>= 1;
104 }
105
106 /* Store two of the ECC bytes */
107 ecc_code[0] = tmp1;
108 ecc_code[1] = tmp2;
109}
110
111/**
112 * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code for 256 byte block
113 * @mtd: MTD block structure
114 * @dat: raw data
115 * @ecc_code: buffer for ECC
116 */
117int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
118{
119 u_char idx, reg1, reg2, reg3;
120 int j;
121
122 /* Initialize variables */
123 reg1 = reg2 = reg3 = 0;
124 ecc_code[0] = ecc_code[1] = ecc_code[2] = 0;
125
126 /* Build up column parity */
127 for(j = 0; j < 256; j++) {
128
129 /* Get CP0 - CP5 from table */
130 idx = nand_ecc_precalc_table[dat[j]];
131 reg1 ^= (idx & 0x3f);
132
133 /* All bit XOR = 1 ? */
134 if (idx & 0x40) {
135 reg3 ^= (u_char) j;
136 reg2 ^= ~((u_char) j);
137 }
138 }
139
140 /* Create non-inverted ECC code from line parity */
141 nand_trans_result(reg2, reg3, ecc_code);
142
143 /* Calculate final ECC code */
144 ecc_code[0] = ~ecc_code[0];
145 ecc_code[1] = ~ecc_code[1];
146 ecc_code[2] = ((~reg1) << 2) | 0x03;
147 return 0;
148}
149
150/**
151 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
152 * @mtd: MTD block structure
153 * @dat: raw data read from the chip
154 * @read_ecc: ECC from the chip
155 * @calc_ecc: the ECC calculated from raw data
156 *
157 * Detect and correct a 1 bit error for 256 byte block
158 */
159int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
160{
161 u_char a, b, c, d1, d2, d3, add, bit, i;
162
163 /* Do error detection */
164 d1 = calc_ecc[0] ^ read_ecc[0];
165 d2 = calc_ecc[1] ^ read_ecc[1];
166 d3 = calc_ecc[2] ^ read_ecc[2];
167
168 if ((d1 | d2 | d3) == 0) {
169 /* No errors */
170 return 0;
171 }
172 else {
173 a = (d1 ^ (d1 >> 1)) & 0x55;
174 b = (d2 ^ (d2 >> 1)) & 0x55;
175 c = (d3 ^ (d3 >> 1)) & 0x54;
176
177 /* Found and will correct single bit error in the data */
178 if ((a == 0x55) && (b == 0x55) && (c == 0x54)) {
179 c = 0x80;
180 add = 0;
181 a = 0x80;
182 for (i=0; i<4; i++) {
183 if (d1 & c)
184 add |= a;
185 c >>= 2;
186 a >>= 1;
187 }
188 c = 0x80;
189 for (i=0; i<4; i++) {
190 if (d2 & c)
191 add |= a;
192 c >>= 2;
193 a >>= 1;
194 }
195 bit = 0;
196 b = 0x04;
197 c = 0x80;
198 for (i=0; i<3; i++) {
199 if (d3 & c)
200 bit |= b;
201 c >>= 2;
202 b >>= 1;
203 }
204 b = 0x01;
205 a = dat[add];
206 a ^= (b << bit);
207 dat[add] = a;
208 return 1;
209 }
210 else {
211 i = 0;
212 while (d1) {
213 if (d1 & 0x01)
214 ++i;
215 d1 >>= 1;
216 }
217 while (d2) {
218 if (d2 & 0x01)
219 ++i;
220 d2 >>= 1;
221 }
222 while (d3) {
223 if (d3 & 0x01)
224 ++i;
225 d3 >>= 1;
226 }
227 if (i == 1) {
228 /* ECC Code Error Correction */
229 read_ecc[0] = calc_ecc[0];
230 read_ecc[1] = calc_ecc[1];
231 read_ecc[2] = calc_ecc[2];
232 return 2;
233 }
234 else {
235 /* Uncorrectable Error */
236 return -1;
237 }
238 }
239 }
240
241 /* Should never happen */
242 return -1;
243}
244
245EXPORT_SYMBOL(nand_calculate_ecc);
246EXPORT_SYMBOL(nand_correct_data);
247
248MODULE_LICENSE("GPL");
249MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
250MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
new file mode 100644
index 000000000000..2d8c4321275b
--- /dev/null
+++ b/drivers/mtd/nand/nand_ids.c
@@ -0,0 +1,129 @@
1/*
2 * drivers/mtd/nand/nand_ids.c
3 *
4 * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
5 *
6 * $Id: nand_ids.c,v 1.10 2004/05/26 13:40:12 gleixner Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/module.h>
14#include <linux/mtd/nand.h>
15/*
16* Chip ID list
17*
18* Name, ID code, pagesize, chipsize in MegaByte, eraseblock size,
19* options
20*
21* Pagesize: 0, 256, 512
22* 0 get this information from the extended chip ID
23* 256	256 Byte page size
24* 512 512 Byte page size
25*/
26struct nand_flash_dev nand_flash_ids[] = {
27 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
28 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
29 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
30 {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
31 {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
32 {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
33 {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
34 {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
35 {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
36 {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
37
38 {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
39 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
40 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
41 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
42
43 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
44 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
45 {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
46 {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
47
48 {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
49 {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
50 {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
51 {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
52
53 {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
54 {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
55 {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
56 {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
57
58 {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
59 {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
60 {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
61 {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
62
63 {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
64
65 {"NAND 512MiB 3,3V 8-bit", 0xDC, 512, 512, 0x4000, 0},
66
67 /* These are the new chips with large page size. The pagesize
68 * and the erasesize is determined from the extended id bytes
69 */
70 /* 1 Gigabit */
71 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
72 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
73 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
74 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
75
76 /* 2 Gigabit */
77 {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
78 {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
79 {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
80 {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
81
82 /* 4 Gigabit */
83 {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
84 {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
85 {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
86 {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
87
88 /* 8 Gigabit */
89 {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
90 {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
91 {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
92 {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
93
94 /* 16 Gigabit */
95 {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
96 {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
97 {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
98 {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
99
100	/* Renesas AND 1 Gigabit. These chips do not support the extended id and have a strange page/block layout!
101	 * The chosen minimum erasesize is 4 * 2 * 2048 = 16384 bytes, as these chips have an array of 4 page planes.
102	 * 1 block = 2 pages, but due to the plane arrangement blocks 0-3 consist of pages 0 + 4, 1 + 5, 2 + 6, 3 + 7.
103	 * Anyway, JFFS2 would increase the eraseblock size, so we chose a combined one which can be erased in one go.
104	 * There are more speed improvements possible for reads and writes, but they are not implemented yet.
105	 */
106 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, NAND_IS_AND | NAND_NO_AUTOINCR | NAND_4PAGE_ARRAY},
107
108 {NULL,}
109};
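/*
 * Illustrative sketch (not part of the driver): the table above is
 * terminated by the NULL-name entry and is searched linearly by the
 * second (device) ID byte; the lookup below mirrors the loop used later
 * in this patch by the NAND simulator.  The helper name is hypothetical.
 */
#if 0	/* standalone usage sketch only */
static struct nand_flash_dev *find_flash_dev(int dev_id)
{
	int i;

	for (i = 0; nand_flash_ids[i].name != NULL; i++)
		if (nand_flash_ids[i].id == dev_id)
			return &nand_flash_ids[i];
	return NULL;	/* unknown device ID */
}
#endif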
110
111/*
112* Manufacturer ID list
113*/
114struct nand_manufacturers nand_manuf_ids[] = {
115 {NAND_MFR_TOSHIBA, "Toshiba"},
116 {NAND_MFR_SAMSUNG, "Samsung"},
117 {NAND_MFR_FUJITSU, "Fujitsu"},
118 {NAND_MFR_NATIONAL, "National"},
119 {NAND_MFR_RENESAS, "Renesas"},
120 {NAND_MFR_STMICRO, "ST Micro"},
121 {0x0, "Unknown"}
122};
123
124EXPORT_SYMBOL (nand_manuf_ids);
125EXPORT_SYMBOL (nand_flash_ids);
126
127MODULE_LICENSE ("GPL");
128MODULE_AUTHOR ("Thomas Gleixner <tglx@linutronix.de>");
129MODULE_DESCRIPTION ("NAND device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
new file mode 100644
index 000000000000..13feefd7d8ca
--- /dev/null
+++ b/drivers/mtd/nand/nandsim.c
@@ -0,0 +1,1613 @@
1/*
2 * NAND flash simulator.
3 *
4 * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
5 *
6 * Copyright (C) 2004 Nokia Corporation
7 *
8 * Note: NS means "NAND Simulator".
9 * Note: Input means input TO flash chip, output means output FROM chip.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2, or (at your option) any later
14 * version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
19 * Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
24 *
25 * $Id: nandsim.c,v 1.7 2004/12/06 11:53:06 dedekind Exp $
26 */
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/types.h>
31#include <linux/module.h>
32#include <linux/moduleparam.h>
33#include <linux/vmalloc.h>
34#include <linux/slab.h>
35#include <linux/errno.h>
36#include <linux/string.h>
37#include <linux/mtd/mtd.h>
38#include <linux/mtd/nand.h>
39#include <linux/mtd/partitions.h>
40#include <linux/delay.h>
41#ifdef CONFIG_NS_ABS_POS
42#include <asm/io.h>
43#endif
44
45
46/* Default simulator parameters values */
47#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
48 !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
49 !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
50 !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
51#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
52#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
53#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
54#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
55#endif
56
57#ifndef CONFIG_NANDSIM_ACCESS_DELAY
58#define CONFIG_NANDSIM_ACCESS_DELAY 25
59#endif
60#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
61#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
62#endif
63#ifndef CONFIG_NANDSIM_ERASE_DELAY
64#define CONFIG_NANDSIM_ERASE_DELAY 2
65#endif
66#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
67#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
68#endif
69#ifndef CONFIG_NANDSIM_INPUT_CYCLE
70#define CONFIG_NANDSIM_INPUT_CYCLE 50
71#endif
72#ifndef CONFIG_NANDSIM_BUS_WIDTH
73#define CONFIG_NANDSIM_BUS_WIDTH 8
74#endif
75#ifndef CONFIG_NANDSIM_DO_DELAYS
76#define CONFIG_NANDSIM_DO_DELAYS 0
77#endif
78#ifndef CONFIG_NANDSIM_LOG
79#define CONFIG_NANDSIM_LOG 0
80#endif
81#ifndef CONFIG_NANDSIM_DBG
82#define CONFIG_NANDSIM_DBG 0
83#endif
84
85static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
86static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
87static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
88static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
89static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
90static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
91static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
92static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
93static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
94static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
95static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
96static uint log = CONFIG_NANDSIM_LOG;
97static uint dbg = CONFIG_NANDSIM_DBG;
98
99module_param(first_id_byte, uint, 0400);
100module_param(second_id_byte, uint, 0400);
101module_param(third_id_byte, uint, 0400);
102module_param(fourth_id_byte, uint, 0400);
103module_param(access_delay, uint, 0400);
104module_param(programm_delay, uint, 0400);
105module_param(erase_delay, uint, 0400);
106module_param(output_cycle, uint, 0400);
107module_param(input_cycle, uint, 0400);
108module_param(bus_width, uint, 0400);
109module_param(do_delays, uint, 0400);
110module_param(log, uint, 0400);
111module_param(dbg, uint, 0400);
112
113MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
114MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
115MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
116MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
117MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
118MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
119MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
120MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanoseconds)");
121MODULE_PARM_DESC(input_cycle,    "Word input (to flash) time (nanoseconds)");
122MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
123MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
124MODULE_PARM_DESC(log, "Perform logging if not zero");
125MODULE_PARM_DESC(dbg, "Output debug information if not zero");
126
127/* The largest possible page size */
128#define NS_LARGEST_PAGE_SIZE 2048
129
130/* The prefix for simulator output */
131#define NS_OUTPUT_PREFIX "[nandsim]"
132
133/* Simulator's output macros (logging, debugging, warning, error) */
134#define NS_LOG(args...) \
135 do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
136#define NS_DBG(args...) \
137 do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
138#define NS_WARN(args...) \
139	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
140#define NS_ERR(args...) \
141	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
142
143/* Busy-wait delay macros (microseconds, milliseconds) */
144#define NS_UDELAY(us) \
145 do { if (do_delays) udelay(us); } while(0)
146#define NS_MDELAY(us) \
147 do { if (do_delays) mdelay(us); } while(0)
148
149/* Is the nandsim structure initialized ? */
150#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
151
152/* Good operation completion status */
153#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
154
155/* Operation failed completion status */
156#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
157
158/* Calculate the page offset in flash RAM image by (row, column) address */
159#define NS_RAW_OFFSET(ns) \
160 (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
161
162/* Calculate the OOB offset in flash RAM image by (row, column) address */
163#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
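/*
 * Worked example (illustrative only): for a hypothetical small-page chip
 * with pgsz = 512 (pgshift = 9) and oobsz = 16, the address (row = 3,
 * column = 10) gives
 *
 *	NS_RAW_OFFSET     = (3 << 9) + 3 * 16 + 10 = 1594
 *	NS_RAW_OFFSET_OOB = 1594 + 512             = 2106
 *
 * i.e. the RAM image stores each page as "page data followed by its OOB",
 * back to back.
 */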
164
165/* After a command is input, the simulator goes to one of the following states */
166#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
167#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
168#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
169#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
170#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
171#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
172#define STATE_CMD_STATUS 0x00000007 /* read status */
173#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
174#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
175#define STATE_CMD_READID 0x0000000A /* read ID */
176#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
177#define STATE_CMD_RESET 0x0000000C /* reset */
178#define STATE_CMD_MASK 0x0000000F /* command states mask */
179
180/* After an address is input, the simulator goes to one of these states */
181#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
182#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
183#define STATE_ADDR_ZERO 0x00000030 /* one byte zero address was accepted */
184#define STATE_ADDR_MASK 0x00000030 /* address states mask */
185
186/* During data input/output the simulator is in these states */
187#define STATE_DATAIN 0x00000100 /* waiting for data input */
188#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
189
190#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
191#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
192#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
193#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
194#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
195
196/* Previous operation is done, ready to accept new requests */
197#define STATE_READY 0x00000000
198
199/* This state is used to mark that the next state isn't known yet */
200#define STATE_UNKNOWN 0x10000000
201
202/* Simulator's actions bit masks */
203#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
204#define ACTION_PRGPAGE  0x00200000 /* program the internal buffer to flash */
205#define ACTION_SECERASE 0x00300000 /* erase sector */
206#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
207#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
208#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
209#define ACTION_MASK 0x00700000 /* action mask */
210
211#define NS_OPER_NUM 12 /* Number of operations supported by the simulator */
212#define NS_OPER_STATES 6 /* Maximum number of states in operation */
213
214#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
215#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
216#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
217#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
218#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
219#define OPT_AUTOINCR    0x00000020 /* page number auto increment is possible */
220#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
221#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */
222#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
223
224/* Remove action bits from state */
225#define NS_STATE(x) ((x) & ~ACTION_MASK)
226
227/*
228 * Maximum number of previous states which need to be saved. Currently saving
229 * is only needed for the page program operation preceded by a read command
230 * (which is only valid for 512-byte pages).
231 */
232#define NS_MAX_PREVSTATES 1
233
234/*
235 * The structure which describes all the internal simulator data.
236 */
237struct nandsim {
238 struct mtd_partition part;
239
240 uint busw; /* flash chip bus width (8 or 16) */
241 u_char ids[4]; /* chip's ID bytes */
242 uint32_t options; /* chip's characteristic bits */
243 uint32_t state; /* current chip state */
244 uint32_t nxstate; /* next expected state */
245
246	uint32_t *op;			/* current operation, NULL if the operation isn't known yet */
247 uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
248 uint16_t npstates; /* number of previous states saved */
249 uint16_t stateidx; /* current state index */
250
251 /* The simulated NAND flash image */
252 union flash_media {
253 u_char *byte;
254 uint16_t *word;
255 } mem;
256
257 /* Internal buffer of page + OOB size bytes */
258 union internal_buffer {
259 u_char *byte; /* for byte access */
260 uint16_t *word; /* for 16-bit word access */
261 } buf;
262
263 /* NAND flash "geometry" */
264 struct nandsin_geometry {
265 uint32_t totsz; /* total flash size, bytes */
266 uint32_t secsz; /* flash sector (erase block) size, bytes */
267 uint pgsz; /* NAND flash page size, bytes */
268 uint oobsz; /* page OOB area size, bytes */
269 uint32_t totszoob; /* total flash size including OOB, bytes */
270 uint pgszoob; /* page size including OOB , bytes*/
271 uint secszoob; /* sector size including OOB, bytes */
272 uint pgnum; /* total number of pages */
273 uint pgsec; /* number of pages per sector */
274 uint secshift; /* bits number in sector size */
275 uint pgshift; /* bits number in page size */
276 uint oobshift; /* bits number in OOB size */
277 uint pgaddrbytes; /* bytes per page address */
278 uint secaddrbytes; /* bytes per sector address */
279		uint idbytes;		/* the number of ID bytes that this chip outputs */
280 } geom;
281
282 /* NAND flash internal registers */
283 struct nandsim_regs {
284 unsigned command; /* the command register */
285 u_char status; /* the status register */
286 uint row; /* the page number */
287 uint column; /* the offset within page */
288 uint count; /* internal counter */
289 uint num; /* number of bytes which must be processed */
290 uint off; /* fixed page offset */
291 } regs;
292
293 /* NAND flash lines state */
294 struct ns_lines_status {
295 int ce; /* chip Enable */
296 int cle; /* command Latch Enable */
297 int ale; /* address Latch Enable */
298 int wp; /* write Protect */
299 } lines;
300};
301
302/*
303 * Operations array. To perform any operation the simulator must pass
304 * through the corresponding states chain.
305 */
306static struct nandsim_operations {
307 uint32_t reqopts; /* options which are required to perform the operation */
308 uint32_t states[NS_OPER_STATES]; /* operation's states */
309} ops[NS_OPER_NUM] = {
310 /* Read page + OOB from the beginning */
311 {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
312 STATE_DATAOUT, STATE_READY}},
313 /* Read page + OOB from the second half */
314 {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
315 STATE_DATAOUT, STATE_READY}},
316 /* Read OOB */
317 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
318 STATE_DATAOUT, STATE_READY}},
319	/* Program page starting from the beginning */
320 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
321 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
322	/* Program page starting from the beginning (preceded by READ0) */
323 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
324 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
325	/* Program page starting from the second half */
326 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
327 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
328	/* Program OOB */
329 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
330 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
331 /* Erase sector */
332 {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
333 /* Read status */
334 {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
335 /* Read multi-plane status */
336 {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
337 /* Read ID */
338 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
339 /* Large page devices read page */
340 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
341 STATE_DATAOUT, STATE_READY}}
342};
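/*
 * Worked example (illustrative only): the first entry above, "read page +
 * OOB from the beginning" on a small-page chip, walks the chain
 *
 *	STATE_CMD_READ0 | ACTION_ZEROOFF   (don't add any offset)
 *	STATE_ADDR_PAGE | ACTION_CPY       (copy page + OOB to the internal buffer)
 *	STATE_DATAOUT                      (host reads the buffered bytes)
 *	STATE_READY
 *
 * The ACTION_* bits are stripped with NS_STATE() whenever states are compared.
 */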
343
344/* MTD structure for NAND controller */
345static struct mtd_info *nsmtd;
346
347static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
348
349/*
350 * Initialize the nandsim structure.
351 *
352 * RETURNS: 0 if success, -ERRNO if failure.
353 */
354static int
355init_nandsim(struct mtd_info *mtd)
356{
357 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
358 struct nandsim *ns = (struct nandsim *)(chip->priv);
359 int i;
360
361 if (NS_IS_INITIALIZED(ns)) {
362 NS_ERR("init_nandsim: nandsim is already initialized\n");
363 return -EIO;
364 }
365
366 /* Force mtd to not do delays */
367 chip->chip_delay = 0;
368
369 /* Initialize the NAND flash parameters */
370 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
371 ns->geom.totsz = mtd->size;
372 ns->geom.pgsz = mtd->oobblock;
373 ns->geom.oobsz = mtd->oobsize;
374 ns->geom.secsz = mtd->erasesize;
375 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
376 ns->geom.pgnum = ns->geom.totsz / ns->geom.pgsz;
377 ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz;
378 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
379 ns->geom.pgshift = chip->page_shift;
380 ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
381 ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
382 ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
383 ns->options = 0;
384
385 if (ns->geom.pgsz == 256) {
386 ns->options |= OPT_PAGE256;
387 }
388 else if (ns->geom.pgsz == 512) {
389 ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
390 if (ns->busw == 8)
391 ns->options |= OPT_PAGE512_8BIT;
392 } else if (ns->geom.pgsz == 2048) {
393 ns->options |= OPT_PAGE2048;
394 } else {
395 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
396 return -EIO;
397 }
398
399 if (ns->options & OPT_SMALLPAGE) {
400 if (ns->geom.totsz < (64 << 20)) {
401 ns->geom.pgaddrbytes = 3;
402 ns->geom.secaddrbytes = 2;
403 } else {
404 ns->geom.pgaddrbytes = 4;
405 ns->geom.secaddrbytes = 3;
406 }
407 } else {
408 if (ns->geom.totsz <= (128 << 20)) {
409 ns->geom.pgaddrbytes = 5;
410 ns->geom.secaddrbytes = 2;
411 } else {
412 ns->geom.pgaddrbytes = 5;
413 ns->geom.secaddrbytes = 3;
414 }
415 }
416
417 /* Detect how many ID bytes the NAND chip outputs */
418 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
419 if (second_id_byte != nand_flash_ids[i].id)
420 continue;
421 if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
422 ns->options |= OPT_AUTOINCR;
423 }
424
425 if (ns->busw == 16)
426		NS_WARN("16-bit flash support wasn't tested\n");
427
428 printk("flash size: %u MiB\n", ns->geom.totsz >> 20);
429 printk("page size: %u bytes\n", ns->geom.pgsz);
430 printk("OOB area size: %u bytes\n", ns->geom.oobsz);
431 printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
432 printk("pages number: %u\n", ns->geom.pgnum);
433 printk("pages per sector: %u\n", ns->geom.pgsec);
434 printk("bus width: %u\n", ns->busw);
435 printk("bits in sector size: %u\n", ns->geom.secshift);
436 printk("bits in page size: %u\n", ns->geom.pgshift);
437 printk("bits in OOB size: %u\n", ns->geom.oobshift);
438 printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10);
439 printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
440 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
441 printk("options: %#x\n", ns->options);
442
443 /* Map / allocate and initialize the flash image */
444#ifdef CONFIG_NS_ABS_POS
445 ns->mem.byte = ioremap(CONFIG_NS_ABS_POS, ns->geom.totszoob);
446 if (!ns->mem.byte) {
447 NS_ERR("init_nandsim: failed to map the NAND flash image at address %p\n",
448 (void *)CONFIG_NS_ABS_POS);
449 return -ENOMEM;
450 }
451#else
452 ns->mem.byte = vmalloc(ns->geom.totszoob);
453 if (!ns->mem.byte) {
454 NS_ERR("init_nandsim: unable to allocate %u bytes for flash image\n",
455 ns->geom.totszoob);
456 return -ENOMEM;
457 }
458 memset(ns->mem.byte, 0xFF, ns->geom.totszoob);
459#endif
460
461 /* Allocate / initialize the internal buffer */
462 ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
463 if (!ns->buf.byte) {
464 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
465 ns->geom.pgszoob);
466 goto error;
467 }
468 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
469
470 /* Fill the partition_info structure */
471 ns->part.name = "NAND simulator partition";
472 ns->part.offset = 0;
473 ns->part.size = ns->geom.totsz;
474
475 return 0;
476
477error:
478#ifdef CONFIG_NS_ABS_POS
479 iounmap(ns->mem.byte);
480#else
481 vfree(ns->mem.byte);
482#endif
483
484 return -ENOMEM;
485}
486
487/*
488 * Free the nandsim structure.
489 */
490static void
491free_nandsim(struct nandsim *ns)
492{
493 kfree(ns->buf.byte);
494
495#ifdef CONFIG_NS_ABS_POS
496 iounmap(ns->mem.byte);
497#else
498 vfree(ns->mem.byte);
499#endif
500
501 return;
502}
503
504/*
505 * Returns the string representation of 'state' state.
506 */
507static char *
508get_state_name(uint32_t state)
509{
510 switch (NS_STATE(state)) {
511 case STATE_CMD_READ0:
512 return "STATE_CMD_READ0";
513 case STATE_CMD_READ1:
514 return "STATE_CMD_READ1";
515 case STATE_CMD_PAGEPROG:
516 return "STATE_CMD_PAGEPROG";
517 case STATE_CMD_READOOB:
518 return "STATE_CMD_READOOB";
519 case STATE_CMD_READSTART:
520 return "STATE_CMD_READSTART";
521 case STATE_CMD_ERASE1:
522 return "STATE_CMD_ERASE1";
523 case STATE_CMD_STATUS:
524 return "STATE_CMD_STATUS";
525 case STATE_CMD_STATUS_M:
526 return "STATE_CMD_STATUS_M";
527 case STATE_CMD_SEQIN:
528 return "STATE_CMD_SEQIN";
529 case STATE_CMD_READID:
530 return "STATE_CMD_READID";
531 case STATE_CMD_ERASE2:
532 return "STATE_CMD_ERASE2";
533 case STATE_CMD_RESET:
534 return "STATE_CMD_RESET";
535 case STATE_ADDR_PAGE:
536 return "STATE_ADDR_PAGE";
537 case STATE_ADDR_SEC:
538 return "STATE_ADDR_SEC";
539 case STATE_ADDR_ZERO:
540 return "STATE_ADDR_ZERO";
541 case STATE_DATAIN:
542 return "STATE_DATAIN";
543 case STATE_DATAOUT:
544 return "STATE_DATAOUT";
545 case STATE_DATAOUT_ID:
546 return "STATE_DATAOUT_ID";
547 case STATE_DATAOUT_STATUS:
548 return "STATE_DATAOUT_STATUS";
549 case STATE_DATAOUT_STATUS_M:
550 return "STATE_DATAOUT_STATUS_M";
551 case STATE_READY:
552 return "STATE_READY";
553 case STATE_UNKNOWN:
554 return "STATE_UNKNOWN";
555 }
556
557 NS_ERR("get_state_name: unknown state, BUG\n");
558 return NULL;
559}
560
561/*
562 * Check if command is valid.
563 *
564 * RETURNS: 1 if wrong command, 0 if right.
565 */
566static int
567check_command(int cmd)
568{
569 switch (cmd) {
570
571 case NAND_CMD_READ0:
572 case NAND_CMD_READSTART:
573 case NAND_CMD_PAGEPROG:
574 case NAND_CMD_READOOB:
575 case NAND_CMD_ERASE1:
576 case NAND_CMD_STATUS:
577 case NAND_CMD_SEQIN:
578 case NAND_CMD_READID:
579 case NAND_CMD_ERASE2:
580 case NAND_CMD_RESET:
581 case NAND_CMD_READ1:
582 return 0;
583
584 case NAND_CMD_STATUS_MULTI:
585 default:
586 return 1;
587 }
588}
589
590/*
591 * Returns state after command is accepted by command number.
592 */
593static uint32_t
594get_state_by_command(unsigned command)
595{
596 switch (command) {
597 case NAND_CMD_READ0:
598 return STATE_CMD_READ0;
599 case NAND_CMD_READ1:
600 return STATE_CMD_READ1;
601 case NAND_CMD_PAGEPROG:
602 return STATE_CMD_PAGEPROG;
603 case NAND_CMD_READSTART:
604 return STATE_CMD_READSTART;
605 case NAND_CMD_READOOB:
606 return STATE_CMD_READOOB;
607 case NAND_CMD_ERASE1:
608 return STATE_CMD_ERASE1;
609 case NAND_CMD_STATUS:
610 return STATE_CMD_STATUS;
611 case NAND_CMD_STATUS_MULTI:
612 return STATE_CMD_STATUS_M;
613 case NAND_CMD_SEQIN:
614 return STATE_CMD_SEQIN;
615 case NAND_CMD_READID:
616 return STATE_CMD_READID;
617 case NAND_CMD_ERASE2:
618 return STATE_CMD_ERASE2;
619 case NAND_CMD_RESET:
620 return STATE_CMD_RESET;
621 }
622
623 NS_ERR("get_state_by_command: unknown command, BUG\n");
624 return 0;
625}
626
627/*
628 * Move an address byte to the corresponding internal register.
629 */
630static inline void
631accept_addr_byte(struct nandsim *ns, u_char bt)
632{
633 uint byte = (uint)bt;
634
635 if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
636 ns->regs.column |= (byte << 8 * ns->regs.count);
637 else {
638 ns->regs.row |= (byte << 8 * (ns->regs.count -
639 ns->geom.pgaddrbytes +
640 ns->geom.secaddrbytes));
641 }
642
643 return;
644}
645
646/*
647 * Switch to STATE_READY state.
648 */
649static inline void
650switch_to_ready_state(struct nandsim *ns, u_char status)
651{
652 NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
653
654 ns->state = STATE_READY;
655 ns->nxstate = STATE_UNKNOWN;
656 ns->op = NULL;
657 ns->npstates = 0;
658 ns->stateidx = 0;
659 ns->regs.num = 0;
660 ns->regs.count = 0;
661 ns->regs.off = 0;
662 ns->regs.row = 0;
663 ns->regs.column = 0;
664 ns->regs.status = status;
665}
666
667/*
668 * If the operation isn't known yet, try to find it in the global array
669 * of supported operations.
670 *
671 * The operation can be unknown for the following reasons:
672 * 1. A new command was accepted and this is the first call to find the
673 *    corresponding states chain. In this case ns->npstates = 0;
674 * 2. There are several operations which begin with the same command(s)
675 *    (for example the program from the second half and read from the
676 *    second half operations both begin with the READ1 command). In this
677 *    case the ns->pstates[] array contains previous states.
678 *
679 * Thus, the function tries to find operation containing the following
680 * states (if the 'flag' parameter is 0):
681 * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
682 *
683 * If (one and only one) matching operation is found, it is accepted (
684 * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
685 * zeroed).
686 *
687 * If there are several matches, the current state is pushed to the
688 * ns->pstates.
689 *
690 * The operation can be unknown only while commands are input to the chip.
691 * As soon as an address is accepted, the operation must be known.
692 * In such situation the function is called with 'flag' != 0, and the
693 * operation is searched using the following pattern:
694 * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
695 *
696 * It is supposed that this pattern must either match one operation or
697 * none. There can't be any ambiguity in that case.
698 *
699 * If no matches are found, the function does the following:
700 * 1. if there are saved states present, try to ignore them and search
701 * again only using the last command. If nothing was found, switch
702 * to the STATE_READY state.
703 * 2. if there are no saved states, switch to the STATE_READY state.
704 *
705 * RETURNS: -2 - no matched operations found.
706 * -1 - several matches.
707 * 0 - operation is found.
708 */
709static int
710find_operation(struct nandsim *ns, uint32_t flag)
711{
712 int opsfound = 0;
713 int i, j, idx = 0;
714
715 for (i = 0; i < NS_OPER_NUM; i++) {
716
717 int found = 1;
718
719 if (!(ns->options & ops[i].reqopts))
720 /* Ignore operations we can't perform */
721 continue;
722
723 if (flag) {
724 if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
725 continue;
726 } else {
727 if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
728 continue;
729 }
730
731 for (j = 0; j < ns->npstates; j++)
732 if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
733 && (ns->options & ops[idx].reqopts)) {
734 found = 0;
735 break;
736 }
737
738 if (found) {
739 idx = i;
740 opsfound += 1;
741 }
742 }
743
744 if (opsfound == 1) {
745 /* Exact match */
746 ns->op = &ops[idx].states[0];
747 if (flag) {
748 /*
749			 * In this case the find_operation function was
750			 * called when the address input has just begun. It isn't
751			 * yet fully input, so the current state must
752			 * not be one of STATE_ADDR_*; instead, the STATE_ADDR_*
753			 * state must be the next state (ns->nxstate).
754 */
755 ns->stateidx = ns->npstates - 1;
756 } else {
757 ns->stateidx = ns->npstates;
758 }
759 ns->npstates = 0;
760 ns->state = ns->op[ns->stateidx];
761 ns->nxstate = ns->op[ns->stateidx + 1];
762 NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
763 idx, get_state_name(ns->state), get_state_name(ns->nxstate));
764 return 0;
765 }
766
767 if (opsfound == 0) {
768 /* Nothing was found. Try to ignore previous commands (if any) and search again */
769 if (ns->npstates != 0) {
770 NS_DBG("find_operation: no operation found, try again with state %s\n",
771 get_state_name(ns->state));
772 ns->npstates = 0;
773 return find_operation(ns, 0);
774
775 }
776 NS_DBG("find_operation: no operations found\n");
777 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
778 return -2;
779 }
780
781 if (flag) {
782 /* This shouldn't happen */
783 NS_DBG("find_operation: BUG, operation must be known if address is input\n");
784 return -2;
785 }
786
787 NS_DBG("find_operation: there is still ambiguity\n");
788
789 ns->pstates[ns->npstates++] = ns->state;
790
791 return -1;
792}
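/*
 * Worked example (illustrative only): on a 512-byte page, 8-bit chip the
 * READ1 command starts both the "read from the second half" and the
 * "program from the second half" entries of ops[].  The first call to
 * find_operation() therefore finds two matches, returns -1 and pushes
 * STATE_CMD_READ1 onto ns->pstates[].  The next input resolves the
 * ambiguity: an address byte (flag != 0) selects the read operation,
 * while a following SEQIN command selects the program operation.
 */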
793
794/*
795 * If state has any action bit, perform this action.
796 *
797 * RETURNS: 0 if success, -1 if error.
798 */
799static int
800do_state_action(struct nandsim *ns, uint32_t action)
801{
802 int i, num;
803 int busdiv = ns->busw == 8 ? 1 : 2;
804
805 action &= ACTION_MASK;
806
807 /* Check that page address input is correct */
808 if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
809 NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
810 return -1;
811 }
812
813 switch (action) {
814
815 case ACTION_CPY:
816 /*
817 * Copy page data to the internal buffer.
818 */
819
820 /* Column shouldn't be very large */
821 if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
822 NS_ERR("do_state_action: column number is too large\n");
823 break;
824 }
825 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
826 memcpy(ns->buf.byte, ns->mem.byte + NS_RAW_OFFSET(ns) + ns->regs.off, num);
827
828 NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
829 num, NS_RAW_OFFSET(ns) + ns->regs.off);
830
831 if (ns->regs.off == 0)
832 NS_LOG("read page %d\n", ns->regs.row);
833 else if (ns->regs.off < ns->geom.pgsz)
834 NS_LOG("read page %d (second half)\n", ns->regs.row);
835 else
836 NS_LOG("read OOB of page %d\n", ns->regs.row);
837
838 NS_UDELAY(access_delay);
839 NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
840
841 break;
842
843 case ACTION_SECERASE:
844 /*
845 * Erase sector.
846 */
847
848 if (ns->lines.wp) {
849 NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
850 return -1;
851 }
852
853 if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
854 || (ns->regs.row & ~(ns->geom.secsz - 1))) {
855 NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
856 return -1;
857 }
858
859 ns->regs.row = (ns->regs.row <<
860 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
861 ns->regs.column = 0;
862
863 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
864 ns->regs.row, NS_RAW_OFFSET(ns));
865 NS_LOG("erase sector %d\n", ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift));
866
867 memset(ns->mem.byte + NS_RAW_OFFSET(ns), 0xFF, ns->geom.secszoob);
868
869 NS_MDELAY(erase_delay);
870
871 break;
872
873 case ACTION_PRGPAGE:
874 /*
 875		 * Program page - move internal buffer data to the page.
876 */
877
878 if (ns->lines.wp) {
 879			NS_WARN("do_state_action: device is write-protected, ignore program command\n");
880 return -1;
881 }
882
883 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
884 if (num != ns->regs.count) {
885 NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
886 ns->regs.count, num);
887 return -1;
888 }
889
890 for (i = 0; i < num; i++)
891 ns->mem.byte[NS_RAW_OFFSET(ns) + ns->regs.off + i] &= ns->buf.byte[i];
892
893 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
894 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
 895		NS_LOG("program page %d\n", ns->regs.row);
896
897 NS_UDELAY(programm_delay);
898 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
899
900 break;
901
902 case ACTION_ZEROOFF:
903 NS_DBG("do_state_action: set internal offset to 0\n");
904 ns->regs.off = 0;
905 break;
906
907 case ACTION_HALFOFF:
908 if (!(ns->options & OPT_PAGE512_8BIT)) {
909 NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
910 "byte page size 8x chips\n");
911 return -1;
912 }
913 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
914 ns->regs.off = ns->geom.pgsz/2;
915 break;
916
917 case ACTION_OOBOFF:
918 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
919 ns->regs.off = ns->geom.pgsz;
920 break;
921
922 default:
923 NS_DBG("do_state_action: BUG! unknown action\n");
924 }
925
926 return 0;
927}
928
929/*
930 * Switch simulator's state.
931 */
932static void
933switch_state(struct nandsim *ns)
934{
935 if (ns->op) {
936 /*
 937		 * The current operation has already been identified.
938 * Just follow the states chain.
939 */
940
941 ns->stateidx += 1;
942 ns->state = ns->nxstate;
943 ns->nxstate = ns->op[ns->stateidx + 1];
944
945 NS_DBG("switch_state: operation is known, switch to the next state, "
946 "state: %s, nxstate: %s\n",
947 get_state_name(ns->state), get_state_name(ns->nxstate));
948
 949		/* See whether we need to do some action */
950 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
951 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
952 return;
953 }
954
955 } else {
956 /*
957 * We don't yet know which operation we perform.
958 * Try to identify it.
959 */
960
961 /*
962 * The only event causing the switch_state function to
 963		 * be called with a yet unknown operation is a new command.
964 */
965 ns->state = get_state_by_command(ns->regs.command);
966
967 NS_DBG("switch_state: operation is unknown, try to find it\n");
968
969 if (find_operation(ns, 0) != 0)
970 return;
971
972 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
973 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
974 return;
975 }
976 }
977
978 /* For 16x devices column means the page offset in words */
979 if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
980 NS_DBG("switch_state: double the column number for 16x device\n");
981 ns->regs.column <<= 1;
982 }
983
984 if (NS_STATE(ns->nxstate) == STATE_READY) {
985 /*
986 * The current state is the last. Return to STATE_READY
987 */
988
989 u_char status = NS_STATUS_OK(ns);
990
991 /* In case of data states, see if all bytes were input/output */
992 if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
993 && ns->regs.count != ns->regs.num) {
994 NS_WARN("switch_state: not all bytes were processed, %d left\n",
995 ns->regs.num - ns->regs.count);
996 status = NS_STATUS_FAILED(ns);
997 }
998
999 NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
1000
1001 switch_to_ready_state(ns, status);
1002
1003 return;
1004 } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
1005 /*
1006 * If the next state is data input/output, switch to it now
1007 */
1008
1009 ns->state = ns->nxstate;
1010 ns->nxstate = ns->op[++ns->stateidx + 1];
1011 ns->regs.num = ns->regs.count = 0;
1012
1013 NS_DBG("switch_state: the next state is data I/O, switch, "
1014 "state: %s, nxstate: %s\n",
1015 get_state_name(ns->state), get_state_name(ns->nxstate));
1016
1017 /*
1018 * Set the internal register to the count of bytes which
1019 * are expected to be input or output
1020 */
1021 switch (NS_STATE(ns->state)) {
1022 case STATE_DATAIN:
1023 case STATE_DATAOUT:
1024 ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1025 break;
1026
1027 case STATE_DATAOUT_ID:
1028 ns->regs.num = ns->geom.idbytes;
1029 break;
1030
1031 case STATE_DATAOUT_STATUS:
1032 case STATE_DATAOUT_STATUS_M:
1033 ns->regs.count = ns->regs.num = 0;
1034 break;
1035
1036 default:
1037 NS_ERR("switch_state: BUG! unknown data state\n");
1038 }
1039
1040 } else if (ns->nxstate & STATE_ADDR_MASK) {
1041 /*
1042 * If the next state is address input, set the internal
1043 * register to the number of expected address bytes
1044 */
1045
1046 ns->regs.count = 0;
1047
1048 switch (NS_STATE(ns->nxstate)) {
1049 case STATE_ADDR_PAGE:
1050 ns->regs.num = ns->geom.pgaddrbytes;
1051
1052 break;
1053 case STATE_ADDR_SEC:
1054 ns->regs.num = ns->geom.secaddrbytes;
1055 break;
1056
1057 case STATE_ADDR_ZERO:
1058 ns->regs.num = 1;
1059 break;
1060
1061 default:
1062 NS_ERR("switch_state: BUG! unknown address state\n");
1063 }
1064 } else {
1065 /*
1066 * Just reset internal counters.
1067 */
1068
1069 ns->regs.num = 0;
1070 ns->regs.count = 0;
1071 }
1072}
1073
1074static void
1075ns_hwcontrol(struct mtd_info *mtd, int cmd)
1076{
1077 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1078
1079 switch (cmd) {
1080
1081 /* set CLE line high */
1082 case NAND_CTL_SETCLE:
1083 NS_DBG("ns_hwcontrol: start command latch cycles\n");
1084 ns->lines.cle = 1;
1085 break;
1086
1087 /* set CLE line low */
1088 case NAND_CTL_CLRCLE:
1089 NS_DBG("ns_hwcontrol: stop command latch cycles\n");
1090 ns->lines.cle = 0;
1091 break;
1092
1093 /* set ALE line high */
1094 case NAND_CTL_SETALE:
1095 NS_DBG("ns_hwcontrol: start address latch cycles\n");
1096 ns->lines.ale = 1;
1097 break;
1098
1099 /* set ALE line low */
1100 case NAND_CTL_CLRALE:
1101 NS_DBG("ns_hwcontrol: stop address latch cycles\n");
1102 ns->lines.ale = 0;
1103 break;
1104
1105 /* set WP line high */
1106 case NAND_CTL_SETWP:
1107 NS_DBG("ns_hwcontrol: enable write protection\n");
1108 ns->lines.wp = 1;
1109 break;
1110
1111 /* set WP line low */
1112 case NAND_CTL_CLRWP:
1113 NS_DBG("ns_hwcontrol: disable write protection\n");
1114 ns->lines.wp = 0;
1115 break;
1116
1117 /* set CE line low */
1118 case NAND_CTL_SETNCE:
1119 NS_DBG("ns_hwcontrol: enable chip\n");
1120 ns->lines.ce = 1;
1121 break;
1122
1123 /* set CE line high */
1124 case NAND_CTL_CLRNCE:
1125 NS_DBG("ns_hwcontrol: disable chip\n");
1126 ns->lines.ce = 0;
1127 break;
1128
1129 default:
1130 NS_ERR("hwcontrol: unknown command\n");
1131 }
1132
1133 return;
1134}
1135
1136static u_char
1137ns_nand_read_byte(struct mtd_info *mtd)
1138{
1139 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1140 u_char outb = 0x00;
1141
1142 /* Sanity and correctness checks */
1143 if (!ns->lines.ce) {
1144 NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
1145 return outb;
1146 }
1147 if (ns->lines.ale || ns->lines.cle) {
1148 NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
1149 return outb;
1150 }
1151 if (!(ns->state & STATE_DATAOUT_MASK)) {
 1152		NS_WARN("read_byte: unexpected data output cycle, state is %s, "
1153 "return %#x\n", get_state_name(ns->state), (uint)outb);
1154 return outb;
1155 }
1156
 1157	/* The status register may be read as many times as desired */
1158 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
1159 NS_DBG("read_byte: return %#x status\n", ns->regs.status);
1160 return ns->regs.status;
1161 }
1162
1163 /* Check if there is any data in the internal buffer which may be read */
1164 if (ns->regs.count == ns->regs.num) {
1165 NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
1166 return outb;
1167 }
1168
1169 switch (NS_STATE(ns->state)) {
1170 case STATE_DATAOUT:
1171 if (ns->busw == 8) {
1172 outb = ns->buf.byte[ns->regs.count];
1173 ns->regs.count += 1;
1174 } else {
1175 outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
1176 ns->regs.count += 2;
1177 }
1178 break;
1179 case STATE_DATAOUT_ID:
1180 NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
1181 outb = ns->ids[ns->regs.count];
1182 ns->regs.count += 1;
1183 break;
1184 default:
1185 BUG();
1186 }
1187
1188 if (ns->regs.count == ns->regs.num) {
1189 NS_DBG("read_byte: all bytes were read\n");
1190
1191 /*
 1192		 * The OPT_AUTOINCR option allows reading the next consecutive pages
 1193		 * without a new read operation cycle.
1194 */
1195 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1196 ns->regs.count = 0;
1197 if (ns->regs.row + 1 < ns->geom.pgnum)
1198 ns->regs.row += 1;
1199 NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1200 do_state_action(ns, ACTION_CPY);
1201 }
1202 else if (NS_STATE(ns->nxstate) == STATE_READY)
1203 switch_state(ns);
1204
1205 }
1206
1207 return outb;
1208}
1209
1210static void
1211ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1212{
1213 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1214
1215 /* Sanity and correctness checks */
1216 if (!ns->lines.ce) {
1217 NS_ERR("write_byte: chip is disabled, ignore write\n");
1218 return;
1219 }
1220 if (ns->lines.ale && ns->lines.cle) {
1221 NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
1222 return;
1223 }
1224
1225 if (ns->lines.cle == 1) {
1226 /*
1227 * The byte written is a command.
1228 */
1229
1230 if (byte == NAND_CMD_RESET) {
1231 NS_LOG("reset chip\n");
1232 switch_to_ready_state(ns, NS_STATUS_OK(ns));
1233 return;
1234 }
1235
1236 /*
1237 * Chip might still be in STATE_DATAOUT
1238 * (if OPT_AUTOINCR feature is supported), STATE_DATAOUT_STATUS or
1239 * STATE_DATAOUT_STATUS_M state. If so, switch state.
1240 */
1241 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
1242 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
1243 || ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT))
1244 switch_state(ns);
1245
1246 /* Check if chip is expecting command */
1247 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1248 /*
 1249			 * We are in a situation where something other than a command
 1250			 * was expected but a command was input. In this case ignore
 1251			 * the previous command(s)/state(s) and accept the last one.
1252 */
1253 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
1254 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
1255 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1256 }
1257
1258 /* Check that the command byte is correct */
1259 if (check_command(byte)) {
1260 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1261 return;
1262 }
1263
1264 NS_DBG("command byte corresponding to %s state accepted\n",
1265 get_state_name(get_state_by_command(byte)));
1266 ns->regs.command = byte;
1267 switch_state(ns);
1268
1269 } else if (ns->lines.ale == 1) {
1270 /*
1271 * The byte written is an address.
1272 */
1273
1274 if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
1275
1276 NS_DBG("write_byte: operation isn't known yet, identify it\n");
1277
1278 if (find_operation(ns, 1) < 0)
1279 return;
1280
1281 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1282 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1283 return;
1284 }
1285
1286 ns->regs.count = 0;
1287 switch (NS_STATE(ns->nxstate)) {
1288 case STATE_ADDR_PAGE:
1289 ns->regs.num = ns->geom.pgaddrbytes;
1290 break;
1291 case STATE_ADDR_SEC:
1292 ns->regs.num = ns->geom.secaddrbytes;
1293 break;
1294 case STATE_ADDR_ZERO:
1295 ns->regs.num = 1;
1296 break;
1297 default:
1298 BUG();
1299 }
1300 }
1301
1302 /* Check that chip is expecting address */
1303 if (!(ns->nxstate & STATE_ADDR_MASK)) {
1304 NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
1305 "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
1306 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1307 return;
1308 }
1309
1310 /* Check if this is expected byte */
1311 if (ns->regs.count == ns->regs.num) {
1312 NS_ERR("write_byte: no more address bytes expected\n");
1313 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1314 return;
1315 }
1316
1317 accept_addr_byte(ns, byte);
1318
1319 ns->regs.count += 1;
1320
1321 NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
1322 (uint)byte, ns->regs.count, ns->regs.num);
1323
1324 if (ns->regs.count == ns->regs.num) {
1325 NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
1326 switch_state(ns);
1327 }
1328
1329 } else {
1330 /*
 1331		 * The byte written is input data.
1332 */
1333
1334 /* Check that chip is expecting data input */
1335 if (!(ns->state & STATE_DATAIN_MASK)) {
1336 NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
1337 "switch to %s\n", (uint)byte,
1338 get_state_name(ns->state), get_state_name(STATE_READY));
1339 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1340 return;
1341 }
1342
1343 /* Check if this is expected byte */
1344 if (ns->regs.count == ns->regs.num) {
 1345			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
1346 ns->regs.num);
1347 return;
1348 }
1349
1350 if (ns->busw == 8) {
1351 ns->buf.byte[ns->regs.count] = byte;
1352 ns->regs.count += 1;
1353 } else {
1354 ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
1355 ns->regs.count += 2;
1356 }
1357 }
1358
1359 return;
1360}
1361
1362static int
1363ns_device_ready(struct mtd_info *mtd)
1364{
1365 NS_DBG("device_ready\n");
1366 return 1;
1367}
1368
1369static uint16_t
1370ns_nand_read_word(struct mtd_info *mtd)
1371{
1372 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
1373
1374 NS_DBG("read_word\n");
1375
1376 return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
1377}
1378
1379static void
1380ns_nand_write_word(struct mtd_info *mtd, uint16_t word)
1381{
1382 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
1383
1384 NS_DBG("write_word\n");
1385
1386 chip->write_byte(mtd, word & 0xFF);
1387 chip->write_byte(mtd, word >> 8);
1388}
1389
1390static void
1391ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
1392{
1393 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1394
1395 /* Check that chip is expecting data input */
1396 if (!(ns->state & STATE_DATAIN_MASK)) {
1397 NS_ERR("write_buf: data input isn't expected, state is %s, "
1398 "switch to STATE_READY\n", get_state_name(ns->state));
1399 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1400 return;
1401 }
1402
1403 /* Check if these are expected bytes */
1404 if (ns->regs.count + len > ns->regs.num) {
1405 NS_ERR("write_buf: too many input bytes\n");
1406 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1407 return;
1408 }
1409
1410 memcpy(ns->buf.byte + ns->regs.count, buf, len);
1411 ns->regs.count += len;
1412
1413 if (ns->regs.count == ns->regs.num) {
1414 NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
1415 }
1416}
1417
1418static void
1419ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
1420{
1421 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1422
1423 /* Sanity and correctness checks */
1424 if (!ns->lines.ce) {
1425 NS_ERR("read_buf: chip is disabled\n");
1426 return;
1427 }
1428 if (ns->lines.ale || ns->lines.cle) {
1429 NS_ERR("read_buf: ALE or CLE pin is high\n");
1430 return;
1431 }
1432 if (!(ns->state & STATE_DATAOUT_MASK)) {
1433 NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
1434 get_state_name(ns->state));
1435 return;
1436 }
1437
1438 if (NS_STATE(ns->state) != STATE_DATAOUT) {
1439 int i;
1440
1441 for (i = 0; i < len; i++)
1442 buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
1443
1444 return;
1445 }
1446
1447 /* Check if these are expected bytes */
1448 if (ns->regs.count + len > ns->regs.num) {
1449 NS_ERR("read_buf: too many bytes to read\n");
1450 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1451 return;
1452 }
1453
1454 memcpy(buf, ns->buf.byte + ns->regs.count, len);
1455 ns->regs.count += len;
1456
1457 if (ns->regs.count == ns->regs.num) {
1458 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1459 ns->regs.count = 0;
1460 if (ns->regs.row + 1 < ns->geom.pgnum)
1461 ns->regs.row += 1;
1462 NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
1463 do_state_action(ns, ACTION_CPY);
1464 }
1465 else if (NS_STATE(ns->nxstate) == STATE_READY)
1466 switch_state(ns);
1467 }
1468
1469 return;
1470}
1471
1472static int
1473ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
1474{
1475 ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
1476
1477 if (!memcmp(buf, &ns_verify_buf[0], len)) {
1478 NS_DBG("verify_buf: the buffer is OK\n");
1479 return 0;
1480 } else {
1481 NS_DBG("verify_buf: the buffer is wrong\n");
1482 return -EFAULT;
1483 }
1484}
1485
1486/*
 1487 * Having only the NAND chip IDs, we call nand_scan(), which detects the NAND flash
1488 * parameters and then calls scan_bbt in order to scan/find/build the
1489 * NAND flash bad block table. But since at that moment the NAND flash
1490 * image isn't allocated in the simulator, errors arise. To avoid this
1491 * we redefine the scan_bbt callback and initialize the nandsim structure
1492 * before the flash media scanning.
1493 */
1494int ns_scan_bbt(struct mtd_info *mtd)
1495{
1496 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
1497 struct nandsim *ns = (struct nandsim *)(chip->priv);
1498 int retval;
1499
1500 if (!NS_IS_INITIALIZED(ns))
1501 if ((retval = init_nandsim(mtd)) != 0) {
1502 NS_ERR("scan_bbt: can't initialize the nandsim structure\n");
1503 return retval;
1504 }
1505 if ((retval = nand_default_bbt(mtd)) != 0) {
1506 free_nandsim(ns);
1507 return retval;
1508 }
1509
1510 return 0;
1511}
1512
1513/*
1514 * Module initialization function
1515 */
1516int __init ns_init_module(void)
1517{
1518 struct nand_chip *chip;
1519 struct nandsim *nand;
1520 int retval = -ENOMEM;
1521
1522 if (bus_width != 8 && bus_width != 16) {
1523 NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
1524 return -EINVAL;
1525 }
1526
1527 /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
1528 nsmtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
1529 + sizeof(struct nandsim), GFP_KERNEL);
1530 if (!nsmtd) {
1531 NS_ERR("unable to allocate core structures.\n");
1532 return -ENOMEM;
1533 }
1534 memset(nsmtd, 0, sizeof(struct mtd_info) + sizeof(struct nand_chip) +
1535 sizeof(struct nandsim));
1536 chip = (struct nand_chip *)(nsmtd + 1);
1537 nsmtd->priv = (void *)chip;
1538 nand = (struct nandsim *)(chip + 1);
1539 chip->priv = (void *)nand;
1540
1541 /*
1542 * Register simulator's callbacks.
1543 */
1544 chip->hwcontrol = ns_hwcontrol;
1545 chip->read_byte = ns_nand_read_byte;
1546 chip->dev_ready = ns_device_ready;
1547 chip->scan_bbt = ns_scan_bbt;
1548 chip->write_byte = ns_nand_write_byte;
1549 chip->write_buf = ns_nand_write_buf;
1550 chip->read_buf = ns_nand_read_buf;
1551 chip->verify_buf = ns_nand_verify_buf;
1552 chip->write_word = ns_nand_write_word;
1553 chip->read_word = ns_nand_read_word;
1554 chip->eccmode = NAND_ECC_SOFT;
1555
1556 /*
1557 * Perform minimum nandsim structure initialization to handle
1558 * the initial ID read command correctly
1559 */
1560 if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
1561 nand->geom.idbytes = 4;
1562 else
1563 nand->geom.idbytes = 2;
1564 nand->regs.status = NS_STATUS_OK(nand);
1565 nand->nxstate = STATE_UNKNOWN;
1566 nand->options |= OPT_PAGE256; /* temporary value */
1567 nand->ids[0] = first_id_byte;
1568 nand->ids[1] = second_id_byte;
1569 nand->ids[2] = third_id_byte;
1570 nand->ids[3] = fourth_id_byte;
1571 if (bus_width == 16) {
1572 nand->busw = 16;
1573 chip->options |= NAND_BUSWIDTH_16;
1574 }
1575
1576 if ((retval = nand_scan(nsmtd, 1)) != 0) {
1577 NS_ERR("can't register NAND Simulator\n");
1578 if (retval > 0)
1579 retval = -ENXIO;
1580 goto error;
1581 }
1582
1583 /* Register NAND as one big partition */
1584 add_mtd_partitions(nsmtd, &nand->part, 1);
1585
1586 return 0;
1587
1588error:
1589 kfree(nsmtd);
1590
1591 return retval;
1592}
1593
1594module_init(ns_init_module);
1595
1596/*
1597 * Module clean-up function
1598 */
1599static void __exit ns_cleanup_module(void)
1600{
1601 struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
1602
1603 free_nandsim(ns); /* Free nandsim private resources */
 1604	nand_release(nsmtd); /* Unregister the driver */
1605 kfree(nsmtd); /* Free other structures */
1606}
1607
1608module_exit(ns_cleanup_module);
1609
1610MODULE_LICENSE ("GPL");
1611MODULE_AUTHOR ("Artem B. Bityuckiy");
1612MODULE_DESCRIPTION ("The NAND flash simulator");
1613
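The simulator above is a small table-driven state machine: each supported operation is an array of states ending in STATE_READY, ns->op points at that array once the operation is identified, and switch_state() advances ns->stateidx along it while performing any ACTION_* bits carried by the current state. The stand-alone sketch below only re-renders that advance step with simplified stand-in types; names such as sim_advance and the ST_*/ACT_* constants are invented for illustration and are not the driver's real API.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the simulator's packed state encoding. */
#define ST_READY	0x01
#define ST_CMD		0x02
#define ST_ADDR		0x03
#define ST_DATAOUT	0x04
#define ACT_COPY	0x10	/* an "action" bit carried alongside a state */
#define ST_MASK		0x0f

struct sim {
	const uint32_t *op;	/* chain of states of the current operation */
	int stateidx;		/* index of the current state in the chain  */
	uint32_t state, nxstate;
};

/*
 * Advance one step along the chain, the way switch_state() does once the
 * operation is known: move to the next state, run its action bit, and report
 * when the next state is READY (the driver then calls switch_to_ready_state).
 */
static int sim_advance(struct sim *s)
{
	s->stateidx += 1;
	s->state = s->nxstate;
	s->nxstate = s->op[s->stateidx + 1];

	if (s->state & ACT_COPY)
		printf("  action: copy page to the internal buffer\n");

	return (s->nxstate & ST_MASK) == ST_READY;
}

int main(void)
{
	/* A hypothetical "read page" operation: command, address, data out, ready. */
	static const uint32_t read_op[] =
		{ ST_CMD, ST_ADDR, ST_DATAOUT | ACT_COPY, ST_READY };
	struct sim s = { .op = read_op, .stateidx = 0,
			 .state = read_op[0], .nxstate = read_op[1] };

	while (!sim_advance(&s))
		printf("now in state %#x, next %#x\n",
		       (unsigned)s.state, (unsigned)s.nxstate);
	printf("operation complete, returning to READY\n");
	return 0;
}

Compared with the driver, the sketch drops the pstates bookkeeping, the per-state byte counters and the error paths; it is meant only to make the op / stateidx / nxstate trio easier to follow.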
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
new file mode 100644
index 000000000000..e510a83d7bdb
--- /dev/null
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -0,0 +1,420 @@
1/*
2 * drivers/mtd/nand/ppchameleonevb.c
3 *
4 * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
5 *
6 * Derived from drivers/mtd/nand/edb7312.c
7 *
8 *
9 * $Id: ppchameleonevb.c,v 1.6 2004/11/05 16:07:16 kalev Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Overview:
16 * This is a device driver for the NAND flash devices found on the
17 * PPChameleon/PPChameleonEVB system.
18 * PPChameleon options (autodetected):
19 * - BA model: no NAND
20 * - ME model: 32MB (Samsung K9F5608U0B)
21 * - HI model: 128MB (Samsung K9F1G08UOM)
22 * PPChameleonEVB options:
23 * - 32MB (Samsung K9F5608U0B)
24 */
25
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/module.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h>
32#include <asm/io.h>
33#include <platforms/PPChameleonEVB.h>
34
35#undef USE_READY_BUSY_PIN
36#define USE_READY_BUSY_PIN
37/* see datasheets (tR) */
38#define NAND_BIG_DELAY_US 25
39#define NAND_SMALL_DELAY_US 10
40
41/* handy sizes */
42#define SZ_4M 0x00400000
43#define NAND_SMALL_SIZE 0x02000000
44#define NAND_MTD_NAME "ppchameleon-nand"
45#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
46
47/* GPIO pins used to drive NAND chip mounted on processor module */
48#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
49#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
50#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
51#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
52/* GPIO pins used to drive NAND chip mounted on EVB */
53#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
54#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
55#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
56#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
57
58/*
59 * MTD structure for PPChameleonEVB board
60 */
61static struct mtd_info *ppchameleon_mtd = NULL;
62static struct mtd_info *ppchameleonevb_mtd = NULL;
63
64/*
65 * Module stuff
66 */
67static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
68static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
69
70#ifdef MODULE
71module_param(ppchameleon_fio_pbase, ulong, 0);
72module_param(ppchameleonevb_fio_pbase, ulong, 0);
73#else
74__setup("ppchameleon_fio_pbase=",ppchameleon_fio_pbase);
75__setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase);
76#endif
77
78#ifdef CONFIG_MTD_PARTITIONS
79/*
80 * Define static partitions for flash devices
81 */
82static struct mtd_partition partition_info_hi[] = {
83 { name: "PPChameleon HI Nand Flash",
84 offset: 0,
85 size: 128*1024*1024 }
86};
87
88static struct mtd_partition partition_info_me[] = {
89 { name: "PPChameleon ME Nand Flash",
90 offset: 0,
91 size: 32*1024*1024 }
92};
93
94static struct mtd_partition partition_info_evb[] = {
95 { name: "PPChameleonEVB Nand Flash",
96 offset: 0,
97 size: 32*1024*1024 }
98};
99
100#define NUM_PARTITIONS 1
101
102extern int parse_cmdline_partitions(struct mtd_info *master,
103 struct mtd_partition **pparts,
104 const char *mtd_id);
105#endif
106
107
108/*
109 * hardware specific access to control-lines
110 */
111static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd)
112{
113 switch(cmd) {
114
115 case NAND_CTL_SETCLE:
116 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
117 break;
118 case NAND_CTL_CLRCLE:
119 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
120 break;
121 case NAND_CTL_SETALE:
122 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
123 break;
124 case NAND_CTL_CLRALE:
125 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
126 break;
127 case NAND_CTL_SETNCE:
128 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
129 break;
130 case NAND_CTL_CLRNCE:
131 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
132 break;
133 }
134}
135
136static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd)
137{
138 switch(cmd) {
139
140 case NAND_CTL_SETCLE:
141 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
142 break;
143 case NAND_CTL_CLRCLE:
144 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
145 break;
146 case NAND_CTL_SETALE:
147 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
148 break;
149 case NAND_CTL_CLRALE:
150 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
151 break;
152 case NAND_CTL_SETNCE:
153 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
154 break;
155 case NAND_CTL_CLRNCE:
156 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
157 break;
158 }
159}
160
161#ifdef USE_READY_BUSY_PIN
162/*
163 * read device ready pin
164 */
165static int ppchameleon_device_ready(struct mtd_info *minfo)
166{
167 if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_RB_GPIO_PIN)
168 return 1;
169 return 0;
170}
171
172static int ppchameleonevb_device_ready(struct mtd_info *minfo)
173{
174 if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
175 return 1;
176 return 0;
177}
178#endif
179
180#ifdef CONFIG_MTD_PARTITIONS
181const char *part_probes[] = { "cmdlinepart", NULL };
182const char *part_probes_evb[] = { "cmdlinepart", NULL };
183#endif
184
185/*
186 * Main initialization routine
187 */
188static int __init ppchameleonevb_init (void)
189{
190 struct nand_chip *this;
191 const char *part_type = 0;
192 int mtd_parts_nb = 0;
193 struct mtd_partition *mtd_parts = 0;
194 void __iomem *ppchameleon_fio_base;
195 void __iomem *ppchameleonevb_fio_base;
196
197
198 /*********************************
199 * Processor module NAND (if any) *
200 *********************************/
201 /* Allocate memory for MTD device structure and private data */
202 ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) +
203 sizeof(struct nand_chip), GFP_KERNEL);
204 if (!ppchameleon_mtd) {
205 printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
206 return -ENOMEM;
207 }
208
209 /* map physical address */
210 ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
211 if(!ppchameleon_fio_base) {
212 printk("ioremap PPChameleon NAND flash failed\n");
213 kfree(ppchameleon_mtd);
214 return -EIO;
215 }
216
217 /* Get pointer to private data */
218 this = (struct nand_chip *) (&ppchameleon_mtd[1]);
219
220 /* Initialize structures */
221 memset((char *) ppchameleon_mtd, 0, sizeof(struct mtd_info));
222 memset((char *) this, 0, sizeof(struct nand_chip));
223
224 /* Link the private data with the MTD structure */
225 ppchameleon_mtd->priv = this;
226
227 /* Initialize GPIOs */
228 /* Pin mapping for NAND chip */
229 /*
230 CE GPIO_01
231 CLE GPIO_02
232 ALE GPIO_03
233 R/B GPIO_04
234 */
235 /* output select */
236 out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xC0FFFFFF);
237 /* three-state select */
238 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xC0FFFFFF);
239 /* enable output driver */
240 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
241#ifdef USE_READY_BUSY_PIN
242 /* three-state select */
243 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFF3FFFFF);
 244	/* high-impedance */
245 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
246 /* input select */
247 out_be32((volatile unsigned*)GPIO0_ISR1H, (in_be32((volatile unsigned*)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
248#endif
249
250 /* insert callbacks */
251 this->IO_ADDR_R = ppchameleon_fio_base;
252 this->IO_ADDR_W = ppchameleon_fio_base;
253 this->hwcontrol = ppchameleon_hwcontrol;
254#ifdef USE_READY_BUSY_PIN
255 this->dev_ready = ppchameleon_device_ready;
256#endif
257 this->chip_delay = NAND_BIG_DELAY_US;
258 /* ECC mode */
259 this->eccmode = NAND_ECC_SOFT;
260
 261	/* Scan to find existence of the device (it may not be populated) */
262 if (nand_scan (ppchameleon_mtd, 1)) {
263 iounmap((void *)ppchameleon_fio_base);
264 kfree (ppchameleon_mtd);
265 goto nand_evb_init;
266 }
267
268#ifndef USE_READY_BUSY_PIN
269 /* Adjust delay if necessary */
270 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
271 this->chip_delay = NAND_SMALL_DELAY_US;
272#endif
273
274#ifdef CONFIG_MTD_PARTITIONS
275 ppchameleon_mtd->name = "ppchameleon-nand";
276 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
277 if (mtd_parts_nb > 0)
278 part_type = "command line";
279 else
280 mtd_parts_nb = 0;
281#endif
282 if (mtd_parts_nb == 0)
283 {
284 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
285 mtd_parts = partition_info_me;
286 else
287 mtd_parts = partition_info_hi;
288 mtd_parts_nb = NUM_PARTITIONS;
289 part_type = "static";
290 }
291
292 /* Register the partitions */
293 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
294 add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
295
296nand_evb_init:
297 /****************************
298 * EVB NAND (always present) *
299 ****************************/
300 /* Allocate memory for MTD device structure and private data */
301 ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) +
302 sizeof(struct nand_chip), GFP_KERNEL);
303 if (!ppchameleonevb_mtd) {
304 printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
305 return -ENOMEM;
306 }
307
308 /* map physical address */
309 ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
310 if(!ppchameleonevb_fio_base) {
311 printk("ioremap PPChameleonEVB NAND flash failed\n");
312 kfree(ppchameleonevb_mtd);
313 return -EIO;
314 }
315
316 /* Get pointer to private data */
317 this = (struct nand_chip *) (&ppchameleonevb_mtd[1]);
318
319 /* Initialize structures */
320 memset((char *) ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
321 memset((char *) this, 0, sizeof(struct nand_chip));
322
323 /* Link the private data with the MTD structure */
324 ppchameleonevb_mtd->priv = this;
325
326 /* Initialize GPIOs */
327 /* Pin mapping for NAND chip */
328 /*
329 CE GPIO_14
330 CLE GPIO_15
331 ALE GPIO_16
332 R/B GPIO_31
333 */
334 /* output select */
335 out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xFFFFFFF0);
336 out_be32((volatile unsigned*)GPIO0_OSRL, in_be32((volatile unsigned*)GPIO0_OSRL) & 0x3FFFFFFF);
337 /* three-state select */
338 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFFFFFFF0);
339 out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0x3FFFFFFF);
340 /* enable output driver */
341 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
342 NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
343#ifdef USE_READY_BUSY_PIN
344 /* three-state select */
345 out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0xFFFFFFFC);
 346	/* high-impedance */
347 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
348 /* input select */
349 out_be32((volatile unsigned*)GPIO0_ISR1L, (in_be32((volatile unsigned*)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
350#endif
351
352 /* insert callbacks */
353 this->IO_ADDR_R = ppchameleonevb_fio_base;
354 this->IO_ADDR_W = ppchameleonevb_fio_base;
355 this->hwcontrol = ppchameleonevb_hwcontrol;
356#ifdef USE_READY_BUSY_PIN
357 this->dev_ready = ppchameleonevb_device_ready;
358#endif
359 this->chip_delay = NAND_SMALL_DELAY_US;
360
361 /* ECC mode */
362 this->eccmode = NAND_ECC_SOFT;
363
364 /* Scan to find existence of the device */
365 if (nand_scan (ppchameleonevb_mtd, 1)) {
366 iounmap((void *)ppchameleonevb_fio_base);
367 kfree (ppchameleonevb_mtd);
368 return -ENXIO;
369 }
370
371#ifdef CONFIG_MTD_PARTITIONS
372 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
373 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
374 if (mtd_parts_nb > 0)
375 part_type = "command line";
376 else
377 mtd_parts_nb = 0;
378#endif
379 if (mtd_parts_nb == 0)
380 {
381 mtd_parts = partition_info_evb;
382 mtd_parts_nb = NUM_PARTITIONS;
383 part_type = "static";
384 }
385
386 /* Register the partitions */
387 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
388 add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
389
390 /* Return happy */
391 return 0;
392}
393module_init(ppchameleonevb_init);
394
395/*
396 * Clean up routine
397 */
398static void __exit ppchameleonevb_cleanup (void)
399{
400 struct nand_chip *this;
401
402 /* Release resources, unregister device(s) */
403 nand_release (ppchameleon_mtd);
404 nand_release (ppchameleonevb_mtd);
405
406 /* Release iomaps */
407 this = (struct nand_chip *) &ppchameleon_mtd[1];
 408	iounmap((void *) this->IO_ADDR_R);
409 this = (struct nand_chip *) &ppchameleonevb_mtd[1];
 410	iounmap((void *) this->IO_ADDR_R);
411
412 /* Free the MTD device structure */
413 kfree (ppchameleon_mtd);
414 kfree (ppchameleonevb_mtd);
415}
416module_exit(ppchameleonevb_cleanup);
417
418MODULE_LICENSE("GPL");
419MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
420MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
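Both nandsim above and this board driver follow the same allocation pattern: a single kmalloc() holds struct mtd_info immediately followed by struct nand_chip (and, in nandsim's case, struct nandsim), and the pieces are wired together through the priv pointers before the callbacks are registered and nand_scan() is run. The user-space sketch below, with simplified stand-in structs and an invented alloc_board_mtd() helper, only illustrates that layout; it is not the kernel API.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in structures; the real struct mtd_info / struct nand_chip live in
 * <linux/mtd/mtd.h> and <linux/mtd/nand.h> and are far larger. */
struct nand_chip_s { void *priv; int chip_delay; };
struct mtd_info_s  { void *priv; const char *name; };

/*
 * One allocation carrying both structures, linked through priv:
 *
 *   +-----------+------------+
 *   | mtd_info  | nand_chip  |
 *   +-----------+------------+
 *        priv --------^
 */
static struct mtd_info_s *alloc_board_mtd(const char *name)
{
	struct mtd_info_s *mtd;
	struct nand_chip_s *chip;

	mtd = malloc(sizeof(*mtd) + sizeof(*chip));   /* kmalloc() in the kernel */
	if (!mtd)
		return NULL;
	memset(mtd, 0, sizeof(*mtd) + sizeof(*chip));

	chip = (struct nand_chip_s *)(mtd + 1);       /* chip lives right after mtd */
	mtd->priv = chip;                             /* mtd -> chip link */
	mtd->name = name;
	return mtd;
}

int main(void)
{
	struct mtd_info_s *mtd = alloc_board_mtd("example-nand");
	if (!mtd)
		return 1;
	printf("%s: chip at %p, right after mtd at %p\n",
	       mtd->name, mtd->priv, (void *)mtd);
	free(mtd);
	return 0;
}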
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
new file mode 100644
index 000000000000..02305a2adca7
--- /dev/null
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -0,0 +1,559 @@
1/*
2 * drivers/mtd/nand/rtc_from4.c
3 *
4 * Copyright (C) 2004 Red Hat, Inc.
5 *
6 * Derived from drivers/mtd/nand/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 *
9 * $Id: rtc_from4.c,v 1.7 2004/11/04 12:53:10 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Overview:
16 * This is a device driver for the AG-AND flash device found on the
17 * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
18 * which utilizes the Renesas HN29V1G91T-30 part.
 19 *  This chip is a 1 Gibit (128 MiB x 8 bits) AG-AND flash device.
20 */
21
22#include <linux/delay.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/rslib.h>
27#include <linux/module.h>
28#include <linux/mtd/compatmac.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h>
32#include <asm/io.h>
33
34/*
35 * MTD structure for Renesas board
36 */
37static struct mtd_info *rtc_from4_mtd = NULL;
38
39#define RTC_FROM4_MAX_CHIPS 2
40
41/* HS77x9 processor register defines */
42#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
43#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
44#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
45#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
46#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
47#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
48#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
49
50/*
51 * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
52 */
53/* Address where flash is mapped */
54#define RTC_FROM4_FIO_BASE 0x14000000
55
56/* CLE and ALE are tied to address lines 5 & 4, respectively */
57#define RTC_FROM4_CLE (1 << 5)
58#define RTC_FROM4_ALE (1 << 4)
59
60/* address lines A24-A22 used for chip selection */
61#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
62#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
63#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
64/* mask address lines A24-A22 used for chip selection */
65#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
66
67/* FPGA status register for checking device ready (bit zero) */
68#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
69#define RTC_FROM4_DEVICE_READY 0x0001
70
71/* FPGA Reed-Solomon ECC Control register */
72
73#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
74#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
75#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
76#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
77
78/* FPGA Reed-Solomon ECC code base */
79#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
80#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
81
82/* FPGA Reed-Solomon ECC check register */
83#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
84#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
85
86/* Undefine for software ECC */
87#define RTC_FROM4_HWECC 1
88
89/*
90 * Module stuff
91 */
92static void __iomem *rtc_from4_fio_base = P2SEGADDR(RTC_FROM4_FIO_BASE);
93
94const static struct mtd_partition partition_info[] = {
95 {
96 .name = "Renesas flash partition 1",
97 .offset = 0,
98 .size = MTDPART_SIZ_FULL
99 },
100};
101#define NUM_PARTITIONS 1
102
103/*
 104 * hardware specific flash bbt descriptors
105 * Note: this is to allow debugging by disabling
106 * NAND_BBT_CREATE and/or NAND_BBT_WRITE
107 *
108 */
109static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
110static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
111
112static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
113 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
114 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
115 .offs = 40,
116 .len = 4,
117 .veroffs = 44,
118 .maxblocks = 4,
119 .pattern = bbt_pattern
120};
121
122static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
123 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
124 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
125 .offs = 40,
126 .len = 4,
127 .veroffs = 44,
128 .maxblocks = 4,
129 .pattern = mirror_pattern
130};
131
132
133
134#ifdef RTC_FROM4_HWECC
135
136/* the Reed Solomon control structure */
137static struct rs_control *rs_decoder;
138
139/*
140 * hardware specific Out Of Band information
141 */
142static struct nand_oobinfo rtc_from4_nand_oobinfo = {
143 .useecc = MTD_NANDECC_AUTOPLACE,
144 .eccbytes = 32,
145 .eccpos = {
146 0, 1, 2, 3, 4, 5, 6, 7,
147 8, 9, 10, 11, 12, 13, 14, 15,
148 16, 17, 18, 19, 20, 21, 22, 23,
149 24, 25, 26, 27, 28, 29, 30, 31},
150 .oobfree = { {32, 32} }
151};
152
153/* Aargh. I missed the reversed bit order, when I
154 * was talking to Renesas about the FPGA.
155 *
156 * The table is used for bit reordering and inversion
157 * of the ecc byte which we get from the FPGA
158 */
159static uint8_t revbits[256] = {
160 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
161 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
162 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
163 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
164 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
165 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
166 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
167 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
168 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
169 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
170 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
171 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
172 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
173 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
174 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
175 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
176 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
177 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
178 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
179 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
180 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
181 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
182 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
183 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
184 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
185 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
186 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
187 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
188 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
189 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
190 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
191 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
192};
193
194#endif
195
196
197
198/*
199 * rtc_from4_hwcontrol - hardware specific access to control-lines
200 * @mtd: MTD device structure
201 * @cmd: hardware control command
202 *
203 * Address lines (A5 and A4) are used to control Command and Address Latch
204 * Enable on this board, so set the read/write address appropriately.
205 *
206 * Chip Enable is also controlled by the Chip Select (CS5) and
207 * Address lines (A24-A22), so no action is required here.
208 *
209 */
210static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd)
211{
212 struct nand_chip* this = (struct nand_chip *) (mtd->priv);
213
214 switch(cmd) {
215
216 case NAND_CTL_SETCLE:
217 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_CLE);
218 break;
219 case NAND_CTL_CLRCLE:
220 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_CLE);
221 break;
222
223 case NAND_CTL_SETALE:
224 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_ALE);
225 break;
226 case NAND_CTL_CLRALE:
227 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_ALE);
228 break;
229
230 case NAND_CTL_SETNCE:
231 break;
232 case NAND_CTL_CLRNCE:
233 break;
234
235 }
236}
237
238
239/*
240 * rtc_from4_nand_select_chip - hardware specific chip select
241 * @mtd: MTD device structure
242 * @chip: Chip to select (0 == slot 3, 1 == slot 4)
243 *
244 * The chip select is based on address lines A24-A22.
245 * This driver uses flash slots 3 and 4 (A23-A22).
246 *
247 */
248static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
249{
250 struct nand_chip *this = mtd->priv;
251
252 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
253 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
254
255 switch(chip) {
256
257 case 0: /* select slot 3 chip */
258 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
259 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
260 break;
261 case 1: /* select slot 4 chip */
262 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
263 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
264 break;
265
266 }
267}
268
269
270
271/*
272 * rtc_from4_nand_device_ready - hardware specific ready/busy check
273 * @mtd: MTD device structure
274 *
275 * This board provides the Ready/Busy state in the status register
276 * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
277 *
278 */
279static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
280{
281 unsigned short status;
282
283 status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
284
285 return (status & RTC_FROM4_DEVICE_READY);
286
287}
288
289#ifdef RTC_FROM4_HWECC
290/*
291 * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
292 * @mtd: MTD device structure
293 * @mode: I/O mode; read or write
294 *
295 * enable hardware ECC for data read or write
296 *
297 */
298static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
299{
300 volatile unsigned short * rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
301 unsigned short status;
302
303 switch (mode) {
304 case NAND_ECC_READ :
305 status = RTC_FROM4_RS_ECC_CTL_CLR
306 | RTC_FROM4_RS_ECC_CTL_FD_E;
307
308 *rs_ecc_ctl = status;
309 break;
310
311 case NAND_ECC_READSYN :
312 status = 0x00;
313
314 *rs_ecc_ctl = status;
315 break;
316
317 case NAND_ECC_WRITE :
318 status = RTC_FROM4_RS_ECC_CTL_CLR
319 | RTC_FROM4_RS_ECC_CTL_GEN
320 | RTC_FROM4_RS_ECC_CTL_FD_E;
321
322 *rs_ecc_ctl = status;
323 break;
324
325 default:
326 BUG();
327 break;
328 }
329
330}
331
332/*
333 * rtc_from4_calculate_ecc - hardware specific code to read ECC code
334 * @mtd: MTD device structure
335 * @dat: buffer containing the data to generate ECC codes
 336 * @ecc_code:	ECC codes calculated
337 *
338 * The ECC code is calculated by the FPGA. All we have to do is read the values
339 * from the FPGA registers.
340 *
341 * Note: We read from the inverted registers, since data is inverted before
342 * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
343 *
344 */
345static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
346{
347 volatile unsigned short * rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
348 unsigned short value;
349 int i;
350
351 for (i = 0; i < 8; i++) {
352 value = *rs_eccn;
353 ecc_code[i] = (unsigned char)value;
354 rs_eccn++;
355 }
356 ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
357}
358
359/*
360 * rtc_from4_correct_data - hardware specific code to correct data using ECC code
361 * @mtd: MTD device structure
362 * @buf: buffer containing the data to generate ECC codes
 363 * @ecc1:	ECC codes read
 364 * @ecc2:	ECC codes calculated
365 *
 366 * The FPGA quickly tells us whether there is an error. If not, we return happily;
 367 * otherwise we read the ECC results from the FPGA and call the RS library to decode
 368 * and, hopefully, correct the error.
 369 *
 370 * For now the ECC code read from the FLASH is fed to the RS library,
 371 * as the syndrome conversion has an unresolved issue.
372 */
373static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
374{
375 int i, j, res;
376 unsigned short status;
377 uint16_t par[6], syn[6], tmp;
378 uint8_t ecc[8];
379 volatile unsigned short *rs_ecc;
380
381 status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
382
383 if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
384 return 0;
385 }
386
 387	/* Read the syndrome pattern from the FPGA and correct the bit order */
388 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
389 for (i = 0; i < 8; i++) {
390 ecc[i] = revbits[(*rs_ecc) & 0xFF];
391 rs_ecc++;
392 }
393
394 /* convert into 6 10bit syndrome fields */
395 par[5] = rs_decoder->index_of[(((uint16_t)ecc[0] >> 0) & 0x0ff) |
396 (((uint16_t)ecc[1] << 8) & 0x300)];
397 par[4] = rs_decoder->index_of[(((uint16_t)ecc[1] >> 2) & 0x03f) |
398 (((uint16_t)ecc[2] << 6) & 0x3c0)];
399 par[3] = rs_decoder->index_of[(((uint16_t)ecc[2] >> 4) & 0x00f) |
400 (((uint16_t)ecc[3] << 4) & 0x3f0)];
401 par[2] = rs_decoder->index_of[(((uint16_t)ecc[3] >> 6) & 0x003) |
402 (((uint16_t)ecc[4] << 2) & 0x3fc)];
403 par[1] = rs_decoder->index_of[(((uint16_t)ecc[5] >> 0) & 0x0ff) |
404 (((uint16_t)ecc[6] << 8) & 0x300)];
405 par[0] = (((uint16_t)ecc[6] >> 2) & 0x03f) | (((uint16_t)ecc[7] << 6) & 0x3c0);
406
407 /* Convert to computable syndrome */
408 for (i = 0; i < 6; i++) {
409 syn[i] = par[0];
410 for (j = 1; j < 6; j++)
411 if (par[j] != rs_decoder->nn)
412 syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
413
414 /* Convert to index form */
415 syn[i] = rs_decoder->index_of[syn[i]];
416 }
417
418 /* Let the library code do its magic.*/
419 res = decode_rs8(rs_decoder, buf, par, 512, syn, 0, NULL, 0xff, NULL);
420 if (res > 0) {
421 DEBUG (MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: "
422 "ECC corrected %d errors on read\n", res);
423 }
424 return res;
425}
426#endif
427
428/*
429 * Main initialization routine
430 */
431int __init rtc_from4_init (void)
432{
433 struct nand_chip *this;
434 unsigned short bcr1, bcr2, wcr2;
435
436 /* Allocate memory for MTD device structure and private data */
437 rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof (struct nand_chip),
438 GFP_KERNEL);
439 if (!rtc_from4_mtd) {
440 printk ("Unable to allocate Renesas NAND MTD device structure.\n");
441 return -ENOMEM;
442 }
443
444 /* Get pointer to private data */
445 this = (struct nand_chip *) (&rtc_from4_mtd[1]);
446
447 /* Initialize structures */
448 memset((char *) rtc_from4_mtd, 0, sizeof(struct mtd_info));
449 memset((char *) this, 0, sizeof(struct nand_chip));
450
451 /* Link the private data with the MTD structure */
452 rtc_from4_mtd->priv = this;
453
 454	/* set area 5 to PCMCIA mode to meet the tDH spec (data hold time; 9 ns min) */
455 bcr1 = *SH77X9_BCR1 & ~0x0002;
456 bcr1 |= 0x0002;
457 *SH77X9_BCR1 = bcr1;
458
 459	/* set BCR2 (bus control register 2) */
460 bcr2 = *SH77X9_BCR2 & ~0x0c00;
461 bcr2 |= 0x0800;
462 *SH77X9_BCR2 = bcr2;
463
464 /* set area 5 wait states */
465 wcr2 = *SH77X9_WCR2 & ~0x1c00;
466 wcr2 |= 0x1c00;
467 *SH77X9_WCR2 = wcr2;
468
469 /* Set address of NAND IO lines */
470 this->IO_ADDR_R = rtc_from4_fio_base;
471 this->IO_ADDR_W = rtc_from4_fio_base;
472 /* Set address of hardware control function */
473 this->hwcontrol = rtc_from4_hwcontrol;
474 /* Set address of chip select function */
475 this->select_chip = rtc_from4_nand_select_chip;
476 /* command delay time (in us) */
477 this->chip_delay = 100;
478 /* return the status of the Ready/Busy line */
479 this->dev_ready = rtc_from4_nand_device_ready;
480
481#ifdef RTC_FROM4_HWECC
482 printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
483
484 this->eccmode = NAND_ECC_HW8_512;
485 this->options |= NAND_HWECC_SYNDROME;
486 /* set the nand_oobinfo to support FPGA H/W error detection */
487 this->autooob = &rtc_from4_nand_oobinfo;
488 this->enable_hwecc = rtc_from4_enable_hwecc;
489 this->calculate_ecc = rtc_from4_calculate_ecc;
490 this->correct_data = rtc_from4_correct_data;
491#else
492 printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
493
494 this->eccmode = NAND_ECC_SOFT;
495#endif
496
497 /* set the bad block tables to support debugging */
498 this->bbt_td = &rtc_from4_bbt_main_descr;
499 this->bbt_md = &rtc_from4_bbt_mirror_descr;
500
501 /* Scan to find existence of the device */
502 if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
503 kfree(rtc_from4_mtd);
504 return -ENXIO;
505 }
506
507 /* Register the partitions */
508 add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
509
510#ifdef RTC_FROM4_HWECC
511 /* We could create the decoder on demand, if memory is a concern.
512 * This way we have it handy, if an error happens
513 *
 514	 * Symbol size is 10 (bits)
 515	 * Primitive polynomial is x^10+x^3+1
 516	 * first consecutive root is 0
 517	 * primitive element to generate roots = 1
 518	 * generator polynomial degree = 6
519 */
520 rs_decoder = init_rs(10, 0x409, 0, 1, 6);
521 if (!rs_decoder) {
522 printk (KERN_ERR "Could not create a RS decoder\n");
523 nand_release(rtc_from4_mtd);
524 kfree(rtc_from4_mtd);
525 return -ENOMEM;
526 }
527#endif
528 /* Return happy */
529 return 0;
530}
531module_init(rtc_from4_init);
532
533
534/*
535 * Clean up routine
536 */
537#ifdef MODULE
538static void __exit rtc_from4_cleanup (void)
539{
540 /* Release resource, unregister partitions */
541 nand_release(rtc_from4_mtd);
542
543 /* Free the MTD device structure */
544 kfree (rtc_from4_mtd);
545
546#ifdef RTC_FROM4_HWECC
547 /* Free the reed solomon resources */
548 if (rs_decoder) {
549 free_rs(rs_decoder);
550 }
551#endif
552}
553module_exit(rtc_from4_cleanup);
554#endif
555
556MODULE_LICENSE("GPL");
 557MODULE_AUTHOR("d.marlin <dmarlin@redhat.com>");
558MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
559
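The syndrome handling above hinges on repacking the eight bit-reversed ECC bytes read back from the FPGA into six 10-bit Reed-Solomon parity symbols. The stand-alone sketch below repeats exactly that bit slicing (the par[] expressions are copied from rtc_from4_correct_data()); the rtc_from4_pack_parity() name and the example input are inventions for illustration, and the driver additionally maps par[5]..par[1] through rs_decoder->index_of[] before computing the syndrome.

#include <stdint.h>
#include <stdio.h>

/*
 * Repack eight (already bit-reversed) ECC bytes into six 10-bit parity
 * symbols, mirroring the slicing done in rtc_from4_correct_data().  Only the
 * repacking is shown here; the driver also converts five of the six symbols
 * to index form via rs_decoder->index_of[] before building the syndrome.
 */
static void rtc_from4_pack_parity(const uint8_t ecc[8], uint16_t par[6])
{
	par[5] = (((uint16_t)ecc[0] >> 0) & 0x0ff) | (((uint16_t)ecc[1] << 8) & 0x300);
	par[4] = (((uint16_t)ecc[1] >> 2) & 0x03f) | (((uint16_t)ecc[2] << 6) & 0x3c0);
	par[3] = (((uint16_t)ecc[2] >> 4) & 0x00f) | (((uint16_t)ecc[3] << 4) & 0x3f0);
	par[2] = (((uint16_t)ecc[3] >> 6) & 0x003) | (((uint16_t)ecc[4] << 2) & 0x3fc);
	par[1] = (((uint16_t)ecc[5] >> 0) & 0x0ff) | (((uint16_t)ecc[6] << 8) & 0x300);
	par[0] = (((uint16_t)ecc[6] >> 2) & 0x03f) | (((uint16_t)ecc[7] << 6) & 0x3c0);
}

int main(void)
{
	/* Arbitrary example input, not real FPGA output. */
	const uint8_t ecc[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	uint16_t par[6];
	int i;

	rtc_from4_pack_parity(ecc, par);
	for (i = 0; i < 6; i++)
		printf("par[%d] = 0x%03x\n", i, (unsigned)par[i]);
	return 0;
}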
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
new file mode 100644
index 000000000000..d05e9b97947d
--- /dev/null
+++ b/drivers/mtd/nand/s3c2410.c
@@ -0,0 +1,704 @@
1/* linux/drivers/mtd/nand/s3c2410.c
2 *
3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Samsung S3C2410 NAND driver
7 *
8 * Changelog:
9 * 21-Sep-2004 BJD Initial version
 10 *	23-Sep-2004  BJD  Multiple device support
11 * 28-Sep-2004 BJD Fixed ECC placement for Hardware mode
12 * 12-Oct-2004 BJD Fixed errors in use of platform data
13 *
14 * $Id: s3c2410.c,v 1.7 2005/01/05 18:05:14 dwmw2 Exp $
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29*/
30
31#include <config/mtd/nand/s3c2410/hwecc.h>
32#include <config/mtd/nand/s3c2410/debug.h>
33
34#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
35#define DEBUG
36#endif
37
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/string.h>
43#include <linux/ioport.h>
44#include <linux/device.h>
45#include <linux/delay.h>
46#include <linux/err.h>
47
48#include <linux/mtd/mtd.h>
49#include <linux/mtd/nand.h>
50#include <linux/mtd/nand_ecc.h>
51#include <linux/mtd/partitions.h>
52
53#include <asm/io.h>
54#include <asm/mach-types.h>
55#include <asm/hardware/clock.h>
56
57#include <asm/arch/regs-nand.h>
58#include <asm/arch/nand.h>
59
60#define PFX "s3c2410-nand: "
61
62#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
63static int hardware_ecc = 1;
64#else
65static int hardware_ecc = 0;
66#endif
67
68/* new oob placement block for use with hardware ecc generation
69 */
70
71static struct nand_oobinfo nand_hw_eccoob = {
72 .useecc = MTD_NANDECC_AUTOPLACE,
73 .eccbytes = 3,
74 .eccpos = {0, 1, 2 },
75 .oobfree = { {8, 8} }
76};
77
78/* controller and mtd information */
79
80struct s3c2410_nand_info;
81
82struct s3c2410_nand_mtd {
83 struct mtd_info mtd;
84 struct nand_chip chip;
85 struct s3c2410_nand_set *set;
86 struct s3c2410_nand_info *info;
87 int scan_res;
88};
89
90/* overview of the s3c2410 nand state */
91
92struct s3c2410_nand_info {
93 /* mtd info */
94 struct nand_hw_control controller;
95 struct s3c2410_nand_mtd *mtds;
96 struct s3c2410_platform_nand *platform;
97
98 /* device info */
99 struct device *device;
100 struct resource *area;
101 struct clk *clk;
102 void *regs;
103 int mtd_count;
104};
105
106/* conversion functions */
107
108static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
109{
110 return container_of(mtd, struct s3c2410_nand_mtd, mtd);
111}
112
113static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
114{
115 return s3c2410_nand_mtd_toours(mtd)->info;
116}
117
118static struct s3c2410_nand_info *to_nand_info(struct device *dev)
119{
120 return dev_get_drvdata(dev);
121}
122
123static struct s3c2410_platform_nand *to_nand_plat(struct device *dev)
124{
125 return dev->platform_data;
126}
127
128/* timing calculations */
129
130#define NS_IN_KHZ 10000000
131
132static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
133{
134 int result;
135
136 result = (wanted * NS_IN_KHZ) / clk;
137 result++;
138
139 pr_debug("result %d from %ld, %d\n", result, clk, wanted);
140
141 if (result > max) {
142 printk("%d ns is too big for current clock rate %ld\n",
143 wanted, clk);
144 return -1;
145 }
146
147 if (result < 1)
148 result = 1;
149
150 return result;
151}
152
153#define to_ns(ticks,clk) (((clk) * (ticks)) / NS_IN_KHZ)
154
155/* controller setup */
156
157static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
158 struct device *dev)
159{
160 struct s3c2410_platform_nand *plat = to_nand_plat(dev);
161 int tacls, twrph0, twrph1;
162 unsigned long clkrate = clk_get_rate(info->clk);
163 unsigned long cfg;
164
165 /* calculate the timing information for the controller */
166
167 if (plat != NULL) {
168 tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 8);
169 twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8);
170 twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8);
171 } else {
172 /* default timings */
173 tacls = 8;
174 twrph0 = 8;
175 twrph1 = 8;
176 }
177
178 if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
179 printk(KERN_ERR PFX "cannot get timings suitable for board\n");
180 return -EINVAL;
181 }
182
183 printk(KERN_INFO PFX "timing: Tacls %ldns, Twrph0 %ldns, Twrph1 %ldns\n",
184 to_ns(tacls, clkrate),
185 to_ns(twrph0, clkrate),
186 to_ns(twrph1, clkrate));
187
188 cfg = S3C2410_NFCONF_EN;
189 cfg |= S3C2410_NFCONF_TACLS(tacls-1);
190 cfg |= S3C2410_NFCONF_TWRPH0(twrph0-1);
191 cfg |= S3C2410_NFCONF_TWRPH1(twrph1-1);
192
193 pr_debug(PFX "NF_CONF is 0x%lx\n", cfg);
194
195 writel(cfg, info->regs + S3C2410_NFCONF);
196 return 0;
197}
198
199/* select chip */
200
201static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
202{
203 struct s3c2410_nand_info *info;
204 struct s3c2410_nand_mtd *nmtd;
205 struct nand_chip *this = mtd->priv;
206 unsigned long cur;
207
208 nmtd = this->priv;
209 info = nmtd->info;
210
211 cur = readl(info->regs + S3C2410_NFCONF);
212
213 if (chip == -1) {
214 cur |= S3C2410_NFCONF_nFCE;
215 } else {
216 if (chip > nmtd->set->nr_chips) {
217 printk(KERN_ERR PFX "chip %d out of range\n", chip);
218 return;
219 }
220
221 if (info->platform != NULL) {
222 if (info->platform->select_chip != NULL)
223 (info->platform->select_chip)(nmtd->set, chip);
224 }
225
226 cur &= ~S3C2410_NFCONF_nFCE;
227 }
228
229 writel(cur, info->regs + S3C2410_NFCONF);
230}
231
232/* command and control functions */
233
234static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd)
235{
236 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
237 unsigned long cur;
238
239 switch (cmd) {
240 case NAND_CTL_SETNCE:
241 cur = readl(info->regs + S3C2410_NFCONF);
242 cur &= ~S3C2410_NFCONF_nFCE;
243 writel(cur, info->regs + S3C2410_NFCONF);
244 break;
245
246 case NAND_CTL_CLRNCE:
247 cur = readl(info->regs + S3C2410_NFCONF);
248 cur |= S3C2410_NFCONF_nFCE;
249 writel(cur, info->regs + S3C2410_NFCONF);
250 break;
251
252 /* we don't need to implement these */
253 case NAND_CTL_SETCLE:
254 case NAND_CTL_CLRCLE:
255 case NAND_CTL_SETALE:
256 case NAND_CTL_CLRALE:
257 pr_debug(PFX "s3c2410_nand_hwcontrol(%d) unused\n", cmd);
258 break;
259 }
260}
261
262/* s3c2410_nand_command
263 *
264 * This function implements sending commands and the relevant address
265 * information to the chip, via the hardware controller. Since the
266 * S3C2410 generates the correct ALE/CLE signaling automatically, we
267 * do not need to use hwcontrol.
268*/
269
270static void s3c2410_nand_command (struct mtd_info *mtd, unsigned command,
271 int column, int page_addr)
272{
273 register struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
274 register struct nand_chip *this = mtd->priv;
275
276 /*
277 * Write out the command to the device.
278 */
279 if (command == NAND_CMD_SEQIN) {
280 int readcmd;
281
282 if (column >= mtd->oobblock) {
283 /* OOB area */
284 column -= mtd->oobblock;
285 readcmd = NAND_CMD_READOOB;
286 } else if (column < 256) {
287 /* First 256 bytes --> READ0 */
288 readcmd = NAND_CMD_READ0;
289 } else {
290 column -= 256;
291 readcmd = NAND_CMD_READ1;
292 }
293
294 writeb(readcmd, info->regs + S3C2410_NFCMD);
295 }
296 writeb(command, info->regs + S3C2410_NFCMD);
297
298 /* Set ALE and clear CLE to start address cycle */
299
300 if (column != -1 || page_addr != -1) {
301
302 /* Serially input address */
303 if (column != -1) {
304 /* Adjust columns for 16 bit buswidth */
305 if (this->options & NAND_BUSWIDTH_16)
306 column >>= 1;
307 writeb(column, info->regs + S3C2410_NFADDR);
308 }
309 if (page_addr != -1) {
310 writeb((unsigned char) (page_addr), info->regs + S3C2410_NFADDR);
311 writeb((unsigned char) (page_addr >> 8), info->regs + S3C2410_NFADDR);
312 /* One more address cycle for higher density devices */
313 if (this->chipsize & 0x0c000000)
314 writeb((unsigned char) ((page_addr >> 16) & 0x0f),
315 info->regs + S3C2410_NFADDR);
316 }
317 /* Latch in address */
318 }
319
320 /*
321 * program and erase have their own busy handlers
322 * status and sequential in needs no delay
323 */
324 switch (command) {
325
326 case NAND_CMD_PAGEPROG:
327 case NAND_CMD_ERASE1:
328 case NAND_CMD_ERASE2:
329 case NAND_CMD_SEQIN:
330 case NAND_CMD_STATUS:
331 return;
332
333 case NAND_CMD_RESET:
334 if (this->dev_ready)
335 break;
336
337 udelay(this->chip_delay);
338 writeb(NAND_CMD_STATUS, info->regs + S3C2410_NFCMD);
339
340 while ( !(this->read_byte(mtd) & 0x40));
341 return;
342
343 /* This applies to read commands */
344 default:
345 /*
346 * If we don't have access to the busy pin, we apply the given
347 * command delay
348 */
349 if (!this->dev_ready) {
350 udelay (this->chip_delay);
351 return;
352 }
353 }
354
355 /* Apply this short delay always to ensure that we do wait tWB in
356 * any case on any machine. */
357 ndelay (100);
358 /* wait until command is processed */
359 while (!this->dev_ready(mtd));
360}
361
362
363/* s3c2410_nand_devready()
364 *
365 * returns 0 if the nand is busy, 1 if it is ready
366*/
367
368static int s3c2410_nand_devready(struct mtd_info *mtd)
369{
370 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
371
372 return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
373}
374
375/* ECC handling functions */
376
377static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
378 u_char *read_ecc, u_char *calc_ecc)
379{
380 pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n",
381 mtd, dat, read_ecc, calc_ecc);
382
383 pr_debug("eccs: read %02x,%02x,%02x vs calc %02x,%02x,%02x\n",
384 read_ecc[0], read_ecc[1], read_ecc[2],
385 calc_ecc[0], calc_ecc[1], calc_ecc[2]);
386
387 if (read_ecc[0] == calc_ecc[0] &&
388 read_ecc[1] == calc_ecc[1] &&
389 read_ecc[2] == calc_ecc[2])
390 return 0;
391
392 /* we currently have no method for correcting the error */
393
394 return -1;
395}
396
397static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
398{
399 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
400 unsigned long ctrl;
401
402 ctrl = readl(info->regs + S3C2410_NFCONF);
403 ctrl |= S3C2410_NFCONF_INITECC;
404 writel(ctrl, info->regs + S3C2410_NFCONF);
405}
406
407static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd,
408 const u_char *dat, u_char *ecc_code)
409{
410 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
411
412 ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
413 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
414 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
415
416 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n",
417 ecc_code[0], ecc_code[1], ecc_code[2]);
418
419 return 0;
420}
421
422
423/* over-ride the standard functions for a little more speed? */
424
425static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
426{
427 struct nand_chip *this = mtd->priv;
428 readsb(this->IO_ADDR_R, buf, len);
429}
430
431static void s3c2410_nand_write_buf(struct mtd_info *mtd,
432 const u_char *buf, int len)
433{
434 struct nand_chip *this = mtd->priv;
435 writesb(this->IO_ADDR_W, buf, len);
436}
437
438/* device management functions */
439
440static int s3c2410_nand_remove(struct device *dev)
441{
442 struct s3c2410_nand_info *info = to_nand_info(dev);
443
444 dev_set_drvdata(dev, NULL);
445
446 if (info == NULL)
447 return 0;
448
449 /* first thing we need to do is release all our mtds
450 * and their partitions, then go through freeing the
451 * resources used
452 */
453
454 if (info->mtds != NULL) {
455 struct s3c2410_nand_mtd *ptr = info->mtds;
456 int mtdno;
457
458 for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
459 pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
460 nand_release(&ptr->mtd);
461 }
462
463 kfree(info->mtds);
464 }
465
466 /* free the common resources */
467
468 if (info->clk != NULL && !IS_ERR(info->clk)) {
469 clk_disable(info->clk);
470 clk_unuse(info->clk);
471 clk_put(info->clk);
472 }
473
474 if (info->regs != NULL) {
475 iounmap(info->regs);
476 info->regs = NULL;
477 }
478
479 if (info->area != NULL) {
480 release_resource(info->area);
481 kfree(info->area);
482 info->area = NULL;
483 }
484
485 kfree(info);
486
487 return 0;
488}
489
490#ifdef CONFIG_MTD_PARTITIONS
491static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
492 struct s3c2410_nand_mtd *mtd,
493 struct s3c2410_nand_set *set)
494{
495 if (set == NULL)
496 return add_mtd_device(&mtd->mtd);
497
498 if (set->nr_partitions > 0 && set->partitions != NULL) {
499 return add_mtd_partitions(&mtd->mtd,
500 set->partitions,
501 set->nr_partitions);
502 }
503
504 return add_mtd_device(&mtd->mtd);
505}
506#else
507static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
508 struct s3c2410_nand_mtd *mtd,
509 struct s3c2410_nand_set *set)
510{
511 return add_mtd_device(&mtd->mtd);
512}
513#endif
514
515/* s3c2410_nand_init_chip
516 *
517 * init a single instance of a chip
518*/
519
520static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
521 struct s3c2410_nand_mtd *nmtd,
522 struct s3c2410_nand_set *set)
523{
524 struct nand_chip *chip = &nmtd->chip;
525
526 chip->IO_ADDR_R = (char *)info->regs + S3C2410_NFDATA;
527 chip->IO_ADDR_W = (char *)info->regs + S3C2410_NFDATA;
528 chip->hwcontrol = s3c2410_nand_hwcontrol;
529 chip->dev_ready = s3c2410_nand_devready;
530 chip->cmdfunc = s3c2410_nand_command;
531 chip->write_buf = s3c2410_nand_write_buf;
532 chip->read_buf = s3c2410_nand_read_buf;
533 chip->select_chip = s3c2410_nand_select_chip;
534 chip->chip_delay = 50;
535 chip->priv = nmtd;
536 chip->options = 0;
537 chip->controller = &info->controller;
538
539 nmtd->info = info;
540 nmtd->mtd.priv = chip;
541 nmtd->set = set;
542
543 if (hardware_ecc) {
544 chip->correct_data = s3c2410_nand_correct_data;
545 chip->enable_hwecc = s3c2410_nand_enable_hwecc;
546 chip->calculate_ecc = s3c2410_nand_calculate_ecc;
547 chip->eccmode = NAND_ECC_HW3_512;
548 chip->autooob = &nand_hw_eccoob;
549 } else {
550 chip->eccmode = NAND_ECC_SOFT;
551 }
552}
553
554/* s3c2410_nand_probe
555 *
556 * called by device layer when it finds a device matching
557 * one our driver can handle. This code checks to see if
558 * it can allocate all necessary resources then calls the
559 * nand layer to look for devices
560*/
561
562static int s3c2410_nand_probe(struct device *dev)
563{
564 struct platform_device *pdev = to_platform_device(dev);
565 struct s3c2410_platform_nand *plat = to_nand_plat(dev);
566 struct s3c2410_nand_info *info;
567 struct s3c2410_nand_mtd *nmtd;
568 struct s3c2410_nand_set *sets;
569 struct resource *res;
570 int err = 0;
571 int size;
572 int nr_sets;
573 int setno;
574
575 pr_debug("s3c2410_nand_probe(%p)\n", dev);
576
577 info = kmalloc(sizeof(*info), GFP_KERNEL);
578 if (info == NULL) {
579 printk(KERN_ERR PFX "no memory for flash info\n");
580 err = -ENOMEM;
581 goto exit_error;
582 }
583
584 memzero(info, sizeof(*info));
585 dev_set_drvdata(dev, info);
586
587 spin_lock_init(&info->controller.lock);
588
589 /* get the clock source and enable it */
590
591 info->clk = clk_get(dev, "nand");
592 if (IS_ERR(info->clk)) {
593 printk(KERN_ERR PFX "failed to get clock");
594 err = -ENOENT;
595 goto exit_error;
596 }
597
598 clk_use(info->clk);
599 clk_enable(info->clk);
600
601 /* allocate and map the resource */
602
603 res = pdev->resource; /* assume that the flash has one resource */
604 size = res->end - res->start + 1;
605
606 info->area = request_mem_region(res->start, size, pdev->name);
607
608 if (info->area == NULL) {
609 printk(KERN_ERR PFX "cannot reserve register region\n");
610 err = -ENOENT;
611 goto exit_error;
612 }
613
614 info->device = dev;
615 info->platform = plat;
616 info->regs = ioremap(res->start, size);
617
618 if (info->regs == NULL) {
619 printk(KERN_ERR PFX "cannot reserve register region\n");
620 err = -EIO;
621 goto exit_error;
622 }
623
624 printk(KERN_INFO PFX "mapped registers at %p\n", info->regs);
625
626 /* initialise the hardware */
627
628 err = s3c2410_nand_inithw(info, dev);
629 if (err != 0)
630 goto exit_error;
631
632 sets = (plat != NULL) ? plat->sets : NULL;
633 nr_sets = (plat != NULL) ? plat->nr_sets : 1;
634
635 info->mtd_count = nr_sets;
636
637 /* allocate our information */
638
639 size = nr_sets * sizeof(*info->mtds);
640 info->mtds = kmalloc(size, GFP_KERNEL);
641 if (info->mtds == NULL) {
642 printk(KERN_ERR PFX "failed to allocate mtd storage\n");
643 err = -ENOMEM;
644 goto exit_error;
645 }
646
647 memzero(info->mtds, size);
648
649 /* initialise all possible chips */
650
651 nmtd = info->mtds;
652
653 for (setno = 0; setno < nr_sets; setno++, nmtd++) {
654 pr_debug("initialising set %d (%p, info %p)\n",
655 setno, nmtd, info);
656
657 s3c2410_nand_init_chip(info, nmtd, sets);
658
659 nmtd->scan_res = nand_scan(&nmtd->mtd,
660 (sets) ? sets->nr_chips : 1);
661
662 if (nmtd->scan_res == 0) {
663 s3c2410_nand_add_partition(info, nmtd, sets);
664 }
665
666 if (sets != NULL)
667 sets++;
668 }
669
670 pr_debug("initialised ok\n");
671 return 0;
672
673 exit_error:
674 s3c2410_nand_remove(dev);
675
676 if (err == 0)
677 err = -EINVAL;
678 return err;
679}
680
681static struct device_driver s3c2410_nand_driver = {
682 .name = "s3c2410-nand",
683 .bus = &platform_bus_type,
684 .probe = s3c2410_nand_probe,
685 .remove = s3c2410_nand_remove,
686};
687
688static int __init s3c2410_nand_init(void)
689{
690 printk("S3C2410 NAND Driver, (c) 2004 Simtec Electronics\n");
691 return driver_register(&s3c2410_nand_driver);
692}
693
694static void __exit s3c2410_nand_exit(void)
695{
696 driver_unregister(&s3c2410_nand_driver);
697}
698
699module_init(s3c2410_nand_init);
700module_exit(s3c2410_nand_exit);
701
702MODULE_LICENSE("GPL");
703MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
704MODULE_DESCRIPTION("S3C2410 MTD NAND driver");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
new file mode 100755
index 000000000000..29572793334c
--- /dev/null
+++ b/drivers/mtd/nand/sharpsl.c
@@ -0,0 +1,260 @@
1/*
2 * drivers/mtd/nand/sharpsl.c
3 *
4 * Copyright (C) 2004 Richard Purdie
5 *
6 * $Id: sharpsl.c,v 1.3 2005/01/03 14:53:50 rpurdie Exp $
7 *
8 * Based on Sharp's NAND driver sharp_sl.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/genhd.h>
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/delay.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/nand.h>
22#include <linux/mtd/nand_ecc.h>
23#include <linux/mtd/partitions.h>
24#include <linux/interrupt.h>
25#include <asm/io.h>
26#include <asm/hardware.h>
27#include <asm/mach-types.h>
28
29static void __iomem *sharpsl_io_base;
30static int sharpsl_phys_base = 0x0C000000;
31
32/* register offset */
33#define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */
34#define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */
35#define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */
36#define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */
37#define ECCCLRR sharpsl_io_base+0x10 /* clear ECC */
38#define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */
39#define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */
40
41/* Flash control bit */
42#define FLRYBY (1 << 5)
43#define FLCE1 (1 << 4)
44#define FLWP (1 << 3)
45#define FLALE (1 << 2)
46#define FLCLE (1 << 1)
47#define FLCE0 (1 << 0)
48
49
50/*
51 * MTD structure for SharpSL
52 */
53static struct mtd_info *sharpsl_mtd = NULL;
54
55/*
56 * Define partitions for flash device
57 */
58#define DEFAULT_NUM_PARTITIONS 3
59
60static int nr_partitions;
61static struct mtd_partition sharpsl_nand_default_partition_info[] = {
62 {
63 .name = "System Area",
64 .offset = 0,
65 .size = 7 * 1024 * 1024,
66 },
67 {
68 .name = "Root Filesystem",
69 .offset = 7 * 1024 * 1024,
70 .size = 30 * 1024 * 1024,
71 },
72 {
73 .name = "Home Filesystem",
74 .offset = MTDPART_OFS_APPEND ,
75 .size = MTDPART_SIZ_FULL ,
76 },
77};
78
79/*
80 * hardware specific access to control-lines
81 */
82static void
83sharpsl_nand_hwcontrol(struct mtd_info* mtd, int cmd)
84{
85 switch (cmd) {
86 case NAND_CTL_SETCLE:
87 writeb(readb(FLASHCTL) | FLCLE, FLASHCTL);
88 break;
89 case NAND_CTL_CLRCLE:
90 writeb(readb(FLASHCTL) & ~FLCLE, FLASHCTL);
91 break;
92
93 case NAND_CTL_SETALE:
94 writeb(readb(FLASHCTL) | FLALE, FLASHCTL);
95 break;
96 case NAND_CTL_CLRALE:
97 writeb(readb(FLASHCTL) & ~FLALE, FLASHCTL);
98 break;
99
100 case NAND_CTL_SETNCE:
101 writeb(readb(FLASHCTL) & ~(FLCE0|FLCE1), FLASHCTL);
102 break;
103 case NAND_CTL_CLRNCE:
104 writeb(readb(FLASHCTL) | (FLCE0|FLCE1), FLASHCTL);
105 break;
106 }
107}
108
109static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
110
111static struct nand_bbt_descr sharpsl_bbt = {
112 .options = 0,
113 .offs = 4,
114 .len = 2,
115 .pattern = scan_ff_pattern
116};
117
118static int
119sharpsl_nand_dev_ready(struct mtd_info* mtd)
120{
121 return !((readb(FLASHCTL) & FLRYBY) == 0);
122}
123
124static void
125sharpsl_nand_enable_hwecc(struct mtd_info* mtd, int mode)
126{
127 writeb(0, ECCCLRR);
128}
129
130static int
131sharpsl_nand_calculate_ecc(struct mtd_info* mtd, const u_char* dat,
132 u_char* ecc_code)
133{
134 ecc_code[0] = ~readb(ECCLPUB);
135 ecc_code[1] = ~readb(ECCLPLB);
136 ecc_code[2] = (~readb(ECCCP) << 2) | 0x03;
137 return readb(ECCCNTR) != 0;
138}
139
140
141#ifdef CONFIG_MTD_PARTITIONS
142const char *part_probes[] = { "cmdlinepart", NULL };
143#endif
144
145
146/*
147 * Main initialization routine
148 */
149int __init
150sharpsl_nand_init(void)
151{
152 struct nand_chip *this;
153 struct mtd_partition* sharpsl_partition_info;
154 int err = 0;
155
156 /* Allocate memory for MTD device structure and private data */
157 sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
158 GFP_KERNEL);
159 if (!sharpsl_mtd) {
160 printk ("Unable to allocate SharpSL NAND MTD device structure.\n");
161 return -ENOMEM;
162 }
163
164 /* map physical address */
165 sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
166 if (!sharpsl_io_base) {
167 printk("ioremap to access Sharp SL NAND chip failed\n");
168 kfree(sharpsl_mtd);
169 return -EIO;
170 }
171
172 /* Get pointer to private data */
173 this = (struct nand_chip *) (&sharpsl_mtd[1]);
174
175 /* Initialize structures */
176 memset((char *) sharpsl_mtd, 0, sizeof(struct mtd_info));
177 memset((char *) this, 0, sizeof(struct nand_chip));
178
179 /* Link the private data with the MTD structure */
180 sharpsl_mtd->priv = this;
181
182 /*
183 * PXA initialize
184 */
185 writeb(readb(FLASHCTL) | FLWP, FLASHCTL);
186
187 /* Set address of NAND IO lines */
188 this->IO_ADDR_R = FLASHIO;
189 this->IO_ADDR_W = FLASHIO;
190 /* Set address of hardware control function */
191 this->hwcontrol = sharpsl_nand_hwcontrol;
192 this->dev_ready = sharpsl_nand_dev_ready;
193 /* 15 us command delay time */
194 this->chip_delay = 15;
195 /* set eccmode using hardware ECC */
196 this->eccmode = NAND_ECC_HW3_256;
197 this->enable_hwecc = sharpsl_nand_enable_hwecc;
198 this->calculate_ecc = sharpsl_nand_calculate_ecc;
199 this->correct_data = nand_correct_data;
200 this->badblock_pattern = &sharpsl_bbt;
201
202 /* Scan to find existence of the device */
203 err = nand_scan(sharpsl_mtd, 1);
204 if (err) {
205 iounmap(sharpsl_io_base);
206 kfree(sharpsl_mtd);
207 return err;
208 }
209
210 /* Register the partitions */
211 sharpsl_mtd->name = "sharpsl-nand";
212 nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes,
213 &sharpsl_partition_info, 0);
214
215 if (nr_partitions <= 0) {
216 nr_partitions = DEFAULT_NUM_PARTITIONS;
217 sharpsl_partition_info = sharpsl_nand_default_partition_info;
218 if (machine_is_poodle()) {
219 sharpsl_partition_info[1].size=22 * 1024 * 1024;
220 } else if (machine_is_corgi() || machine_is_shepherd()) {
221 sharpsl_partition_info[1].size=25 * 1024 * 1024;
222 } else if (machine_is_husky()) {
223 sharpsl_partition_info[1].size=53 * 1024 * 1024;
224 }
225 }
226
227 if (machine_is_husky()) {
228 /* Need to use small eraseblock size for backward compatibility */
229 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
230 }
231
232 add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions);
233
234 /* Return happy */
235 return 0;
236}
237module_init(sharpsl_nand_init);
238
239/*
240 * Clean up routine
241 */
242#ifdef MODULE
243static void __exit sharpsl_nand_cleanup(void)
244{
245 struct nand_chip *this = (struct nand_chip *) &sharpsl_mtd[1];
246
247 /* Release resources, unregister device */
248 nand_release(sharpsl_mtd);
249
250 iounmap(sharpsl_io_base);
251
252 /* Free the MTD device structure */
253 kfree(sharpsl_mtd);
254}
255module_exit(sharpsl_nand_cleanup);
256#endif
257
258MODULE_LICENSE("GPL");
259MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
260MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
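
The sharpsl_bbt descriptor above tells the generic NAND layer to treat a block as factory-bad unless the OOB bytes at offset 4 match the two-byte 0xff pattern. Roughly how such a check behaves, as a self-contained sketch (this models the idea only, not the MTD bad-block-table code):

    /* Illustrative model of a factory bad-block marker check driven by a
     * descriptor like sharpsl_bbt: the block is good only if the bytes at
     * 'offs' match the all-0xff pattern. */
    #include <stdio.h>
    #include <string.h>

    struct bbt_descr {
        int offs;
        int len;
        const unsigned char *pattern;
    };

    static const unsigned char scan_ff_pattern[] = { 0xff, 0xff };
    static const struct bbt_descr sharpsl_bbt = { 4, 2, scan_ff_pattern };

    static int block_is_bad(const unsigned char *oob, const struct bbt_descr *bd)
    {
        return memcmp(oob + bd->offs, bd->pattern, bd->len) != 0;
    }

    int main(void)
    {
        unsigned char good_oob[16], bad_oob[16];

        memset(good_oob, 0xff, sizeof(good_oob));
        memset(bad_oob, 0xff, sizeof(bad_oob));
        bad_oob[4] = 0x00;                     /* factory bad-block marker */

        printf("good block bad? %d, marked block bad? %d\n",
               block_is_bad(good_oob, &sharpsl_bbt),
               block_is_bad(bad_oob, &sharpsl_bbt));
        return 0;
    }
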
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
new file mode 100644
index 000000000000..b777c412b758
--- /dev/null
+++ b/drivers/mtd/nand/spia.c
@@ -0,0 +1,173 @@
1/*
2 * drivers/mtd/nand/spia.c
3 *
4 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
5 *
6 *
7 * 10-29-2001 TG change to support hardware-specific access
8 * to control lines (due to change in nand.c)
9 * page_cache added
10 *
11 * $Id: spia.c,v 1.24 2004/11/04 12:53:10 gleixner Exp $
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 *
17 * Overview:
18 * This is a device driver for the NAND flash device found on the
19 * SPIA board which utilizes the Toshiba TC58V64AFT part. This is
20 * a 64Mibit (8MiB x 8 bits) NAND flash device.
21 */
22
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/nand.h>
29#include <linux/mtd/partitions.h>
30#include <asm/io.h>
31
32/*
33 * MTD structure for SPIA board
34 */
35static struct mtd_info *spia_mtd = NULL;
36
37/*
38 * Values specific to the SPIA board (used with EP7212 processor)
39 */
40#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
41#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
42#define SPIA_PEDR 0x0080 /*
43 * IO offset to Port E data register
44 * where the CLE, ALE and NCE pins
45 * are wired to.
46 */
47#define SPIA_PEDDR 0x00c0 /*
48 * IO offset to Port E data direction
49 * register so we can control the IO
50 * lines.
51 */
52
53/*
54 * Module stuff
55 */
56
57static int spia_io_base = SPIA_IO_BASE;
58static int spia_fio_base = SPIA_FIO_BASE;
59static int spia_pedr = SPIA_PEDR;
60static int spia_peddr = SPIA_PEDDR;
61
62module_param(spia_io_base, int, 0);
63module_param(spia_fio_base, int, 0);
64module_param(spia_pedr, int, 0);
65module_param(spia_peddr, int, 0);
66
67/*
68 * Define partitions for flash device
69 */
70static const struct mtd_partition partition_info[] = {
71 {
72 .name = "SPIA flash partition 1",
73 .offset = 0,
74 .size = 2*1024*1024
75 },
76 {
77 .name = "SPIA flash partition 2",
78 .offset = 2*1024*1024,
79 .size = 6*1024*1024
80 }
81};
82#define NUM_PARTITIONS 2
83
84
85/*
86 * hardware specific access to control-lines
87*/
88static void spia_hwcontrol(struct mtd_info *mtd, int cmd){
89
90 switch(cmd){
91
92 case NAND_CTL_SETCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x01; break;
93 case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x01; break;
94
95 case NAND_CTL_SETALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x02; break;
96 case NAND_CTL_CLRALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x02; break;
97
98 case NAND_CTL_SETNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x04; break;
99 case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x04; break;
100 }
101}
102
103/*
104 * Main initialization routine
105 */
106int __init spia_init (void)
107{
108 struct nand_chip *this;
109
110 /* Allocate memory for MTD device structure and private data */
111 spia_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
112 GFP_KERNEL);
113 if (!spia_mtd) {
114 printk ("Unable to allocate SPIA NAND MTD device structure.\n");
115 return -ENOMEM;
116 }
117
118 /* Get pointer to private data */
119 this = (struct nand_chip *) (&spia_mtd[1]);
120
121 /* Initialize structures */
122 memset((char *) spia_mtd, 0, sizeof(struct mtd_info));
123 memset((char *) this, 0, sizeof(struct nand_chip));
124
125 /* Link the private data with the MTD structure */
126 spia_mtd->priv = this;
127
128 /*
129 * Set GPIO Port E control register so that the pins are configured
130 * to be outputs for controlling the NAND flash.
131 */
132 (*(volatile unsigned char *) (spia_io_base + spia_peddr)) = 0x07;
133
134 /* Set address of NAND IO lines */
135 this->IO_ADDR_R = (void __iomem *) spia_fio_base;
136 this->IO_ADDR_W = (void __iomem *) spia_fio_base;
137 /* Set address of hardware control function */
138 this->hwcontrol = spia_hwcontrol;
139 /* 15 us command delay time */
140 this->chip_delay = 15;
141
142 /* Scan to find existence of the device */
143 if (nand_scan (spia_mtd, 1)) {
144 kfree (spia_mtd);
145 return -ENXIO;
146 }
147
148 /* Register the partitions */
149 add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
150
151 /* Return happy */
152 return 0;
153}
154module_init(spia_init);
155
156/*
157 * Clean up routine
158 */
159#ifdef MODULE
160static void __exit spia_cleanup (void)
161{
162 /* Release resources, unregister device */
163 nand_release (spia_mtd);
164
165 /* Free the MTD device structure */
166 kfree (spia_mtd);
167}
168module_exit(spia_cleanup);
169#endif
170
171MODULE_LICENSE("GPL");
172MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com");
173MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
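
The SPIA control lines are driven by poking individual bits of the EP7212 Port E data register through a volatile pointer, with nCE being active low. A small standalone model of that access pattern (a plain variable stands in for the mapped register):

    /* Sketch of the volatile-pointer MMIO style used by spia_hwcontrol().
     * 'fake_pedr' stands in for the Port E data register; on real hardware
     * the pointer would come from the mapped IO base instead. */
    #include <stdio.h>

    static unsigned char fake_pedr;

    int main(void)
    {
        volatile unsigned char *pedr = &fake_pedr;

        *pedr |= 0x01;   /* NAND_CTL_SETCLE: raise CLE (bit 0) */
        *pedr |= 0x02;   /* NAND_CTL_SETALE: raise ALE (bit 1) */
        *pedr &= ~0x04;  /* NAND_CTL_SETNCE: nCE is active low (bit 2) */

        printf("Port E data = 0x%02x\n", *pedr);
        return 0;
    }
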
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
new file mode 100644
index 000000000000..52c808fb5fa9
--- /dev/null
+++ b/drivers/mtd/nand/toto.c
@@ -0,0 +1,205 @@
1/*
2 * drivers/mtd/nand/toto.c
3 *
4 * Copyright (c) 2003 Texas Instruments
5 *
6 * Derived from drivers/mtd/autcpu12.c
7 *
8 * Copyright (c) 2002 Thomas Gleixner <tglx@linutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Overview:
15 * This is a device driver for the NAND flash device found on the
16 * TI fido board. It supports 32MiB and 64MiB cards
17 *
18 * $Id: toto.c,v 1.4 2004/10/05 13:50:20 gleixner Exp $
19 */
20
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/delay.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <asm/io.h>
29#include <asm/arch/hardware.h>
30#include <asm/sizes.h>
31#include <asm/arch/toto.h>
32#include <asm/arch-omap1510/hardware.h>
33#include <asm/arch/gpio.h>
34
35/*
36 * MTD structure for TOTO board
37 */
38static struct mtd_info *toto_mtd = NULL;
39
40static unsigned long toto_io_base = OMAP_FLASH_1_BASE;
41
42#define CONFIG_NAND_WORKAROUND 1
43
44#define NAND_NCE 0x4000
45#define NAND_CLE 0x1000
46#define NAND_ALE 0x0002
47#define NAND_MASK (NAND_CLE | NAND_ALE | NAND_NCE)
48
49#define T_NAND_CTL_CLRALE(iob) gpiosetout(NAND_ALE, 0)
50#define T_NAND_CTL_SETALE(iob) gpiosetout(NAND_ALE, NAND_ALE)
51#ifdef CONFIG_NAND_WORKAROUND /* "some" dev boards busted, blue wired to rts2 :( */
52#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0); rts2setout(2, 2)
53#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE); rts2setout(2, 0)
54#else
55#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0)
56#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE)
57#endif
58#define T_NAND_CTL_SETNCE(iob) gpiosetout(NAND_NCE, 0)
59#define T_NAND_CTL_CLRNCE(iob) gpiosetout(NAND_NCE, NAND_NCE)
60
61/*
62 * Define partitions for flash devices
63 */
64
65static struct mtd_partition partition_info64M[] = {
66 { .name = "toto kernel partition 1",
67 .offset = 0,
68 .size = 2 * SZ_1M },
69 { .name = "toto file sys partition 2",
70 .offset = 2 * SZ_1M,
71 .size = 14 * SZ_1M },
72 { .name = "toto user partition 3",
73 .offset = 16 * SZ_1M,
74 .size = 16 * SZ_1M },
75 { .name = "toto devboard extra partition 4",
76 .offset = 32 * SZ_1M,
77 .size = 32 * SZ_1M },
78};
79
80static struct mtd_partition partition_info32M[] = {
81 { .name = "toto kernel partition 1",
82 .offset = 0,
83 .size = 2 * SZ_1M },
84 { .name = "toto file sys partition 2",
85 .offset = 2 * SZ_1M,
86 .size = 14 * SZ_1M },
87 { .name = "toto user partition 3",
88 .offset = 16 * SZ_1M,
89 .size = 16 * SZ_1M },
90};
91
92#define NUM_PARTITIONS32M 3
93#define NUM_PARTITIONS64M 4
94/*
95 * hardware specific access to control-lines
96*/
97
98static void toto_hwcontrol(struct mtd_info *mtd, int cmd)
99{
100
101 udelay(1); /* hopefully enough time for the preceding write to complete */
102 switch(cmd){
103
104 case NAND_CTL_SETCLE: T_NAND_CTL_SETCLE(cmd); break;
105 case NAND_CTL_CLRCLE: T_NAND_CTL_CLRCLE(cmd); break;
106
107 case NAND_CTL_SETALE: T_NAND_CTL_SETALE(cmd); break;
108 case NAND_CTL_CLRALE: T_NAND_CTL_CLRALE(cmd); break;
109
110 case NAND_CTL_SETNCE: T_NAND_CTL_SETNCE(cmd); break;
111 case NAND_CTL_CLRNCE: T_NAND_CTL_CLRNCE(cmd); break;
112 }
113 udelay(1); /* allow time for the gpio state change to settle before the next memory write */
114}
115
116/*
117 * Main initialization routine
118 */
119int __init toto_init (void)
120{
121 struct nand_chip *this;
122 int err = 0;
123
124 /* Allocate memory for MTD device structure and private data */
125 toto_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
126 GFP_KERNEL);
127 if (!toto_mtd) {
128 printk (KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
129 err = -ENOMEM;
130 goto out;
131 }
132
133 /* Get pointer to private data */
134 this = (struct nand_chip *) (&toto_mtd[1]);
135
136 /* Initialize structures */
137 memset((char *) toto_mtd, 0, sizeof(struct mtd_info));
138 memset((char *) this, 0, sizeof(struct nand_chip));
139
140 /* Link the private data with the MTD structure */
141 toto_mtd->priv = this;
142
143 /* Set address of NAND IO lines */
144 this->IO_ADDR_R = (void __iomem *)toto_io_base;
145 this->IO_ADDR_W = (void __iomem *)toto_io_base;
146 this->hwcontrol = toto_hwcontrol;
147 this->dev_ready = NULL;
148 /* 30 us command delay time */
149 this->chip_delay = 30;
150 this->eccmode = NAND_ECC_SOFT;
151
152 /* Scan to find existence of the device */
153 if (nand_scan (toto_mtd, 1)) {
154 err = -ENXIO;
155 goto out_mtd;
156 }
157
158 /* Register the partitions */
159 switch(toto_mtd->size){
160 case SZ_64M: add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); break;
161 case SZ_32M: add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); break;
162 default: {
163 printk (KERN_WARNING "Unsupported Nand device\n");
164 err = -ENXIO;
165 goto out_buf;
166 }
167 }
168
169 gpioreserve(NAND_MASK); /* claim our gpios */
170 archflashwp(0,0); /* open up flash for writing */
171
172 goto out;
173
174out_buf:
175 kfree (this->data_buf);
176out_mtd:
177 kfree (toto_mtd);
178out:
179 return err;
180}
181
182module_init(toto_init);
183
184/*
185 * Clean up routine
186 */
187static void __exit toto_cleanup (void)
188{
189 /* Release resources, unregister device */
190 nand_release (toto_mtd);
191
192 /* Free the MTD device structure */
193 kfree (toto_mtd);
194
195 /* stop flash writes */
196 archflashwp(0,1);
197
198 /* release gpios to system */
199 gpiorelease(NAND_MASK);
200}
201module_exit(toto_cleanup);
202
203MODULE_LICENSE("GPL");
204MODULE_AUTHOR("Richard Woodruff <r-woodruff2@ti.com>");
205MODULE_DESCRIPTION("Glue layer for NAND flash on toto board");
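
toto_init() picks one of the two static partition tables purely from the size nand_scan() reports for the chip. A compact sketch of that selection, with placeholder entries mirroring the 32MiB table above (illustrative only):

    /* Illustration of the size-based partition table selection in toto_init().
     * Names and sizes are placeholders for the real mtd_partition arrays. */
    #include <stdio.h>

    #define SZ_1M (1024 * 1024)

    struct part { const char *name; unsigned long offset, size; };

    static const struct part parts32m[] = {
        { "kernel",  0,          2  * SZ_1M },
        { "filesys", 2 * SZ_1M,  14 * SZ_1M },
        { "user",    16 * SZ_1M, 16 * SZ_1M },
    };

    int main(void)
    {
        unsigned long chip_size = 32UL * SZ_1M;   /* pretend nand_scan() found 32MiB */
        const struct part *tbl;
        int n, i;

        switch (chip_size) {
        case 32UL * SZ_1M: tbl = parts32m; n = 3; break;
        default:           tbl = NULL;     n = 0; break;   /* unsupported size */
        }

        for (i = 0; i < n; i++)
            printf("%-8s off=%8lu size=%8lu\n", tbl[i].name,
                   tbl[i].offset, tbl[i].size);
        return 0;
    }
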
diff --git a/drivers/mtd/nand/tx4925ndfmc.c b/drivers/mtd/nand/tx4925ndfmc.c
new file mode 100644
index 000000000000..bba688830c9b
--- /dev/null
+++ b/drivers/mtd/nand/tx4925ndfmc.c
@@ -0,0 +1,416 @@
1/*
2 * drivers/mtd/tx4925ndfmc.c
3 *
4 * Overview:
5 * This is a device driver for the NAND flash device found on the
6 * Toshiba RBTX4925 reference board, which is a SmartMediaCard. It supports
7 * 16MiB, 32MiB and 64MiB cards.
8 *
9 * Author: MontaVista Software, Inc. source@mvista.com
10 *
11 * Derived from drivers/mtd/autcpu12.c
12 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
13 *
14 * $Id: tx4925ndfmc.c,v 1.5 2004/10/05 13:50:20 gleixner Exp $
15 *
16 * Copyright (C) 2001 Toshiba Corporation
17 *
18 * 2003 (c) MontaVista Software, Inc. This file is licensed under
19 * the terms of the GNU General Public License version 2. This program
20 * is licensed "as is" without any warranty of any kind, whether express
21 * or implied.
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/partitions.h>
31#include <linux/delay.h>
32#include <asm/io.h>
33#include <asm/tx4925/tx4925_nand.h>
34
35extern struct nand_oobinfo jffs2_oobinfo;
36
37/*
38 * MTD structure for RBTX4925 board
39 */
40static struct mtd_info *tx4925ndfmc_mtd = NULL;
41
42/*
43 * Define partitions for flash devices
44 */
45
46static struct mtd_partition partition_info16k[] = {
47 { .name = "RBTX4925 flash partition 1",
48 .offset = 0,
49 .size = 8 * 0x00100000 },
50 { .name = "RBTX4925 flash partition 2",
51 .offset = 8 * 0x00100000,
52 .size = 8 * 0x00100000 },
53};
54
55static struct mtd_partition partition_info32k[] = {
56 { .name = "RBTX4925 flash partition 1",
57 .offset = 0,
58 .size = 8 * 0x00100000 },
59 { .name = "RBTX4925 flash partition 2",
60 .offset = 8 * 0x00100000,
61 .size = 24 * 0x00100000 },
62};
63
64static struct mtd_partition partition_info64k[] = {
65 { .name = "User FS",
66 .offset = 0,
67 .size = 16 * 0x00100000 },
68 { .name = "RBTX4925 flash partition 2",
69 .offset = 16 * 0x00100000,
70 .size = 48 * 0x00100000},
71};
72
73static struct mtd_partition partition_info128k[] = {
74 { .name = "Skip bad section",
75 .offset = 0,
76 .size = 16 * 0x00100000 },
77 { .name = "User FS",
78 .offset = 16 * 0x00100000,
79 .size = 112 * 0x00100000 },
80};
81#define NUM_PARTITIONS16K 2
82#define NUM_PARTITIONS32K 2
83#define NUM_PARTITIONS64K 2
84#define NUM_PARTITIONS128K 2
85
86/*
87 * hardware specific access to control-lines
88*/
89static void tx4925ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
90{
91
92 switch(cmd){
93
94 case NAND_CTL_SETCLE:
95 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CLE;
96 break;
97 case NAND_CTL_CLRCLE:
98 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CLE;
99 break;
100 case NAND_CTL_SETALE:
101 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ALE;
102 break;
103 case NAND_CTL_CLRALE:
104 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ALE;
105 break;
106 case NAND_CTL_SETNCE:
107 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CE;
108 break;
109 case NAND_CTL_CLRNCE:
110 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CE;
111 break;
112 case NAND_CTL_SETWP:
113 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_WE;
114 break;
115 case NAND_CTL_CLRWP:
116 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_WE;
117 break;
118 }
119}
120
121/*
122* read device ready pin
123*/
124static int tx4925ndfmc_device_ready(struct mtd_info *mtd)
125{
126 int ready;
127 ready = (tx4925_ndfmcptr->sr & TX4925_NDSFR_BUSY) ? 0 : 1;
128 return ready;
129}
130void tx4925ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
131{
132 /* reset first */
133 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_MASK;
134 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
135 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_ENAB;
136}
137static void tx4925ndfmc_disable_ecc(void)
138{
139 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
140}
141static void tx4925ndfmc_enable_read_ecc(void)
142{
143 tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
144 tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_READ;
145}
146void tx4925ndfmc_readecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code){
147 int i;
148 u_char *ecc = ecc_code;
149 tx4925ndfmc_enable_read_ecc();
150 for (i = 0;i < 6;i++,ecc++)
151 *ecc = tx4925_read_nfmc(&(tx4925_ndfmcptr->dtr));
152 tx4925ndfmc_disable_ecc();
153}
154void tx4925ndfmc_device_setup(void)
155{
156
157 *(unsigned char *)0xbb005000 &= ~0x08;
158
159 /* reset NDFMC */
160 tx4925_ndfmcptr->rstr |= TX4925_NDFRSTR_RST;
161 while (tx4925_ndfmcptr->rstr & TX4925_NDFRSTR_RST);
162
163 /* setup BusSeparete, Hold Time, Strobe Pulse Width */
164 tx4925_ndfmcptr->mcr = TX4925_BSPRT ? TX4925_NDFMCR_BSPRT : 0;
165 tx4925_ndfmcptr->spr = TX4925_HOLD << 4 | TX4925_SPW;
166}
167static u_char tx4925ndfmc_nand_read_byte(struct mtd_info *mtd)
168{
169 struct nand_chip *this = mtd->priv;
170 return tx4925_read_nfmc(this->IO_ADDR_R);
171}
172
173static void tx4925ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
174{
175 struct nand_chip *this = mtd->priv;
176 tx4925_write_nfmc(byte, this->IO_ADDR_W);
177}
178
179static void tx4925ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
180{
181 int i;
182 struct nand_chip *this = mtd->priv;
183
184 for (i=0; i<len; i++)
185 tx4925_write_nfmc(buf[i], this->IO_ADDR_W);
186}
187
188static void tx4925ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
189{
190 int i;
191 struct nand_chip *this = mtd->priv;
192
193 for (i=0; i<len; i++)
194 buf[i] = tx4925_read_nfmc(this->IO_ADDR_R);
195}
196
197static int tx4925ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
198{
199 int i;
200 struct nand_chip *this = mtd->priv;
201
202 for (i=0; i<len; i++)
203 if (buf[i] != tx4925_read_nfmc(this->IO_ADDR_R))
204 return -EFAULT;
205
206 return 0;
207}
208
209/*
210 * Send command to NAND device
211 */
212static void tx4925ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
213{
214 register struct nand_chip *this = mtd->priv;
215
216 /* Begin command latch cycle */
217 this->hwcontrol(mtd, NAND_CTL_SETCLE);
218 /*
219 * Write out the command to the device.
220 */
221 if (command == NAND_CMD_SEQIN) {
222 int readcmd;
223
224 if (column >= mtd->oobblock) {
225 /* OOB area */
226 column -= mtd->oobblock;
227 readcmd = NAND_CMD_READOOB;
228 } else if (column < 256) {
229 /* First 256 bytes --> READ0 */
230 readcmd = NAND_CMD_READ0;
231 } else {
232 column -= 256;
233 readcmd = NAND_CMD_READ1;
234 }
235 this->write_byte(mtd, readcmd);
236 }
237 this->write_byte(mtd, command);
238
239 /* Set ALE and clear CLE to start address cycle */
240 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
241
242 if (column != -1 || page_addr != -1) {
243 this->hwcontrol(mtd, NAND_CTL_SETALE);
244
245 /* Serially input address */
246 if (column != -1)
247 this->write_byte(mtd, column);
248 if (page_addr != -1) {
249 this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
250 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
251 /* One more address cycle for higher density devices */
252 if (mtd->size & 0x0c000000)
253 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
254 }
255 /* Latch in address */
256 this->hwcontrol(mtd, NAND_CTL_CLRALE);
257 }
258
259 /*
260 * program and erase have their own busy handlers
261 * status and sequential in needs no delay
262 */
263 switch (command) {
264
265 case NAND_CMD_PAGEPROG:
266 /* Turn off WE */
267 this->hwcontrol (mtd, NAND_CTL_CLRWP);
268 return;
269
270 case NAND_CMD_SEQIN:
271 /* Turn on WE */
272 this->hwcontrol (mtd, NAND_CTL_SETWP);
273 return;
274
275 case NAND_CMD_ERASE1:
276 case NAND_CMD_ERASE2:
277 case NAND_CMD_STATUS:
278 return;
279
280 case NAND_CMD_RESET:
281 if (this->dev_ready)
282 break;
283 this->hwcontrol(mtd, NAND_CTL_SETCLE);
284 this->write_byte(mtd, NAND_CMD_STATUS);
285 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
286 while ( !(this->read_byte(mtd) & 0x40));
287 return;
288
289 /* This applies to read commands */
290 default:
291 /*
292 * If we don't have access to the busy pin, we apply the given
293 * command delay
294 */
295 if (!this->dev_ready) {
296 udelay (this->chip_delay);
297 return;
298 }
299 }
300
301 /* wait until command is processed */
302 while (!this->dev_ready(mtd));
303}
304
305#ifdef CONFIG_MTD_CMDLINE_PARTS
306extern int parse_cmdline_partitions(struct mtd_info *master,
307 struct mtd_partition **pparts, char *);
308#endif
309
310/*
311 * Main initialization routine
312 */
313extern int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
314int __init tx4925ndfmc_init (void)
315{
316 struct nand_chip *this;
317 int err = 0;
318
319 /* Allocate memory for MTD device structure and private data */
320 tx4925ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
321 GFP_KERNEL);
322 if (!tx4925ndfmc_mtd) {
323 printk ("Unable to allocate RBTX4925 NAND MTD device structure.\n");
324 err = -ENOMEM;
325 goto out;
326 }
327
328 tx4925ndfmc_device_setup();
329
330 /* io is indirect via a register so don't need to ioremap address */
331
332 /* Get pointer to private data */
333 this = (struct nand_chip *) (&tx4925ndfmc_mtd[1]);
334
335 /* Initialize structures */
336 memset((char *) tx4925ndfmc_mtd, 0, sizeof(struct mtd_info));
337 memset((char *) this, 0, sizeof(struct nand_chip));
338
339 /* Link the private data with the MTD structure */
340 tx4925ndfmc_mtd->priv = this;
341
342 /* Set address of NAND IO lines */
343 this->IO_ADDR_R = (void __iomem *)&(tx4925_ndfmcptr->dtr);
344 this->IO_ADDR_W = (void __iomem *)&(tx4925_ndfmcptr->dtr);
345 this->hwcontrol = tx4925ndfmc_hwcontrol;
346 this->enable_hwecc = tx4925ndfmc_enable_hwecc;
347 this->calculate_ecc = tx4925ndfmc_readecc;
348 this->correct_data = nand_correct_data;
349 this->eccmode = NAND_ECC_HW6_512;
350 this->dev_ready = tx4925ndfmc_device_ready;
351 /* 20 us command delay time */
352 this->chip_delay = 20;
353 this->read_byte = tx4925ndfmc_nand_read_byte;
354 this->write_byte = tx4925ndfmc_nand_write_byte;
355 this->cmdfunc = tx4925ndfmc_nand_command;
356 this->write_buf = tx4925ndfmc_nand_write_buf;
357 this->read_buf = tx4925ndfmc_nand_read_buf;
358 this->verify_buf = tx4925ndfmc_nand_verify_buf;
359
360 /* Scan to find existence of the device */
361 if (nand_scan (tx4925ndfmc_mtd, 1)) {
362 err = -ENXIO;
363 goto out_ior;
364 }
365
366 /* Register the partitions */
367#ifdef CONFIG_MTD_CMDLINE_PARTS
368 {
369 int mtd_parts_nb = 0;
370 struct mtd_partition *mtd_parts = 0;
371 mtd_parts_nb = parse_cmdline_partitions(tx4925ndfmc_mtd, &mtd_parts, "tx4925ndfmc");
372 if (mtd_parts_nb > 0)
373 add_mtd_partitions(tx4925ndfmc_mtd, mtd_parts, mtd_parts_nb);
374 else
375 add_mtd_device(tx4925ndfmc_mtd);
376 }
377#else /* ifdef CONFIG_MTD_CMDLINE_PARTS */
378 switch(tx4925ndfmc_mtd->size){
379 case 0x01000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info16k, NUM_PARTITIONS16K); break;
380 case 0x02000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info32k, NUM_PARTITIONS32K); break;
381 case 0x04000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info64k, NUM_PARTITIONS64K); break;
382 case 0x08000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info128k, NUM_PARTITIONS128K); break;
383 default: {
384 printk ("Unsupported SmartMedia device\n");
385 err = -ENXIO;
386 goto out_ior;
387 }
388 }
389#endif /* ifdef CONFIG_MTD_CMDLINE_PARTS */
390 goto out;
391
392out_ior:
393out:
394 return err;
395}
396
397module_init(tx4925ndfmc_init);
398
399/*
400 * Clean up routine
401 */
402#ifdef MODULE
403static void __exit tx4925ndfmc_cleanup (void)
404{
405 /* Release resources, unregister device */
406 nand_release (tx4925ndfmc_mtd);
407
408 /* Free the MTD device structure */
409 kfree (tx4925ndfmc_mtd);
410}
411module_exit(tx4925ndfmc_cleanup);
412#endif
413
414MODULE_LICENSE("GPL");
415MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
416MODULE_DESCRIPTION("Glue layer for SmartMediaCard on Toshiba RBTX4925");
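
The command function above issues one column byte, two page-address bytes, and a third page byte only on parts large enough to need it (size & 0x0c000000). The ordering can be sketched on its own like this (an illustration of the sequencing, not kernel code):

    /* Standalone sketch of the address-cycle ordering used by the command
     * functions above: column byte, two page-address bytes, and an extra
     * page byte for larger devices. */
    #include <stdio.h>

    static int build_addr_cycles(int column, int page_addr,
                                 unsigned long chip_size,
                                 unsigned char *out)
    {
        int n = 0;

        if (column != -1)
            out[n++] = (unsigned char)column;
        if (page_addr != -1) {
            out[n++] = (unsigned char)(page_addr & 0xff);
            out[n++] = (unsigned char)((page_addr >> 8) & 0xff);
            if (chip_size & 0x0c000000)   /* larger parts need a third byte */
                out[n++] = (unsigned char)((page_addr >> 16) & 0x0f);
        }
        return n;
    }

    int main(void)
    {
        unsigned char cycles[4];
        int i, n = build_addr_cycles(0, 0x12345, 64UL << 20, cycles);

        for (i = 0; i < n; i++)
            printf("cycle %d: 0x%02x\n", i, cycles[i]);
        return 0;
    }
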
diff --git a/drivers/mtd/nand/tx4938ndfmc.c b/drivers/mtd/nand/tx4938ndfmc.c
new file mode 100644
index 000000000000..df26e58820b3
--- /dev/null
+++ b/drivers/mtd/nand/tx4938ndfmc.c
@@ -0,0 +1,406 @@
1/*
2 * drivers/mtd/nand/tx4938ndfmc.c
3 *
4 * Overview:
5 * This is a device driver for the NAND flash device connected to
6 * TX4938 internal NAND Memory Controller.
7 * TX4938 NDFMC is almost the same as the TX4925 NDFMC, but its registers are 64 bits wide.
8 *
9 * Author: source@mvista.com
10 *
11 * Based on spia.c by Steven J. Hill
12 *
13 * $Id: tx4938ndfmc.c,v 1.4 2004/10/05 13:50:20 gleixner Exp $
14 *
15 * Copyright (C) 2000-2001 Toshiba Corporation
16 *
17 * 2003 (c) MontaVista Software, Inc. This file is licensed under the
18 * terms of the GNU General Public License version 2. This program is
19 * licensed "as is" without any warranty of any kind, whether express
20 * or implied.
21 */
22#include <linux/config.h>
23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
28#include <linux/mtd/nand_ecc.h>
29#include <linux/mtd/partitions.h>
30#include <asm/io.h>
31#include <asm/bootinfo.h>
32#include <linux/delay.h>
33#include <asm/tx4938/rbtx4938.h>
34
35extern struct nand_oobinfo jffs2_oobinfo;
36
37/*
38 * MTD structure for TX4938 NDFMC
39 */
40static struct mtd_info *tx4938ndfmc_mtd;
41
42/*
43 * Define partitions for flash device
44 */
45#define flush_wb() (void)tx4938_ndfmcptr->mcr;
46
47#define NUM_PARTITIONS 3
48#define NUMBER_OF_CIS_BLOCKS 24
49#define SIZE_OF_BLOCK 0x00004000
50#define NUMBER_OF_BLOCK_PER_ZONE 1024
51#define SIZE_OF_ZONE (NUMBER_OF_BLOCK_PER_ZONE * SIZE_OF_BLOCK)
52#ifndef CONFIG_MTD_CMDLINE_PARTS
53/*
54 * You can use the following sample of MTD partitions
55 * on the NAND Flash Memory 32MB or more.
56 *
57 * The following figure shows the image of the sample partition on
58 * the 32MB NAND Flash Memory.
59 *
60 * Block No.
61 * 0 +-----------------------------+ ------
62 * | CIS | ^
63 * 24 +-----------------------------+ |
64 * | kernel image | | Zone 0
65 * | | |
66 * +-----------------------------+ |
67 * 1023 | unused area | v
68 * +-----------------------------+ ------
69 * 1024 | JFFS2 | ^
70 * | | |
71 * | | | Zone 1
72 * | | |
73 * | | |
74 * | | v
75 * 2047 +-----------------------------+ ------
76 *
77 */
78static struct mtd_partition partition_info[NUM_PARTITIONS] = {
79 {
80 .name = "RBTX4938 CIS Area",
81 .offset = 0,
82 .size = (NUMBER_OF_CIS_BLOCKS * SIZE_OF_BLOCK),
83 .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
84 },
85 {
86 .name = "RBTX4938 kernel image",
87 .offset = MTDPART_OFS_APPEND,
88 .size = 8 * 0x00100000, /* 8MB (Depends on size of kernel image) */
89 .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
90 },
91 {
92 .name = "Root FS (JFFS2)",
93 .offset = (0 + SIZE_OF_ZONE), /* start address of next zone */
94 .size = MTDPART_SIZ_FULL
95 },
96};
97#endif
98
99static void tx4938ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
100{
101 switch (cmd) {
102 case NAND_CTL_SETCLE:
103 tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CLE;
104 break;
105 case NAND_CTL_CLRCLE:
106 tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CLE;
107 break;
108 case NAND_CTL_SETALE:
109 tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_ALE;
110 break;
111 case NAND_CTL_CLRALE:
112 tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_ALE;
113 break;
114 /* TX4938_NDFMCR_CE bit is 0:high 1:low */
115 case NAND_CTL_SETNCE:
116 tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CE;
117 break;
118 case NAND_CTL_CLRNCE:
119 tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CE;
120 break;
121 case NAND_CTL_SETWP:
122 tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_WE;
123 break;
124 case NAND_CTL_CLRWP:
125 tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_WE;
126 break;
127 }
128}
129static int tx4938ndfmc_dev_ready(struct mtd_info *mtd)
130{
131 flush_wb();
132 return !(tx4938_ndfmcptr->sr & TX4938_NDFSR_BUSY);
133}
134static void tx4938ndfmc_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
135{
136 u32 mcr = tx4938_ndfmcptr->mcr;
137 mcr &= ~TX4938_NDFMCR_ECC_ALL;
138 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
139 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_READ;
140 ecc_code[1] = tx4938_ndfmcptr->dtr;
141 ecc_code[0] = tx4938_ndfmcptr->dtr;
142 ecc_code[2] = tx4938_ndfmcptr->dtr;
143 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
144}
145static void tx4938ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
146{
147 u32 mcr = tx4938_ndfmcptr->mcr;
148 mcr &= ~TX4938_NDFMCR_ECC_ALL;
149 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_RESET;
150 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
151 tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_ON;
152}
153
154static u_char tx4938ndfmc_nand_read_byte(struct mtd_info *mtd)
155{
156 struct nand_chip *this = mtd->priv;
157 return tx4938_read_nfmc(this->IO_ADDR_R);
158}
159
160static void tx4938ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
161{
162 struct nand_chip *this = mtd->priv;
163 tx4938_write_nfmc(byte, this->IO_ADDR_W);
164}
165
166static void tx4938ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
167{
168 int i;
169 struct nand_chip *this = mtd->priv;
170
171 for (i=0; i<len; i++)
172 tx4938_write_nfmc(buf[i], this->IO_ADDR_W);
173}
174
175static void tx4938ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
176{
177 int i;
178 struct nand_chip *this = mtd->priv;
179
180 for (i=0; i<len; i++)
181 buf[i] = tx4938_read_nfmc(this->IO_ADDR_R);
182}
183
184static int tx4938ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
185{
186 int i;
187 struct nand_chip *this = mtd->priv;
188
189 for (i=0; i<len; i++)
190 if (buf[i] != tx4938_read_nfmc(this->IO_ADDR_R))
191 return -EFAULT;
192
193 return 0;
194}
195
196/*
197 * Send command to NAND device
198 */
199static void tx4938ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
200{
201 register struct nand_chip *this = mtd->priv;
202
203 /* Begin command latch cycle */
204 this->hwcontrol(mtd, NAND_CTL_SETCLE);
205 /*
206 * Write out the command to the device.
207 */
208 if (command == NAND_CMD_SEQIN) {
209 int readcmd;
210
211 if (column >= mtd->oobblock) {
212 /* OOB area */
213 column -= mtd->oobblock;
214 readcmd = NAND_CMD_READOOB;
215 } else if (column < 256) {
216 /* First 256 bytes --> READ0 */
217 readcmd = NAND_CMD_READ0;
218 } else {
219 column -= 256;
220 readcmd = NAND_CMD_READ1;
221 }
222 this->write_byte(mtd, readcmd);
223 }
224 this->write_byte(mtd, command);
225
226 /* Set ALE and clear CLE to start address cycle */
227 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
228
229 if (column != -1 || page_addr != -1) {
230 this->hwcontrol(mtd, NAND_CTL_SETALE);
231
232 /* Serially input address */
233 if (column != -1)
234 this->write_byte(mtd, column);
235 if (page_addr != -1) {
236 this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
237 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
238 /* One more address cycle for higher density devices */
239 if (mtd->size & 0x0c000000)
240 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
241 }
242 /* Latch in address */
243 this->hwcontrol(mtd, NAND_CTL_CLRALE);
244 }
245
246 /*
247 * program and erase have their own busy handlers
248 * status and sequential in needs no delay
249 */
250 switch (command) {
251
252 case NAND_CMD_PAGEPROG:
253 /* Turn off WE */
254 this->hwcontrol (mtd, NAND_CTL_CLRWP);
255 return;
256
257 case NAND_CMD_SEQIN:
258 /* Turn on WE */
259 this->hwcontrol (mtd, NAND_CTL_SETWP);
260 return;
261
262 case NAND_CMD_ERASE1:
263 case NAND_CMD_ERASE2:
264 case NAND_CMD_STATUS:
265 return;
266
267 case NAND_CMD_RESET:
268 if (this->dev_ready)
269 break;
270 this->hwcontrol(mtd, NAND_CTL_SETCLE);
271 this->write_byte(mtd, NAND_CMD_STATUS);
272 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
273 while ( !(this->read_byte(mtd) & 0x40));
274 return;
275
276 /* This applies to read commands */
277 default:
278 /*
279 * If we don't have access to the busy pin, we apply the given
280 * command delay
281 */
282 if (!this->dev_ready) {
283 udelay (this->chip_delay);
284 return;
285 }
286 }
287
288 /* wait until command is processed */
289 while (!this->dev_ready(mtd));
290}
291
292#ifdef CONFIG_MTD_CMDLINE_PARTS
293extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
294#endif
295/*
296 * Main initialization routine
297 */
298int __init tx4938ndfmc_init (void)
299{
300 struct nand_chip *this;
301 int bsprt = 0, hold = 0xf, spw = 0xf;
302 int protected = 0;
303
304 if ((*rbtx4938_piosel_ptr & 0x0c) != 0x08) {
305 printk("TX4938 NDFMC: disabled by IOC PIOSEL\n");
306 return -ENODEV;
307 }
308 bsprt = 1;
309 hold = 2;
310 spw = 9 - 1; /* 8 GBUSCLK = 80ns (@ GBUSCLK 100MHz) */
311
312 if ((tx4938_ccfgptr->pcfg &
313 (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL))
314 != TX4938_PCFG_NDF_SEL) {
315 printk("TX4938 NDFMC: disabled by PCFG.\n");
316 return -ENODEV;
317 }
318
319 /* reset NDFMC */
320 tx4938_ndfmcptr->rstr |= TX4938_NDFRSTR_RST;
321 while (tx4938_ndfmcptr->rstr & TX4938_NDFRSTR_RST)
322 ;
323 /* setup BusSeparete, Hold Time, Strobe Pulse Width */
324 tx4938_ndfmcptr->mcr = bsprt ? TX4938_NDFMCR_BSPRT : 0;
325 tx4938_ndfmcptr->spr = hold << 4 | spw;
326
327 /* Allocate memory for MTD device structure and private data */
328 tx4938ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
329 GFP_KERNEL);
330 if (!tx4938ndfmc_mtd) {
331 printk ("Unable to allocate TX4938 NDFMC MTD device structure.\n");
332 return -ENOMEM;
333 }
334
335 /* Get pointer to private data */
336 this = (struct nand_chip *) (&tx4938ndfmc_mtd[1]);
337
338 /* Initialize structures */
339 memset((char *) tx4938ndfmc_mtd, 0, sizeof(struct mtd_info));
340 memset((char *) this, 0, sizeof(struct nand_chip));
341
342 /* Link the private data with the MTD structure */
343 tx4938ndfmc_mtd->priv = this;
344
345 /* Set address of NAND IO lines */
346 this->IO_ADDR_R = (void __iomem *)&tx4938_ndfmcptr->dtr;
347 this->IO_ADDR_W = (void __iomem *)&tx4938_ndfmcptr->dtr;
348 this->hwcontrol = tx4938ndfmc_hwcontrol;
349 this->dev_ready = tx4938ndfmc_dev_ready;
350 this->calculate_ecc = tx4938ndfmc_calculate_ecc;
351 this->correct_data = nand_correct_data;
352 this->enable_hwecc = tx4938ndfmc_enable_hwecc;
353 this->eccmode = NAND_ECC_HW3_256;
354 this->chip_delay = 100;
355 this->read_byte = tx4938ndfmc_nand_read_byte;
356 this->write_byte = tx4938ndfmc_nand_write_byte;
357 this->cmdfunc = tx4938ndfmc_nand_command;
358 this->write_buf = tx4938ndfmc_nand_write_buf;
359 this->read_buf = tx4938ndfmc_nand_read_buf;
360 this->verify_buf = tx4938ndfmc_nand_verify_buf;
361
362 /* Scan to find existence of the device */
363 if (nand_scan (tx4938ndfmc_mtd, 1)) {
364 kfree (tx4938ndfmc_mtd);
365 return -ENXIO;
366 }
367
368 if (protected) {
369 printk(KERN_INFO "TX4938 NDFMC: write protected.\n");
370 tx4938ndfmc_mtd->flags &= ~(MTD_WRITEABLE | MTD_ERASEABLE);
371 }
372
373#ifdef CONFIG_MTD_CMDLINE_PARTS
374 {
375 int mtd_parts_nb = 0;
376 struct mtd_partition *mtd_parts = 0;
377 mtd_parts_nb = parse_cmdline_partitions(tx4938ndfmc_mtd, &mtd_parts, "tx4938ndfmc");
378 if (mtd_parts_nb > 0)
379 add_mtd_partitions(tx4938ndfmc_mtd, mtd_parts, mtd_parts_nb);
380 else
381 add_mtd_device(tx4938ndfmc_mtd);
382 }
383#else
384 add_mtd_partitions(tx4938ndfmc_mtd, partition_info, NUM_PARTITIONS );
385#endif
386
387 return 0;
388}
389module_init(tx4938ndfmc_init);
390
391/*
392 * Clean up routine
393 */
394static void __exit tx4938ndfmc_cleanup (void)
395{
396 /* Release resources, unregister device */
397 nand_release (tx4938ndfmc_mtd);
398
399 /* Free the MTD device structure */
400 kfree (tx4938ndfmc_mtd);
401}
402module_exit(tx4938ndfmc_cleanup);
403
404MODULE_LICENSE("GPL");
405MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
406MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on TX4938 NDFMC");
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
new file mode 100644
index 000000000000..b2014043634f
--- /dev/null
+++ b/drivers/mtd/nftlcore.c
@@ -0,0 +1,767 @@
1/* Linux driver for NAND Flash Translation Layer */
2/* (c) 1999 Machine Vision Holdings, Inc. */
3/* Author: David Woodhouse <dwmw2@infradead.org> */
4/* $Id: nftlcore.c,v 1.97 2004/11/16 18:28:59 dwmw2 Exp $ */
5
6/*
7 The contents of this file are distributed under the GNU General
8 Public License version 2. The author places no additional
9 restrictions of any kind on it.
10 */
11
12#define PRERELEASE
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <asm/errno.h>
18#include <asm/io.h>
19#include <asm/uaccess.h>
20#include <linux/miscdevice.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <linux/hdreg.h>
27
28#include <linux/kmod.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/nftl.h>
32#include <linux/mtd/blktrans.h>
33
34/* maximum number of loops while examining next block, to have a
35 chance to detect consistency problems (they should never happen
36   because of the checks done in the mount code) */
37
38#define MAX_LOOPS 10000
39
40
41static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
42{
43 struct NFTLrecord *nftl;
44 unsigned long temp;
45
46 if (mtd->type != MTD_NANDFLASH)
47 return;
48 /* OK, this is moderately ugly. But probably safe. Alternatives? */
49 if (memcmp(mtd->name, "DiskOnChip", 10))
50 return;
51
52 if (!mtd->block_isbad) {
53 printk(KERN_ERR
54"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
55"Please use the new diskonchip driver under the NAND subsystem.\n");
56 return;
57 }
58
59 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
60
61 nftl = kmalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
62
63 if (!nftl) {
64 printk(KERN_WARNING "NFTL: out of memory for data structures\n");
65 return;
66 }
67 memset(nftl, 0, sizeof(*nftl));
68
69 nftl->mbd.mtd = mtd;
70 nftl->mbd.devnum = -1;
71 nftl->mbd.blksize = 512;
72 nftl->mbd.tr = tr;
73 memcpy(&nftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
74 nftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
75
76 if (NFTL_mount(nftl) < 0) {
77 printk(KERN_WARNING "NFTL: could not mount device\n");
78 kfree(nftl);
79 return;
80 }
81
82 /* OK, it's a new one. Set up all the data structures. */
83
84 /* Calculate geometry */
85 nftl->cylinders = 1024;
86 nftl->heads = 16;
87
88 temp = nftl->cylinders * nftl->heads;
89 nftl->sectors = nftl->mbd.size / temp;
90 if (nftl->mbd.size % temp) {
91 nftl->sectors++;
92 temp = nftl->cylinders * nftl->sectors;
93 nftl->heads = nftl->mbd.size / temp;
94
95 if (nftl->mbd.size % temp) {
96 nftl->heads++;
97 temp = nftl->heads * nftl->sectors;
98 nftl->cylinders = nftl->mbd.size / temp;
99 }
100 }
101
102 if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
103 /*
104 Oh no we don't have
105 mbd.size == heads * cylinders * sectors
106 */
107 printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
108 "match size of 0x%lx.\n", nftl->mbd.size);
109 printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
110 "(== 0x%lx sects)\n",
111 nftl->cylinders, nftl->heads , nftl->sectors,
112 (long)nftl->cylinders * (long)nftl->heads *
113 (long)nftl->sectors );
114 }
115
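/*
 * Worked example (illustrative numbers, not from the original code): for
 * a hypothetical device of 131072 sectors (64MiB of 512-byte sectors),
 * the initial guess C=1024, H=16 gives temp = 16384 and
 * sectors = 131072 / 16384 = 8 with no remainder, so the geometry
 * C:1024 H:16 S:8 matches mbd.size exactly and no warning is printed.
 * Sizes that do not factor this way go through the adjustment steps
 * above and may still end up with the "cannot calculate a geometry"
 * warning.
 */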
116 if (add_mtd_blktrans_dev(&nftl->mbd)) {
117 if (nftl->ReplUnitTable)
118 kfree(nftl->ReplUnitTable);
119 if (nftl->EUNtable)
120 kfree(nftl->EUNtable);
121 kfree(nftl);
122 return;
123 }
124#ifdef PSYCHO_DEBUG
125 printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
126#endif
127}
128
129static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
130{
131 struct NFTLrecord *nftl = (void *)dev;
132
133 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
134
135 del_mtd_blktrans_dev(dev);
136 if (nftl->ReplUnitTable)
137 kfree(nftl->ReplUnitTable);
138 if (nftl->EUNtable)
139 kfree(nftl->EUNtable);
140 kfree(nftl);
141}
142
143#ifdef CONFIG_NFTL_RW
144
145/* Actual NFTL access routines */
146/* NFTL_findfreeblock: Find a free Erase Unit on the NFTL partition. This function is used
147 * when a given Virtual Unit Chain needs a free Erase Unit added to it.
148 */
149static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate)
150{
151 /* For a given Virtual Unit Chain: find or create a free block and
152 add it to the chain */
153 /* We're passed the number of the last EUN in the chain, to save us from
154 having to look it up again */
155 u16 pot = nftl->LastFreeEUN;
156 int silly = nftl->nb_blocks;
157
158 /* Normally, we force a fold to happen before we run out of free blocks completely */
159 if (!desperate && nftl->numfreeEUNs < 2) {
160 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
161 return 0xffff;
162 }
163
164 /* Scan for a free block */
165 do {
166 if (nftl->ReplUnitTable[pot] == BLOCK_FREE) {
167 nftl->LastFreeEUN = pot;
168 nftl->numfreeEUNs--;
169 return pot;
170 }
171
172 /* This will probably point to the MediaHdr unit itself,
173 right at the beginning of the partition. But that unit
174 (and the backup unit too) should have the UCI set
175 up so that it's not selected for overwriting */
176 if (++pot > nftl->lastEUN)
177 pot = le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN);
178
179 if (!silly--) {
180 printk("Argh! No free blocks found! LastFreeEUN = %d, "
181 "FirstEUN = %d\n", nftl->LastFreeEUN,
182 le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
183 return 0xffff;
184 }
185 } while (pot != nftl->LastFreeEUN);
186
187 return 0xffff;
188}
189
190static u16 NFTL_foldchain(struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock)
191{
192 u16 BlockMap[MAX_SECTORS_PER_UNIT];
193 unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
194 unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
195 unsigned int thisEUN;
196 int block;
197 int silly;
198 unsigned int targetEUN;
199 struct nftl_oob oob;
200 int inplace = 1;
201 size_t retlen;
202
203 memset(BlockMap, 0xff, sizeof(BlockMap));
204 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
205
206 thisEUN = nftl->EUNtable[thisVUC];
207
208 if (thisEUN == BLOCK_NIL) {
209 printk(KERN_WARNING "Trying to fold non-existent "
210 "Virtual Unit Chain %d!\n", thisVUC);
211 return BLOCK_NIL;
212 }
213
214 /* Scan to find the Erase Unit which holds the actual data for each
215 512-byte block within the Chain.
216 */
217 silly = MAX_LOOPS;
218 targetEUN = BLOCK_NIL;
219 while (thisEUN <= nftl->lastEUN ) {
220 unsigned int status, foldmark;
221
222 targetEUN = thisEUN;
223 for (block = 0; block < nftl->EraseSize / 512; block ++) {
224 MTD_READOOB(nftl->mbd.mtd,
225 (thisEUN * nftl->EraseSize) + (block * 512),
226 16 , &retlen, (char *)&oob);
227 if (block == 2) {
228 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
229 if (foldmark == FOLD_MARK_IN_PROGRESS) {
230 DEBUG(MTD_DEBUG_LEVEL1,
231 "Write Inhibited on EUN %d\n", thisEUN);
232 inplace = 0;
233 } else {
234 /* There's no other reason not to do inplace,
235 except ones that come later. So we don't need
236 to preserve inplace */
237 inplace = 1;
238 }
239 }
240 status = oob.b.Status | oob.b.Status1;
241 BlockLastState[block] = status;
242
243 switch(status) {
244 case SECTOR_FREE:
245 BlockFreeFound[block] = 1;
246 break;
247
248 case SECTOR_USED:
249 if (!BlockFreeFound[block])
250 BlockMap[block] = thisEUN;
251 else
252 printk(KERN_WARNING
253 "SECTOR_USED found after SECTOR_FREE "
254 "in Virtual Unit Chain %d for block %d\n",
255 thisVUC, block);
256 break;
257 case SECTOR_DELETED:
258 if (!BlockFreeFound[block])
259 BlockMap[block] = BLOCK_NIL;
260 else
261 printk(KERN_WARNING
262 "SECTOR_DELETED found after SECTOR_FREE "
263 "in Virtual Unit Chain %d for block %d\n",
264 thisVUC, block);
265 break;
266
267 case SECTOR_IGNORE:
268 break;
269 default:
270 printk("Unknown status for block %d in EUN %d: %x\n",
271 block, thisEUN, status);
272 }
273 }
274
275 if (!silly--) {
276 printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
277 thisVUC);
278 return BLOCK_NIL;
279 }
280
281 thisEUN = nftl->ReplUnitTable[thisEUN];
282 }
283
284 if (inplace) {
285 /* We're being asked to be a fold-in-place. Check
286 that all blocks which actually have data associated
287 with them (i.e. BlockMap[block] != BLOCK_NIL) are
288 either already present or SECTOR_FREE in the target
289 block. If not, we're going to have to fold out-of-place
290 anyway.
291 */
292 for (block = 0; block < nftl->EraseSize / 512 ; block++) {
293 if (BlockLastState[block] != SECTOR_FREE &&
294 BlockMap[block] != BLOCK_NIL &&
295 BlockMap[block] != targetEUN) {
296 DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
297 "block %d was %x lastEUN, "
298 "and is in EUN %d (%s) %d\n",
299 thisVUC, block, BlockLastState[block],
300 BlockMap[block],
301 BlockMap[block]== targetEUN ? "==" : "!=",
302 targetEUN);
303 inplace = 0;
304 break;
305 }
306 }
307
308 if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
309 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
310 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
311 SECTOR_FREE) {
312 DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
313 "Folding out of place.\n", targetEUN);
314 inplace = 0;
315 }
316 }
317
318 if (!inplace) {
319 DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
320 "Trying out-of-place\n", thisVUC);
321 /* We need to find a targetEUN to fold into. */
322 targetEUN = NFTL_findfreeblock(nftl, 1);
323 if (targetEUN == BLOCK_NIL) {
324 /* Ouch. Now we're screwed. We need to do a
325 fold-in-place of another chain to make room
326 for this one. We need a better way of selecting
327 which chain to fold, because makefreeblock will
328 only ask us to fold the same one again.
329 */
330 printk(KERN_WARNING
331 "NFTL_findfreeblock(desperate) returns 0xffff.\n");
332 return BLOCK_NIL;
333 }
334 } else {
335 /* We put a fold mark in the chain we are folding only if
336 we fold in place to help the mount check code. If we do
337 not fold in place, it is possible to find the valid
338 chain by selecting the longer one */
339 oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
340 oob.u.c.unused = 0xffffffff;
341 MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
342 8, &retlen, (char *)&oob.u);
343 }
344
345 /* OK. We now know the location of every block in the Virtual Unit Chain,
346 and the Erase Unit into which we are supposed to be copying.
347 Go for it.
348 */
349 DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
350 for (block = 0; block < nftl->EraseSize / 512 ; block++) {
351 unsigned char movebuf[512];
352 int ret;
353
354 /* If it's in the target EUN already, or if it's pending write, do nothing */
355 if (BlockMap[block] == targetEUN ||
356 (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
357 continue;
358 }
359
360 /* copy only in non free block (free blocks can only
361 happen in case of media errors or deleted blocks) */
362 if (BlockMap[block] == BLOCK_NIL)
363 continue;
364
365 ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
366 512, &retlen, movebuf);
367 if (ret < 0) {
368 ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block])
369 + (block * 512), 512, &retlen,
370 movebuf);
371 if (ret != -EIO)
372 printk("Error went away on retry.\n");
373 }
374 memset(&oob, 0xff, sizeof(struct nftl_oob));
375 oob.b.Status = oob.b.Status1 = SECTOR_USED;
376 MTD_WRITEECC(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + (block * 512),
377 512, &retlen, movebuf, (char *)&oob, &nftl->oobinfo);
378 }
379
380 /* add the header so that it is now a valid chain */
381 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum
382 = cpu_to_le16(thisVUC);
383 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
384
385 MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 8,
386 8, &retlen, (char *)&oob.u);
387
388 /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
389
390 /* At this point, we have two different chains for this Virtual Unit, and no way to tell
391 them apart. If we crash now, we get confused. However, both contain the same data, so we
392 shouldn't actually lose data in this case. It's just that when we load up on a medium which
393 has duplicate chains, we need to free one of the chains because it's not necessary any more.
394 */
395 thisEUN = nftl->EUNtable[thisVUC];
396 DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
397
398 /* For each block in the old chain (except the targetEUN of course),
399 free it and make it available for future use */
400 while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
401 unsigned int EUNtmp;
402
403 EUNtmp = nftl->ReplUnitTable[thisEUN];
404
405 if (NFTL_formatblock(nftl, thisEUN) < 0) {
406 /* could not erase : mark block as reserved
407 */
408 nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
409 } else {
410 /* correctly erased : mark it as free */
411 nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
412 nftl->numfreeEUNs++;
413 }
414 thisEUN = EUNtmp;
415 }
416
417 /* Make this the new start of chain for thisVUC */
418 nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
419 nftl->EUNtable[thisVUC] = targetEUN;
420
421 return targetEUN;
422}
423
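/*
 * Illustrative sketch (not part of the original code) of what a fold does
 * to the two tables.  Before folding Virtual Unit Chain V we might have:
 *
 *	EUNtable[V] -> EUN 5 -> EUN 9 -> EUN 12 -> BLOCK_NIL
 *	(i.e. ReplUnitTable[5] == 9, ReplUnitTable[9] == 12, ...)
 *
 * NFTL_foldchain() copies the most recent copy of each 512-byte block
 * into targetEUN, erases the other units of the old chain (marking them
 * BLOCK_FREE, or BLOCK_RESERVED if the erase fails), and leaves:
 *
 *	EUNtable[V] -> targetEUN -> BLOCK_NIL
 */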
424static u16 NFTL_makefreeblock(struct NFTLrecord *nftl, unsigned pendingblock)
425{
426 /* This is the part that needs some cleverness applied.
427 For now, I'm doing the minimum applicable to actually
428 get the thing to work.
429 Wear-levelling and other clever stuff needs to be implemented
430 and we also need to do some assessment of the results when
431 the system loses power half-way through the routine.
432 */
433 u16 LongestChain = 0;
434 u16 ChainLength = 0, thislen;
435 u16 chain, EUN;
436
437 for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) {
438 EUN = nftl->EUNtable[chain];
439 thislen = 0;
440
441 while (EUN <= nftl->lastEUN) {
442 thislen++;
443 //printk("VUC %d reaches len %d with EUN %d\n", chain, thislen, EUN);
444 EUN = nftl->ReplUnitTable[EUN] & 0x7fff;
445 if (thislen > 0xff00) {
446 printk("Endless loop in Virtual Chain %d: Unit %x\n",
447 chain, EUN);
448 }
449 if (thislen > 0xff10) {
450 /* Actually, don't return failure. Just ignore this chain and
451 get on with it. */
452 thislen = 0;
453 break;
454 }
455 }
456
457 if (thislen > ChainLength) {
458 //printk("New longest chain is %d with length %d\n", chain, thislen);
459 ChainLength = thislen;
460 LongestChain = chain;
461 }
462 }
463
464 if (ChainLength < 2) {
465 printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
466 "Failing request\n");
467 return 0xffff;
468 }
469
470 return NFTL_foldchain (nftl, LongestChain, pendingblock);
471}
472
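/*
 * Example of the heuristic above (illustrative only): with chains of
 * length 5, 3 and 1, the chain of length 5 is folded.  If no chain has
 * at least two Erase Units there is nothing worth folding, so 0xffff is
 * returned and the caller falls back to NFTL_findfreeblock() with
 * desperate set (see NFTL_findwriteunit() below).
 */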
473/* NFTL_findwriteunit: Return the unit number into which we can write
474 for this block. Make it available if it isn't already
475*/
476static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
477{
478 u16 lastEUN;
479 u16 thisVUC = block / (nftl->EraseSize / 512);
480 unsigned int writeEUN;
481 unsigned long blockofs = (block * 512) & (nftl->EraseSize -1);
482 size_t retlen;
483 int silly, silly2 = 3;
484 struct nftl_oob oob;
485
486 do {
487 /* Scan the media to find a unit in the VUC which has
488 a free space for the block in question.
489 */
490
491 /* This condition catches the 0x[7f]fff cases, as well as
492 being a sanity check for past-end-of-media access
493 */
494 lastEUN = BLOCK_NIL;
495 writeEUN = nftl->EUNtable[thisVUC];
496 silly = MAX_LOOPS;
497 while (writeEUN <= nftl->lastEUN) {
498 struct nftl_bci bci;
499 size_t retlen;
500 unsigned int status;
501
502 lastEUN = writeEUN;
503
504 MTD_READOOB(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
505 8, &retlen, (char *)&bci);
506
507 DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
508 block , writeEUN, le16_to_cpu(bci.Status));
509
510 status = bci.Status | bci.Status1;
511 switch(status) {
512 case SECTOR_FREE:
513 return writeEUN;
514
515 case SECTOR_DELETED:
516 case SECTOR_USED:
517 case SECTOR_IGNORE:
518 break;
519 default:
520 // Invalid block. Don't use it any more. Must implement.
521 break;
522 }
523
524 if (!silly--) {
525 printk(KERN_WARNING
526 "Infinite loop in Virtual Unit Chain 0x%x\n",
527 thisVUC);
528 return 0xffff;
529 }
530
531 /* Skip to next block in chain */
532 writeEUN = nftl->ReplUnitTable[writeEUN];
533 }
534
535 /* OK. We didn't find one in the existing chain, or there
536 is no existing chain. */
537
538 /* Try to find an already-free block */
539 writeEUN = NFTL_findfreeblock(nftl, 0);
540
541 if (writeEUN == BLOCK_NIL) {
542 /* That didn't work - there were no free blocks just
543 waiting to be picked up. We're going to have to fold
544 a chain to make room.
545 */
546
547 /* First remember the start of this chain */
548 //u16 startEUN = nftl->EUNtable[thisVUC];
549
550 //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
551 writeEUN = NFTL_makefreeblock(nftl, 0xffff);
552
553 if (writeEUN == BLOCK_NIL) {
554 /* OK, we accept that the above comment is
555 lying - there may have been free blocks
556 last time we called NFTL_findfreeblock(),
557 but they are reserved for when we're
558 desperate. Well, now we're desperate.
559 */
560 DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
561 writeEUN = NFTL_findfreeblock(nftl, 1);
562 }
563 if (writeEUN == BLOCK_NIL) {
564 /* Ouch. This should never happen - we should
565 always be able to make some room somehow.
566 If we get here, we've allocated more storage
567 space than actual media, or our makefreeblock
568 routine is missing something.
569 */
570 printk(KERN_WARNING "Cannot make free space.\n");
571 return BLOCK_NIL;
572 }
573 //printk("Restarting scan\n");
574 lastEUN = BLOCK_NIL;
575 continue;
576 }
577
578 /* We've found a free block. Insert it into the chain. */
579
580 if (lastEUN != BLOCK_NIL) {
581 thisVUC |= 0x8000; /* It's a replacement block */
582 } else {
583 /* The first block in a new chain */
584 nftl->EUNtable[thisVUC] = writeEUN;
585 }
586
587 /* set up the actual EUN we're writing into */
588 /* Both in our cache... */
589 nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
590
591 /* ... and on the flash itself */
592 MTD_READOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8,
593 &retlen, (char *)&oob.u);
594
595 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
596
597 MTD_WRITEOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8,
598 &retlen, (char *)&oob.u);
599
600 /* we link the new block to the chain only after the
601 block is ready. It avoids the case where the chain
602 could point to a free block */
603 if (lastEUN != BLOCK_NIL) {
604 /* Both in our cache... */
605 nftl->ReplUnitTable[lastEUN] = writeEUN;
606 /* ... and on the flash itself */
607 MTD_READOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8,
608 8, &retlen, (char *)&oob.u);
609
610 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
611 = cpu_to_le16(writeEUN);
612
613 MTD_WRITEOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8,
614 8, &retlen, (char *)&oob.u);
615 }
616
617 return writeEUN;
618
619 } while (silly2--);
620
621 printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
622 thisVUC);
623 return 0xffff;
624}
625
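/*
 * Address arithmetic used above and below, shown with a hypothetical
 * EraseSize of 8192 bytes (illustrative only): there are 8192/512 = 16
 * sectors per Erase Unit, so logical sector 35 maps to
 *
 *	thisVUC  = 35 / 16                 = 2
 *	blockofs = (35 * 512) & (8192 - 1) = 1536  (sector 3 of the unit)
 *
 * and its data lives at writeEUN * EraseSize + blockofs on the flash,
 * with the per-sector status (struct nftl_bci) read from the OOB of
 * that same sector.
 */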
626static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
627 char *buffer)
628{
629 struct NFTLrecord *nftl = (void *)mbd;
630 u16 writeEUN;
631 unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
632 size_t retlen;
633 struct nftl_oob oob;
634
635 writeEUN = NFTL_findwriteunit(nftl, block);
636
637 if (writeEUN == BLOCK_NIL) {
638 printk(KERN_WARNING
639 "NFTL_writeblock(): Cannot find block to write to\n");
640 /* If we _still_ haven't got a block to use, we're screwed */
641 return 1;
642 }
643
644 memset(&oob, 0xff, sizeof(struct nftl_oob));
645 oob.b.Status = oob.b.Status1 = SECTOR_USED;
646 MTD_WRITEECC(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
647 512, &retlen, (char *)buffer, (char *)&oob, &nftl->oobinfo);
648 /* need to write SECTOR_USED flags since they are not written in mtd_writeecc */
649
650 return 0;
651}
652#endif /* CONFIG_NFTL_RW */
653
654static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
655 char *buffer)
656{
657 struct NFTLrecord *nftl = (void *)mbd;
658 u16 lastgoodEUN;
659 u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
660 unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
661 unsigned int status;
662 int silly = MAX_LOOPS;
663 size_t retlen;
664 struct nftl_bci bci;
665
666 lastgoodEUN = BLOCK_NIL;
667
668 if (thisEUN != BLOCK_NIL) {
669 while (thisEUN < nftl->nb_blocks) {
670 if (MTD_READOOB(nftl->mbd.mtd, (thisEUN * nftl->EraseSize) + blockofs,
671 8, &retlen, (char *)&bci) < 0)
672 status = SECTOR_IGNORE;
673 else
674 status = bci.Status | bci.Status1;
675
676 switch (status) {
677 case SECTOR_FREE:
678 /* no modification of a sector should follow a free sector */
679 goto the_end;
680 case SECTOR_DELETED:
681 lastgoodEUN = BLOCK_NIL;
682 break;
683 case SECTOR_USED:
684 lastgoodEUN = thisEUN;
685 break;
686 case SECTOR_IGNORE:
687 break;
688 default:
689 printk("Unknown status for block %ld in EUN %d: %x\n",
690 block, thisEUN, status);
691 break;
692 }
693
694 if (!silly--) {
695 printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n",
696 block / (nftl->EraseSize / 512));
697 return 1;
698 }
699 thisEUN = nftl->ReplUnitTable[thisEUN];
700 }
701 }
702
703 the_end:
704 if (lastgoodEUN == BLOCK_NIL) {
705 /* the requested block is not on the media, return all 0x00 */
706 memset(buffer, 0, 512);
707 } else {
708 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
709 size_t retlen;
710 if (MTD_READ(nftl->mbd.mtd, ptr, 512, &retlen, buffer))
711 return -EIO;
712 }
713 return 0;
714}
715
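/*
 * Read resolution, illustrated (not part of the original code): if the
 * chain for a block reports, in order,
 *
 *	EUN 4: SECTOR_USED,  EUN 7: SECTOR_DELETED,  EUN 9: SECTOR_USED
 *
 * then lastgoodEUN ends up as 9 and the data is read from EUN 9.  A
 * SECTOR_DELETED with nothing valid after it yields a zero-filled
 * buffer, and a SECTOR_FREE stops the walk early, since nothing should
 * follow a free sector in a correctly written chain.
 */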
716static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
717{
718 struct NFTLrecord *nftl = (void *)dev;
719
720 geo->heads = nftl->heads;
721 geo->sectors = nftl->sectors;
722 geo->cylinders = nftl->cylinders;
723
724 return 0;
725}
726
727/****************************************************************************
728 *
729 * Module stuff
730 *
731 ****************************************************************************/
732
733
734static struct mtd_blktrans_ops nftl_tr = {
735 .name = "nftl",
736 .major = NFTL_MAJOR,
737 .part_bits = NFTL_PARTN_BITS,
738 .getgeo = nftl_getgeo,
739 .readsect = nftl_readblock,
740#ifdef CONFIG_NFTL_RW
741 .writesect = nftl_writeblock,
742#endif
743 .add_mtd = nftl_add_mtd,
744 .remove_dev = nftl_remove_dev,
745 .owner = THIS_MODULE,
746};
747
748extern char nftlmountrev[];
749
750static int __init init_nftl(void)
751{
752 printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.97 $, nftlmount.c %s\n", nftlmountrev);
753
754 return register_mtd_blktrans(&nftl_tr);
755}
756
757static void __exit cleanup_nftl(void)
758{
759 deregister_mtd_blktrans(&nftl_tr);
760}
761
762module_init(init_nftl);
763module_exit(cleanup_nftl);
764
765MODULE_LICENSE("GPL");
766MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
767MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
new file mode 100644
index 000000000000..84afd9029f53
--- /dev/null
+++ b/drivers/mtd/nftlmount.c
@@ -0,0 +1,770 @@
1/*
2 * NFTL mount code with extensive checks
3 *
4 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
5 * Copyright (C) 2000 Netgem S.A.
6 *
7 * $Id: nftlmount.c,v 1.40 2004/11/22 14:38:29 kalev Exp $
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/kernel.h>
25#include <asm/errno.h>
26#include <linux/delay.h>
27#include <linux/slab.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nftl.h>
31
32#define SECTORSIZE 512
33
34char nftlmountrev[]="$Revision: 1.40 $";
35
36/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
37 * various device information of the NFTL partition and Bad Unit Table. Update
38 * the ReplUnitTable[] according to the Bad Unit Table. ReplUnitTable[]
39 * is used for management of Erase Units in other routines in nftlcore.c and nftlmount.c
40 */
41static int find_boot_record(struct NFTLrecord *nftl)
42{
43 struct nftl_uci1 h1;
44 unsigned int block, boot_record_count = 0;
45 size_t retlen;
46 u8 buf[SECTORSIZE];
47 struct NFTLMediaHeader *mh = &nftl->MediaHdr;
48 unsigned int i;
49
50 /* Assume logical EraseSize == physical erasesize for starting the scan.
51 We'll sort it out later if we find a MediaHeader which says otherwise */
52 /* Actually, we won't. The new DiskOnChip driver has already scanned
53 the MediaHeader and adjusted the virtual erasesize it presents in
54 the mtd device accordingly. We could even get rid of
55 nftl->EraseSize if there were any point in doing so. */
56 nftl->EraseSize = nftl->mbd.mtd->erasesize;
57 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
58
59 nftl->MediaUnit = BLOCK_NIL;
60 nftl->SpareMediaUnit = BLOCK_NIL;
61
62 /* search for a valid boot record */
63 for (block = 0; block < nftl->nb_blocks; block++) {
64 int ret;
65
66 /* Check for ANAND header first. Then can whinge if it's found but later
67 checks fail */
68 ret = MTD_READ(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf);
69 /* We ignore ret in case the ECC of the MediaHeader is invalid
70 (which is apparently acceptable) */
71 if (retlen != SECTORSIZE) {
72 static int warncount = 5;
73
74 if (warncount) {
75 printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
76 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
77 if (!--warncount)
78 printk(KERN_WARNING "Further failures for this block will not be printed\n");
79 }
80 continue;
81 }
82
83 if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
84 /* ANAND\0 not found. Continue */
85#if 0
86 printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
87 block * nftl->EraseSize, nftl->mbd.mtd->index);
88#endif
89 continue;
90 }
91
92 /* To be safer with BIOS, also use erase mark as discriminant */
93 if ((ret = MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8,
94 8, &retlen, (char *)&h1) < 0)) {
95 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
96 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
97 continue;
98 }
99
100#if 0 /* Some people seem to have devices without ECC or erase marks
101 on the Media Header blocks. There are enough other sanity
102 checks in here that we can probably do without it.
103 */
104 if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
105 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
106 block * nftl->EraseSize, nftl->mbd.mtd->index,
107 le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
108 continue;
109 }
110
111 /* Finally reread to check ECC */
112 if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE,
113 &retlen, buf, (char *)&oob, NULL) < 0)) {
114 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
115 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
116 continue;
117 }
118
119 /* Paranoia. Check the ANAND header is still there after the ECC read */
120 if (memcmp(buf, "ANAND", 6)) {
121 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
122 block * nftl->EraseSize, nftl->mbd.mtd->index);
123 printk(KERN_NOTICE "New data are: %02x %02x %02x %02x %02x %02x\n",
124 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
125 continue;
126 }
127#endif
128 /* OK, we like it. */
129
130 if (boot_record_count) {
131 /* We've already processed one. So we just check if
132 this one is the same as the first one we found */
133 if (memcmp(mh, buf, sizeof(struct NFTLMediaHeader))) {
134 printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
135 nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
136 /* if (debug) Print both side by side */
137 if (boot_record_count < 2) {
138 /* We haven't yet seen two real ones */
139 return -1;
140 }
141 continue;
142 }
143 if (boot_record_count == 1)
144 nftl->SpareMediaUnit = block;
145
146 /* Mark this boot record (NFTL MediaHeader) block as reserved */
147 nftl->ReplUnitTable[block] = BLOCK_RESERVED;
148
149
150 boot_record_count++;
151 continue;
152 }
153
154 /* This is the first we've seen. Copy the media header structure into place */
155 memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
156
157 /* Do some sanity checks on it */
158#if 0
159The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
160erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
161device is already correct.
162 if (mh->UnitSizeFactor == 0) {
163 printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
164 } else if (mh->UnitSizeFactor < 0xfc) {
165 printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor 0x%02x\n",
166 mh->UnitSizeFactor);
167 return -1;
168 } else if (mh->UnitSizeFactor != 0xff) {
169 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
170 mh->UnitSizeFactor);
171 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
172 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
173 }
174#endif
175 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
176 if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
177 printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
178 printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
179 nftl->nb_boot_blocks, nftl->nb_blocks);
180 return -1;
181 }
182
183 nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
184 if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
185 printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
186 printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
187 nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
188 return -1;
189 }
190
191 nftl->mbd.size = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
192
193 /* If we're not using the last sectors in the device for some reason,
194 reduce nb_blocks accordingly so we forget they're there */
195 nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
196
197 /* XXX: will be suppressed */
198 nftl->lastEUN = nftl->nb_blocks - 1;
199
200 /* memory alloc */
201 nftl->EUNtable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
202 if (!nftl->EUNtable) {
203 printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n");
204 return -ENOMEM;
205 }
206
207 nftl->ReplUnitTable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
208 if (!nftl->ReplUnitTable) {
209 kfree(nftl->EUNtable);
210 printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n");
211 return -ENOMEM;
212 }
213
214 /* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
215 for (i = 0; i < nftl->nb_boot_blocks; i++)
216 nftl->ReplUnitTable[i] = BLOCK_RESERVED;
217 /* mark all remaining blocks as potentially containing data */
218 for (; i < nftl->nb_blocks; i++) {
219 nftl->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
220 }
221
222 /* Mark this boot record (NFTL MediaHeader) block as reserved */
223 nftl->ReplUnitTable[block] = BLOCK_RESERVED;
224
225 /* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
226 for (i = 0; i < nftl->nb_blocks; i++) {
227#if 0
228The new DiskOnChip driver already scanned the bad block table. Just query it.
229 if ((i & (SECTORSIZE - 1)) == 0) {
230 /* read one sector for every SECTORSIZE of blocks */
231 if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize +
232 i + SECTORSIZE, SECTORSIZE, &retlen, buf,
233 (char *)&oob, NULL)) < 0) {
234 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
235 ret);
236 kfree(nftl->ReplUnitTable);
237 kfree(nftl->EUNtable);
238 return -1;
239 }
240 }
241 /* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
242 if (buf[i & (SECTORSIZE - 1)] != 0xff)
243 nftl->ReplUnitTable[i] = BLOCK_RESERVED;
244#endif
245 if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
246 nftl->ReplUnitTable[i] = BLOCK_RESERVED;
247 }
248
249 nftl->MediaUnit = block;
250 boot_record_count++;
251
252 } /* foreach (block) */
253
254	return boot_record_count ? 0 : -1;
255}
256
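/*
 * Illustrative numbers (hypothetical, not from the original code): for a
 * device with an 8KiB erasesize and a 16MiB size, nb_blocks starts out
 * as 2048.  If the Media Header reports FirstPhysicalEUN = 2 and
 * FormattedSize = 16252928 (1984 * 8192), then nb_boot_blocks = 2 and
 * numvunits = 1984, both sanity checks above pass (2 + 2 < 2048 and
 * 1984 <= 2048 - 2 - 2), and mbd.size becomes 1984 * 16 = 31744 sectors
 * of 512 bytes.
 */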
257static int memcmpb(void *a, int c, int n)
258{
259 int i;
260 for (i = 0; i < n; i++) {
261 if (c != ((unsigned char *)a)[i])
262 return 1;
263 }
264 return 0;
265}
266
267/* check_free_sectors: check that a free sector is actually FREE, i.e. all 0xff in both the data and OOB areas */
268static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
269 int check_oob)
270{
271 int i;
272 size_t retlen;
273 u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
274
275 for (i = 0; i < len; i += SECTORSIZE) {
276 if (MTD_READECC(nftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &nftl->oobinfo) < 0)
277 return -1;
278 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
279 return -1;
280
281 if (check_oob) {
282 if (memcmpb(buf + SECTORSIZE, 0xff, nftl->mbd.mtd->oobsize) != 0)
283 return -1;
284 }
285 address += SECTORSIZE;
286 }
287
288 return 0;
289}
290
291/* NFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the Erase Unit and
292 * update the NFTL metadata. Each erase operation is checked with check_free_sectors().
293 *
294 * Return: 0 on success, -1 on error.
295 *
296 * ToDo: 1. Is it necessary to call check_free_sectors() after erasing ??
297 */
298int NFTL_formatblock(struct NFTLrecord *nftl, int block)
299{
300 size_t retlen;
301 unsigned int nb_erases, erase_mark;
302 struct nftl_uci1 uci;
303 struct erase_info *instr = &nftl->instr;
304
305 /* Read the Unit Control Information #1 for Wear-Leveling */
306 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8,
307 8, &retlen, (char *)&uci) < 0)
308 goto default_uci1;
309
310 erase_mark = le16_to_cpu ((uci.EraseMark | uci.EraseMark1));
311 if (erase_mark != ERASE_MARK) {
312 default_uci1:
313 uci.EraseMark = cpu_to_le16(ERASE_MARK);
314 uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
315 uci.WearInfo = cpu_to_le32(0);
316 }
317
318 memset(instr, 0, sizeof(struct erase_info));
319
320 /* XXX: use async erase interface, XXX: test return code */
321 instr->mtd = nftl->mbd.mtd;
322 instr->addr = block * nftl->EraseSize;
323 instr->len = nftl->EraseSize;
324 MTD_ERASE(nftl->mbd.mtd, instr);
325
326 if (instr->state == MTD_ERASE_FAILED) {
327 printk("Error while formatting block %d\n", block);
328 goto fail;
329 }
330
331 /* increase and write Wear-Leveling info */
332 nb_erases = le32_to_cpu(uci.WearInfo);
333 nb_erases++;
334
335	/* wrap (almost impossible with current flash chips) or free block */
336 if (nb_erases == 0)
337 nb_erases = 1;
338
339	/* check the "freeness" of the Erase Unit before updating metadata.
340	 * FixMe: is this check really necessary, since we have already checked
341	 * the return code after the erase operation? */
342 if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
343 goto fail;
344
345 uci.WearInfo = le32_to_cpu(nb_erases);
346 if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
347 &retlen, (char *)&uci) < 0)
348 goto fail;
349 return 0;
350fail:
351 /* could not format, update the bad block table (caller is responsible
352 for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
353 nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
354 return -1;
355}
356
357/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
358 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
359 * was being folded when NFTL was interrupted.
360 *
361 * The check_free_sectors in this function is necessary. There is a possible
362 * situation that, after writing the Data area, the Block Control Information is
363 * not updated accordingly (due to power failure or something), which leaves the block
364 * in an inconsistent state. So we have to check if a block is really FREE in this
365 * case. */
366static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
367{
368 unsigned int block, i, status;
369 struct nftl_bci bci;
370 int sectors_per_block;
371 size_t retlen;
372
373 sectors_per_block = nftl->EraseSize / SECTORSIZE;
374 block = first_block;
375 for (;;) {
376 for (i = 0; i < sectors_per_block; i++) {
377 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i * SECTORSIZE,
378 8, &retlen, (char *)&bci) < 0)
379 status = SECTOR_IGNORE;
380 else
381 status = bci.Status | bci.Status1;
382
383 switch(status) {
384 case SECTOR_FREE:
385 /* verify that the sector is really free. If not, mark
386 as ignore */
387 if (memcmpb(&bci, 0xff, 8) != 0 ||
388 check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
389 SECTORSIZE, 0) != 0) {
390 printk("Incorrect free sector %d in block %d: "
391 "marking it as ignored\n",
392 i, block);
393
394 /* sector not free actually : mark it as SECTOR_IGNORE */
395 bci.Status = SECTOR_IGNORE;
396 bci.Status1 = SECTOR_IGNORE;
397 MTD_WRITEOOB(nftl->mbd.mtd,
398 block * nftl->EraseSize + i * SECTORSIZE,
399 8, &retlen, (char *)&bci);
400 }
401 break;
402 default:
403 break;
404 }
405 }
406
407 /* proceed to next Erase Unit on the chain */
408 block = nftl->ReplUnitTable[block];
409 if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
410 printk("incorrect ReplUnitTable[] : %d\n", block);
411 if (block == BLOCK_NIL || block >= nftl->nb_blocks)
412 break;
413 }
414}
415
416/* calc_chain_length: Walk through a Virtual Unit Chain and count its length */
417static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
418{
419 unsigned int length = 0, block = first_block;
420
421 for (;;) {
422 length++;
423		/* avoid infinite loops, although this is guaranteed not to
424 happen because of the previous checks */
425 if (length >= nftl->nb_blocks) {
426 printk("nftl: length too long %d !\n", length);
427 break;
428 }
429
430 block = nftl->ReplUnitTable[block];
431 if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
432 printk("incorrect ReplUnitTable[] : %d\n", block);
433 if (block == BLOCK_NIL || block >= nftl->nb_blocks)
434 break;
435 }
436 return length;
437}
438
439/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
440 * Virtual Unit Chain, i.e. all the units are disconnected.
441 *
442 * It is not strictly correct to begin from the first block of the chain because,
443 * if we stop the code, we may see a valid chain again if there was a first_block
444 * flag in a block inside it. But is it really a problem?
445 *
446 * FixMe: Figure out what the last statement means. What happens if power fails while
447 * we are in the for (;;) loop formatting blocks ??
448 */
449static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
450{
451 unsigned int block = first_block, block1;
452
453 printk("Formatting chain at block %d\n", first_block);
454
455 for (;;) {
456 block1 = nftl->ReplUnitTable[block];
457
458 printk("Formatting block %d\n", block);
459 if (NFTL_formatblock(nftl, block) < 0) {
460 /* cannot format !!!! Mark it as Bad Unit */
461 nftl->ReplUnitTable[block] = BLOCK_RESERVED;
462 } else {
463 nftl->ReplUnitTable[block] = BLOCK_FREE;
464 }
465
466 /* goto next block on the chain */
467 block = block1;
468
469 if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
470 printk("incorrect ReplUnitTable[] : %d\n", block);
471 if (block == BLOCK_NIL || block >= nftl->nb_blocks)
472 break;
473 }
474}
475
476/* check_and_mark_free_block: Verify that a block is free in the NFTL sense (valid erase mark) or
477 * totally free (only 0xff).
478 *
479 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should meet the
480 * following criteria:
481 * 1. */
482static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
483{
484 struct nftl_uci1 h1;
485 unsigned int erase_mark;
486 size_t retlen;
487
488 /* check erase mark. */
489 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
490 &retlen, (char *)&h1) < 0)
491 return -1;
492
493 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
494 if (erase_mark != ERASE_MARK) {
495 /* if no erase mark, the block must be totally free. This is
496		   possible in two cases: an empty filesystem or an interrupted erase (very unlikely) */
497 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
498 return -1;
499
500 /* free block : write erase mark */
501 h1.EraseMark = cpu_to_le16(ERASE_MARK);
502 h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
503 h1.WearInfo = cpu_to_le32(0);
504 if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
505 &retlen, (char *)&h1) < 0)
506 return -1;
507 } else {
508#if 0
509 /* if erase mark present, need to skip it when doing check */
510 for (i = 0; i < nftl->EraseSize; i += SECTORSIZE) {
511 /* check free sector */
512 if (check_free_sectors (nftl, block * nftl->EraseSize + i,
513 SECTORSIZE, 0) != 0)
514 return -1;
515
516 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i,
517 16, &retlen, buf) < 0)
518 return -1;
519 if (i == SECTORSIZE) {
520 /* skip erase mark */
521 if (memcmpb(buf, 0xff, 8))
522 return -1;
523 } else {
524 if (memcmpb(buf, 0xff, 16))
525 return -1;
526 }
527 }
528#endif
529 }
530
531 return 0;
532}
533
534/* get_fold_mark: Read the fold mark from Unit Control Information #2. We use FOLD_MARK_IN_PROGRESS
535 * to indicate that a Virtual Unit Chain fold is in progress. If the UCI #2
536 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process was interrupted
537 * for some reason. A clean-up/check of the VUC is necessary in this case.
538 *
539 * WARNING: returns 0 on read error
540 */
541static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
542{
543 struct nftl_uci2 uci;
544 size_t retlen;
545
546 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
547 8, &retlen, (char *)&uci) < 0)
548 return 0;
549
550 return le16_to_cpu((uci.FoldMark | uci.FoldMark1));
551}
552
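/*
 * Layout summary, derived from the OOB reads in this file (for
 * illustration): each Erase Unit keeps its Unit Control Information in
 * the OOB area of its first three sectors, 8 bytes at offset 8:
 *
 *	sector 0: nftl_uci0 - VirtUnitNum / ReplUnitNum (chain linkage)
 *	sector 1: nftl_uci1 - EraseMark / WearInfo      (erase count)
 *	sector 2: nftl_uci2 - FoldMark                  (fold in progress)
 *
 * which is why the MTD_READOOB/MTD_WRITEOOB calls here address
 * block * EraseSize + n * SECTORSIZE + 8 for n = 0, 1, 2.
 */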
553int NFTL_mount(struct NFTLrecord *s)
554{
555 int i;
556 unsigned int first_logical_block, logical_block, rep_block, nb_erases, erase_mark;
557 unsigned int block, first_block, is_first_block;
558 int chain_length, do_format_chain;
559 struct nftl_uci0 h0;
560 struct nftl_uci1 h1;
561 size_t retlen;
562
563 /* search for NFTL MediaHeader and Spare NFTL Media Header */
564 if (find_boot_record(s) < 0) {
565 printk("Could not find valid boot record\n");
566 return -1;
567 }
568
569 /* init the logical to physical table */
570 for (i = 0; i < s->nb_blocks; i++) {
571 s->EUNtable[i] = BLOCK_NIL;
572 }
573
574 /* first pass : explore each block chain */
575 first_logical_block = 0;
576 for (first_block = 0; first_block < s->nb_blocks; first_block++) {
577 /* if the block was not already explored, we can look at it */
578 if (s->ReplUnitTable[first_block] == BLOCK_NOTEXPLORED) {
579 block = first_block;
580 chain_length = 0;
581 do_format_chain = 0;
582
583 for (;;) {
584 /* read the block header. If error, we format the chain */
585 if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8, 8,
586 &retlen, (char *)&h0) < 0 ||
587 MTD_READOOB(s->mbd.mtd, block * s->EraseSize + SECTORSIZE + 8, 8,
588 &retlen, (char *)&h1) < 0) {
589 s->ReplUnitTable[block] = BLOCK_NIL;
590 do_format_chain = 1;
591 break;
592 }
593
594 logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
595 rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
596 nb_erases = le32_to_cpu (h1.WearInfo);
597 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
598
599 is_first_block = !(logical_block >> 15);
600 logical_block = logical_block & 0x7fff;
601
602 /* invalid/free block test */
603 if (erase_mark != ERASE_MARK || logical_block >= s->nb_blocks) {
604 if (chain_length == 0) {
605 /* if not currently in a chain, we can handle it safely */
606 if (check_and_mark_free_block(s, block) < 0) {
607 /* not really free: format it */
608 printk("Formatting block %d\n", block);
609 if (NFTL_formatblock(s, block) < 0) {
610 /* could not format: reserve the block */
611 s->ReplUnitTable[block] = BLOCK_RESERVED;
612 } else {
613 s->ReplUnitTable[block] = BLOCK_FREE;
614 }
615 } else {
616 /* free block: mark it */
617 s->ReplUnitTable[block] = BLOCK_FREE;
618 }
619 /* directly examine the next block. */
620 goto examine_ReplUnitTable;
621 } else {
622 /* the block was in a chain : this is bad. We
623						   must format the whole chain */
624 printk("Block %d: free but referenced in chain %d\n",
625 block, first_block);
626 s->ReplUnitTable[block] = BLOCK_NIL;
627 do_format_chain = 1;
628 break;
629 }
630 }
631
632 /* we accept only first blocks here */
633 if (chain_length == 0) {
634 /* this block is not the first block in chain :
635 ignore it, it will be included in a chain
636 later, or marked as not explored */
637 if (!is_first_block)
638 goto examine_ReplUnitTable;
639 first_logical_block = logical_block;
640 } else {
641 if (logical_block != first_logical_block) {
642 printk("Block %d: incorrect logical block: %d expected: %d\n",
643 block, logical_block, first_logical_block);
644 /* the chain is incorrect : we must format it,
645						   but we need to read it completely */
646 do_format_chain = 1;
647 }
648 if (is_first_block) {
649 /* we accept that a block is marked as first
650 block while being last block in a chain
651 only if the chain is being folded */
652 if (get_fold_mark(s, block) != FOLD_MARK_IN_PROGRESS ||
653 rep_block != 0xffff) {
654 printk("Block %d: incorrectly marked as first block in chain\n",
655 block);
656 /* the chain is incorrect : we must format it,
657							   but we need to read it completely */
658 do_format_chain = 1;
659 } else {
660 printk("Block %d: folding in progress - ignoring first block flag\n",
661 block);
662 }
663 }
664 }
665 chain_length++;
666 if (rep_block == 0xffff) {
667 /* no more blocks after */
668 s->ReplUnitTable[block] = BLOCK_NIL;
669 break;
670 } else if (rep_block >= s->nb_blocks) {
671 printk("Block %d: referencing invalid block %d\n",
672 block, rep_block);
673 do_format_chain = 1;
674 s->ReplUnitTable[block] = BLOCK_NIL;
675 break;
676 } else if (s->ReplUnitTable[rep_block] != BLOCK_NOTEXPLORED) {
677 /* same problem as previous 'is_first_block' test:
678 we accept that the last block of a chain has
679 the first_block flag set if folding is in
680 progress. We handle here the case where the
681 last block appeared first */
682 if (s->ReplUnitTable[rep_block] == BLOCK_NIL &&
683 s->EUNtable[first_logical_block] == rep_block &&
684 get_fold_mark(s, first_block) == FOLD_MARK_IN_PROGRESS) {
685 /* EUNtable[] will be set after */
686 printk("Block %d: folding in progress - ignoring first block flag\n",
687 rep_block);
688 s->ReplUnitTable[block] = rep_block;
689 s->EUNtable[first_logical_block] = BLOCK_NIL;
690 } else {
691 printk("Block %d: referencing block %d already in another chain\n",
692 block, rep_block);
693 /* XXX: should handle correctly fold in progress chains */
694 do_format_chain = 1;
695 s->ReplUnitTable[block] = BLOCK_NIL;
696 }
697 break;
698 } else {
699 /* this is OK */
700 s->ReplUnitTable[block] = rep_block;
701 block = rep_block;
702 }
703 }
704
705 /* the chain was completely explored. Now we can decide
706 what to do with it */
707 if (do_format_chain) {
708 /* invalid chain : format it */
709 format_chain(s, first_block);
710 } else {
711 unsigned int first_block1, chain_to_format, chain_length1;
712 int fold_mark;
713
714 /* valid chain : get foldmark */
715 fold_mark = get_fold_mark(s, first_block);
716 if (fold_mark == 0) {
717 /* cannot get foldmark : format the chain */
718					printk("Could not read foldmark at block %d\n", first_block);
719 format_chain(s, first_block);
720 } else {
721 if (fold_mark == FOLD_MARK_IN_PROGRESS)
722 check_sectors_in_chain(s, first_block);
723
724 /* now handle the case where we find two chains at the
725 same virtual address : we select the longer one,
726 because the shorter one is the one which was being
727 folded if the folding was not done in place */
728 first_block1 = s->EUNtable[first_logical_block];
729 if (first_block1 != BLOCK_NIL) {
730 /* XXX: what to do if same length ? */
731 chain_length1 = calc_chain_length(s, first_block1);
732 printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
733 first_block1, chain_length1, first_block, chain_length);
734
735 if (chain_length >= chain_length1) {
736 chain_to_format = first_block1;
737 s->EUNtable[first_logical_block] = first_block;
738 } else {
739 chain_to_format = first_block;
740 }
741 format_chain(s, chain_to_format);
742 } else {
743 s->EUNtable[first_logical_block] = first_block;
744 }
745 }
746 }
747 }
748 examine_ReplUnitTable:;
749 }
750
751 /* second pass to format unreferenced blocks and init free block count */
752 s->numfreeEUNs = 0;
753 s->LastFreeEUN = le16_to_cpu(s->MediaHdr.FirstPhysicalEUN);
754
755 for (block = 0; block < s->nb_blocks; block++) {
756 if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
757 printk("Unreferenced block %d, formatting it\n", block);
758 if (NFTL_formatblock(s, block) < 0)
759 s->ReplUnitTable[block] = BLOCK_RESERVED;
760 else
761 s->ReplUnitTable[block] = BLOCK_FREE;
762 }
763 if (s->ReplUnitTable[block] == BLOCK_FREE) {
764 s->numfreeEUNs++;
765 s->LastFreeEUN = block;
766 }
767 }
768
769 return 0;
770}
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
new file mode 100644
index 000000000000..13f9e992bef8
--- /dev/null
+++ b/drivers/mtd/redboot.c
@@ -0,0 +1,235 @@
1/*
2 * $Id: redboot.c,v 1.17 2004/11/22 11:33:56 ijc Exp $
3 *
4 * Parse RedBoot-style Flash Image System (FIS) tables and
5 * produce a Linux partition array to match.
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/init.h>
11#include <linux/vmalloc.h>
12
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h>
15
16struct fis_image_desc {
17 unsigned char name[16]; // Null terminated name
18 unsigned long flash_base; // Address within FLASH of image
19 unsigned long mem_base; // Address in memory where it executes
20 unsigned long size; // Length of image
21 unsigned long entry_point; // Execution entry point
22 unsigned long data_length; // Length of actual data
23 unsigned char _pad[256-(16+7*sizeof(unsigned long))];
24 unsigned long desc_cksum; // Checksum over image descriptor
25 unsigned long file_cksum; // Checksum over image data
26};
27
28struct fis_list {
29 struct fis_image_desc *img;
30 struct fis_list *next;
31};
32
33static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK;
34module_param(directory, int, 0);
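/*
 * Illustrative example (hypothetical numbers): on a 16MiB flash with
 * 128KiB erase blocks and directory = -1 (a common default), the FIS
 * table is looked for at offset 16MiB - 128KiB, i.e. in the last erase
 * block; a positive value such as directory = 1 would instead select
 * offset 1 * 128KiB from the start of the device (see the offset
 * calculation in parse_redboot_partitions() below).
 */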
35
36static inline int redboot_checksum(struct fis_image_desc *img)
37{
38 /* RedBoot doesn't actually write the desc_cksum field yet AFAICT */
39 return 1;
40}
41
42static int parse_redboot_partitions(struct mtd_info *master,
43 struct mtd_partition **pparts,
44 unsigned long fis_origin)
45{
46 int nrparts = 0;
47 struct fis_image_desc *buf;
48 struct mtd_partition *parts;
49 struct fis_list *fl = NULL, *tmp_fl;
50 int ret, i;
51 size_t retlen;
52 char *names;
53 char *nullname;
54 int namelen = 0;
55 int nulllen = 0;
56 int numslots;
57 unsigned long offset;
58#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
59 static char nullstring[] = "unallocated";
60#endif
61
62 buf = vmalloc(master->erasesize);
63
64 if (!buf)
65 return -ENOMEM;
66
67	if (directory < 0)
68 offset = master->size + directory*master->erasesize;
69 else
70 offset = directory*master->erasesize;
71
72 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
73 master->name, offset);
74
75 ret = master->read(master, offset,
76 master->erasesize, &retlen, (void *)buf);
77
78 if (ret)
79 goto out;
80
81 if (retlen != master->erasesize) {
82 ret = -EIO;
83 goto out;
84 }
85
86 numslots = (master->erasesize / sizeof(struct fis_image_desc));
87 for (i = 0; i < numslots; i++) {
88 if (buf[i].name[0] == 0xff) {
89 i = numslots;
90 break;
91 }
92 if (!memcmp(buf[i].name, "FIS directory", 14))
93 break;
94 }
95 if (i == numslots) {
96 /* Didn't find it */
97 printk(KERN_NOTICE "No RedBoot partition table detected in %s\n",
98 master->name);
99 ret = 0;
100 goto out;
101 }
102
103 for (i = 0; i < numslots; i++) {
104 struct fis_list *new_fl, **prev;
105
106 if (buf[i].name[0] == 0xff)
107 break;
108 if (!redboot_checksum(&buf[i]))
109 break;
110
111 new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL);
112 namelen += strlen(buf[i].name)+1;
113 if (!new_fl) {
114 ret = -ENOMEM;
115 goto out;
116 }
117 new_fl->img = &buf[i];
118 if (fis_origin) {
119 buf[i].flash_base -= fis_origin;
120 } else {
121 buf[i].flash_base &= master->size-1;
122 }
123
124 /* I'm sure the JFFS2 code has done me permanent damage.
125 * I now think the following is _normal_
126 */
127 prev = &fl;
128 while(*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
129 prev = &(*prev)->next;
130 new_fl->next = *prev;
131 *prev = new_fl;
132
133 nrparts++;
134 }
135#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
136 if (fl->img->flash_base) {
137 nrparts++;
138 nulllen = sizeof(nullstring);
139 }
140
141 for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
142 if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
143 nrparts++;
144 nulllen = sizeof(nullstring);
145 }
146 }
147#endif
148 parts = kmalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
149
150 if (!parts) {
151 ret = -ENOMEM;
152 goto out;
153 }
154
155 memset(parts, 0, sizeof(*parts)*nrparts + nulllen + namelen);
156
157 nullname = (char *)&parts[nrparts];
158#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
159 if (nulllen > 0) {
160 strcpy(nullname, nullstring);
161 }
162#endif
163 names = nullname + nulllen;
164
165 i=0;
166
167#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
168 if (fl->img->flash_base) {
169 parts[0].name = nullname;
170 parts[0].size = fl->img->flash_base;
171 parts[0].offset = 0;
172 i++;
173 }
174#endif
175 for ( ; i<nrparts; i++) {
176 parts[i].size = fl->img->size;
177 parts[i].offset = fl->img->flash_base;
178 parts[i].name = names;
179
180 strcpy(names, fl->img->name);
181#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
182 if (!memcmp(names, "RedBoot", 8) ||
183 !memcmp(names, "RedBoot config", 15) ||
184 !memcmp(names, "FIS directory", 14)) {
185 parts[i].mask_flags = MTD_WRITEABLE;
186 }
187#endif
188 names += strlen(names)+1;
189
190#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
191 if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
192 i++;
193 parts[i].offset = parts[i-1].size + parts[i-1].offset;
194 parts[i].size = fl->next->img->flash_base - parts[i].offset;
195 parts[i].name = nullname;
196 }
197#endif
198 tmp_fl = fl;
199 fl = fl->next;
200 kfree(tmp_fl);
201 }
202 ret = nrparts;
203 *pparts = parts;
204 out:
205 while (fl) {
206 struct fis_list *old = fl;
207 fl = fl->next;
208 kfree(old);
209 }
210 vfree(buf);
211 return ret;
212}
213
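/*
 * Minimal usage sketch (illustrative only, not part of this file): a map
 * driver would normally not call parse_redboot_partitions() directly but
 * go through the generic parser interface, assuming the
 * parse_mtd_partitions() helper declared in <linux/mtd/partitions.h>.
 * The names below (probes, example_add_flash, mymtd) are made up for the
 * example.
 */
#if 0
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };

static int example_add_flash(struct mtd_info *mymtd)
{
	struct mtd_partition *parts;
	int nr_parts;

	/* Try each parser in turn; a positive return is the partition count */
	nr_parts = parse_mtd_partitions(mymtd, probes, &parts, 0);
	if (nr_parts > 0)
		return add_mtd_partitions(mymtd, parts, nr_parts);

	/* No partition table found: register the whole device */
	return add_mtd_device(mymtd);
}
#endif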
214static struct mtd_part_parser redboot_parser = {
215 .owner = THIS_MODULE,
216 .parse_fn = parse_redboot_partitions,
217 .name = "RedBoot",
218};
219
220static int __init redboot_parser_init(void)
221{
222 return register_mtd_parser(&redboot_parser);
223}
224
225static void __exit redboot_parser_exit(void)
226{
227 deregister_mtd_parser(&redboot_parser);
228}
229
230module_init(redboot_parser_init);
231module_exit(redboot_parser_exit);
232
233MODULE_LICENSE("GPL");
234MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
235MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");