author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 13:20:31 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 13:20:31 -0500
commit		a8e98d6d51a3eb7bb061b1625193a129c8bd094f (patch)
tree		0fa58b6e11e37023b024e55b8f0e7e01438706d4 /drivers
parent		f0f1b3364ae7f48084bdf2837fb979ff59622523 (diff)
parent		f9f7dd222364a6428d2ad99a515935dd1dd89d18 (diff)
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (120 commits)
  [MTD] Fix mtdoops.c compilation
  [MTD] [NOR] fix startup lock when using multiple nor flash chips
  [MTD] [DOC200x] eccbuf is statically defined and always evaluate to true
  [MTD] Fix maps/physmap.c compilation with CONFIG_PM
  [MTD] onenand: Add panic_write function to the onenand driver
  [MTD] mtdoops: Use the panic_write function when present
  [MTD] Add mtd panic_write function pointer
  [MTD] [NAND] Freescale enhanced Local Bus Controller FCM NAND support.
  [MTD] physmap.c: Add support for multiple resources
  [MTD] [NAND] Fix misparenthesization introduced by commit 78b65179...
  [MTD] [NAND] Fix Blackfin NFC ECC calculating bug with page size 512 bytes
  [MTD] [NAND] Remove wrong operation in PM function of the BF54x NFC driver
  [MTD] [NAND] Remove unused variable in plat_nand_remove
  [MTD] Unlocking all Intel flash that is locked on power up.
  [MTD] [NAND] at91_nand: Make mtdparts option can override board info
  [MTD] mtdoops: Various minor cleanups
  [MTD] mtdoops: Ensure sequential write to the buffer
  [MTD] mtdoops: Perform write operations in a workqueue
  [MTD] mtdoops: Add further error return code checking
  [MTD] [NOR] Test devtype, not definition in flash_probe(), drivers/mtd/devices/lart.c
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/Kconfig | 11
-rw-r--r--	drivers/mtd/Makefile | 1
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0001.c | 78
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0002.c | 14
-rw-r--r--	drivers/mtd/chips/cfi_probe.c | 12
-rw-r--r--	drivers/mtd/chips/gen_probe.c | 2
-rw-r--r--	drivers/mtd/chips/jedec_probe.c | 1376
-rw-r--r--	drivers/mtd/cmdlinepart.c | 9
-rw-r--r--	drivers/mtd/devices/doc2000.c | 4
-rw-r--r--	drivers/mtd/devices/doc2001plus.c | 2
-rw-r--r--	drivers/mtd/devices/lart.c | 2
-rw-r--r--	drivers/mtd/devices/mtd_dataflash.c | 2
-rw-r--r--	drivers/mtd/maps/Kconfig | 9
-rw-r--r--	drivers/mtd/maps/Makefile | 1
-rw-r--r--	drivers/mtd/maps/physmap.c | 168
-rw-r--r--	drivers/mtd/maps/physmap_of.c | 88
-rw-r--r--	drivers/mtd/maps/pnc2000.c | 93
-rw-r--r--	drivers/mtd/maps/scb2_flash.c | 2
-rw-r--r--	drivers/mtd/mtd_blkdevs.c | 2
-rw-r--r--	drivers/mtd/mtdchar.c | 8
-rw-r--r--	drivers/mtd/mtdcore.c | 2
-rw-r--r--	drivers/mtd/mtdoops.c | 185
-rw-r--r--	drivers/mtd/mtdpart.c | 17
-rw-r--r--	drivers/mtd/nand/Kconfig | 26
-rw-r--r--	drivers/mtd/nand/Makefile | 3
-rw-r--r--	drivers/mtd/nand/at91_nand.c | 12
-rw-r--r--	drivers/mtd/nand/bf5xx_nand.c | 39
-rw-r--r--	drivers/mtd/nand/cafe_nand.c | 19
-rw-r--r--	drivers/mtd/nand/fsl_elbc_nand.c | 1244
-rw-r--r--	drivers/mtd/nand/nand_base.c | 8
-rw-r--r--	drivers/mtd/nand/orion_nand.c | 171
-rw-r--r--	drivers/mtd/nand/pasemi_nand.c | 243
-rw-r--r--	drivers/mtd/nand/plat_nand.c | 2
-rw-r--r--	drivers/mtd/nand/s3c2410.c | 48
-rw-r--r--	drivers/mtd/ofpart.c | 74
-rw-r--r--	drivers/mtd/onenand/onenand_base.c | 199
-rw-r--r--	drivers/mtd/redboot.c | 25
-rw-r--r--	drivers/mtd/ubi/build.c | 674
-rw-r--r--	drivers/mtd/ubi/cdev.c | 244
-rw-r--r--	drivers/mtd/ubi/debug.h | 21
-rw-r--r--	drivers/mtd/ubi/eba.c | 321
-rw-r--r--	drivers/mtd/ubi/gluebi.c | 9
-rw-r--r--	drivers/mtd/ubi/io.c | 10
-rw-r--r--	drivers/mtd/ubi/kapi.c | 177
-rw-r--r--	drivers/mtd/ubi/misc.c | 2
-rw-r--r--	drivers/mtd/ubi/scan.c | 12
-rw-r--r--	drivers/mtd/ubi/ubi.h | 171
-rw-r--r--	drivers/mtd/ubi/upd.c | 185
-rw-r--r--	drivers/mtd/ubi/vmt.c | 208
-rw-r--r--	drivers/mtd/ubi/vtbl.c | 45
-rw-r--r--	drivers/mtd/ubi/wl.c | 338
51 files changed, 4605 insertions, 2013 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8848e8ac705d..e8503341e3b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
 	  for your particular device. It won't happen automatically. The
 	  'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
 
+config MTD_OF_PARTS
+	tristate "Flash partition map based on OF description"
+	depends on PPC_OF && MTD_PARTITIONS
+	help
+	  This provides a partition parsing function which derives
+	  the partition map from the children of the flash node,
+	  as described in Documentation/powerpc/booting-without-of.txt.
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
 	  buffer in a flash partition where it can be read back at some
 	  later point.
 
+	  To use, add console=ttyMTDx to the kernel command line,
+	  where x is the MTD device number to use.
+
 source "drivers/mtd/chips/Kconfig"
 
 source "drivers/mtd/maps/Kconfig"
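As orientation for the new MTD_OF_PARTS option: the partition map is read from the flash node's children, each of which carries reg = <offset size> and optional "label" and "read-only" properties per booting-without-of.txt. The following is only our sketch against generic OF helpers, with a hypothetical function name, not the ofpart.c code merged here:

#include <linux/mtd/partitions.h>
#include <linux/of.h>

/* Hypothetical sketch: derive an mtd_partition table from the
 * children of a flash node.  Error handling is elided and the
 * helper name is ours, not the ofpart.c entry point. */
static int sketch_of_partitions(struct device_node *flash,
				struct mtd_partition *parts, int max)
{
	struct device_node *pp;
	int i = 0;

	for_each_child_of_node(flash, pp) {
		const u32 *reg = of_get_property(pp, "reg", NULL);

		if (!reg || i == max)
			continue;
		parts[i].offset = reg[0];	/* first cell: offset */
		parts[i].size = reg[1];		/* second cell: size */
		parts[i].name = (char *)of_get_property(pp, "label", NULL);
		if (of_get_property(pp, "read-only", NULL))
			parts[i].mask_flags = MTD_WRITEABLE;	/* mask write access off */
		i++;
	}
	return i;	/* number of partitions found */
}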
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa7..538e33d11d46 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1707f98c322c..47794d23a42e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -50,6 +50,7 @@
 #define I82802AC	0x00ac
 #define MANUFACTURER_ST	0x0020
 #define M50LPW080	0x002F
+#define AT49BV640D	0x02de
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
 }
 #endif
 
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+	struct cfi_pri_atmel atmel_pri;
+	uint32_t features = 0;
+
+	/* Reverse byteswapping */
+	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+	if (atmel_pri.Features & 0x01) /* chip erase supported */
+		features |= (1<<0);
+	if (atmel_pri.Features & 0x02) /* erase suspend supported */
+		features |= (1<<1);
+	if (atmel_pri.Features & 0x04) /* program suspend supported */
+		features |= (1<<2);
+	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+		features |= (1<<9);
+	if (atmel_pri.Features & 0x20) /* page mode read supported */
+		features |= (1<<7);
+	if (atmel_pri.Features & 0x40) /* queued erase supported */
+		features |= (1<<4);
+	if (atmel_pri.Features & 0x80) /* Protection bits supported */
+		features |= (1<<6);
+
+	extp->FeatureSupport = features;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
 {
-	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
-	mtd->flags |= MTD_STUPID_LOCK;
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+	if (cfip->FeatureSupport&32) {
+		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+		mtd->flags |= MTD_POWERUP_LOCK;
+	}
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #endif
 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-	{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
+	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
 	{ 0, 0, NULL, NULL }
 };
 
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 		return NULL;
 
 	if (extp->MajorVersion != '1' ||
-	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
 		       "version %c.%c.\n",  extp->MajorVersion,
 		       extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 {
 	int ret;
+	DECLARE_WAITQUEUE(wait, current);
 
  retry:
 	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			spin_unlock(contender->mutex);
 		}
 
+		/* Check if we already have suspended erase
+		 * on this chip. Sleep. */
+		if (mode == FL_ERASING && shared->erasing
+		    && shared->erasing->oldstate == FL_ERASING) {
+			spin_unlock(&shared->lock);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			spin_lock(chip->mutex);
+			goto retry;
+		}
+
 		/* We now own it */
 		shared->writing = chip;
 		if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 	struct flchip *chip;
 	int ret = 0;
 
-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_save_locks(mtd);
 
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
 		spin_unlock(chip->mutex);
 	}
 
-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_restore_locks(mtd);
 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 389acc600f5e..d072e87ce4e2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
 		extp->TopBottom = 2;
 	else
 		extp->TopBottom = 3;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
 {
 	mtd->lock = cfi_atmel_lock;
 	mtd->unlock = cfi_atmel_unlock;
-	mtd->flags |= MTD_STUPID_LOCK;
+	mtd->flags |= MTD_POWERUP_LOCK;
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
 #endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #if !FORCE_WORD_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
 #endif
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 	{ 0, 0, NULL, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 	/* Modify the unlock address if we are in compatibility mode */
 	if (	/* x16 in x8 mode */
 	    ((cfi->device_type == CFI_DEVICETYPE_X8) &&
-			(cfi->cfiq->InterfaceDesc == 2)) ||
+			(cfi->cfiq->InterfaceDesc ==
+				CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
 	    /* x32 in x16 mode */
 	    ((cfi->device_type == CFI_DEVICETYPE_X16) &&
-			(cfi->cfiq->InterfaceDesc == 4)))
+			(cfi->cfiq->InterfaceDesc ==
+				CFI_INTERFACE_X16_BY_X32_ASYNC)))
 	{
 		cfi->addr_unlock1 = 0xaaa;
 		cfi->addr_unlock2 = 0x555;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 60e11a0ada97..f651b6ef1c5d 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
 	printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
 	printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
 	switch(cfip->InterfaceDesc) {
-	case 0:
+	case CFI_INTERFACE_X8_ASYNC:
 		printk("  - x8-only asynchronous interface\n");
 		break;
 
-	case 1:
+	case CFI_INTERFACE_X16_ASYNC:
 		printk("  - x16-only asynchronous interface\n");
 		break;
 
-	case 2:
+	case CFI_INTERFACE_X8_BY_X16_ASYNC:
 		printk("  - supports x8 and x16 via BYTE# with asynchronous interface\n");
 		break;
 
-	case 3:
+	case CFI_INTERFACE_X32_ASYNC:
 		printk("  - x32-only asynchronous interface\n");
 		break;
 
-	case 4:
+	case CFI_INTERFACE_X16_BY_X32_ASYNC:
 		printk("  - supports x16 and x32 via Word# with asynchronous interface\n");
 		break;
 
-	case 65535:
+	case CFI_INTERFACE_NOT_ALLOWED:
 		printk("  - Not Allowed / Reserved\n");
 		break;
 
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 2eb696d7b97b..d338b8c92780 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -112,7 +112,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
 		max_chips = 1;
 	}
 
-	mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
+	mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
 	chip_map = kzalloc(mapsize, GFP_KERNEL);
 	if (!chip_map) {
 		printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
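The gen_probe change fixes an undersized allocation: the old expression computed how many longs the chip bitmap needs but handed that word count, not a byte count, to kzalloc(). A sketch of the same sizing using the stock helper (the wrapper function is ours, not the driver's):

#include <linux/bitops.h>	/* BITS_PER_LONG, BITS_TO_LONGS() */
#include <linux/slab.h>		/* kzalloc() */

/* BITS_TO_LONGS() rounds max_chips up to whole longs; multiplying by
 * sizeof(long) turns that word count into the byte count kzalloc()
 * expects, which is what the one-line fix above does by hand. */
static unsigned long *sketch_alloc_chip_map(int max_chips)
{
	size_t mapsize = BITS_TO_LONGS(max_chips) * sizeof(long);

	return kzalloc(mapsize, GFP_KERNEL);	/* zeroed, one bit per possible chip */
}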
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index a67b23b87fc0..4be51a86a85c 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -194,8 +194,8 @@ enum uaddr {
 
 
 struct unlock_addr {
-	u32 addr1;
-	u32 addr2;
+	uint32_t addr1;
+	uint32_t addr2;
 };
 
 
@@ -246,16 +246,16 @@ static const struct unlock_addr unlock_addrs[] = {
 	}
 };
 
-
 struct amd_flash_info {
-	const __u16 mfr_id;
-	const __u16 dev_id;
 	const char *name;
-	const int DevSize;
-	const int NumEraseRegions;
-	const int CmdSet;
-	const __u8 uaddr[4];	/* unlock addrs for 8, 16, 32, 64 */
-	const ulong regions[6];
+	const uint16_t mfr_id;
+	const uint16_t dev_id;
+	const uint8_t dev_size;
+	const uint8_t nr_regions;
+	const uint16_t cmd_set;
+	const uint32_t regions[6];
+	const uint8_t devtypes;	/* Bitmask for x8, x16 etc. */
+	const uint8_t uaddr;	/* unlock addrs for 8, 16, 32, 64 */
 };
 
 #define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
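ERASEINFO() packs one erase region into a single word: the block size in the upper bits and the block count minus one in the low byte. A decode sketch (helper names are ours) to make the table entries below easier to read:

/* ERASEINFO(size,blocks) == (size << 8) | (blocks - 1), so e.g.
 * ERASEINFO(0x10000,64) == 0x0100003f: 64 erase blocks of 64 KiB. */
static inline uint32_t erase_region_size(uint32_t info)
{
	return info >> 8;		/* bytes per erase block */
}

static inline unsigned int erase_region_blocks(uint32_t info)
{
	return (info & 0xff) + 1;	/* stored as count - 1 */
}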
@@ -280,12 +280,11 @@ static const struct amd_flash_info jedec_table[] = {
280 .mfr_id = MANUFACTURER_AMD, 280 .mfr_id = MANUFACTURER_AMD,
281 .dev_id = AM29F032B, 281 .dev_id = AM29F032B,
282 .name = "AMD AM29F032B", 282 .name = "AMD AM29F032B",
283 .uaddr = { 283 .uaddr = MTD_UADDR_0x0555_0x02AA,
284 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 284 .devtypes = CFI_DEVICETYPE_X8,
285 }, 285 .dev_size = SIZE_4MiB,
286 .DevSize = SIZE_4MiB, 286 .cmd_set = P_ID_AMD_STD,
287 .CmdSet = P_ID_AMD_STD, 287 .nr_regions = 1,
288 .NumEraseRegions= 1,
289 .regions = { 288 .regions = {
290 ERASEINFO(0x10000,64) 289 ERASEINFO(0x10000,64)
291 } 290 }
@@ -293,13 +292,11 @@ static const struct amd_flash_info jedec_table[] = {
293 .mfr_id = MANUFACTURER_AMD, 292 .mfr_id = MANUFACTURER_AMD,
294 .dev_id = AM29LV160DT, 293 .dev_id = AM29LV160DT,
295 .name = "AMD AM29LV160DT", 294 .name = "AMD AM29LV160DT",
296 .uaddr = { 295 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
297 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 296 .uaddr = MTD_UADDR_0x0AAA_0x0555,
298 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 297 .dev_size = SIZE_2MiB,
299 }, 298 .cmd_set = P_ID_AMD_STD,
300 .DevSize = SIZE_2MiB, 299 .nr_regions = 4,
301 .CmdSet = P_ID_AMD_STD,
302 .NumEraseRegions= 4,
303 .regions = { 300 .regions = {
304 ERASEINFO(0x10000,31), 301 ERASEINFO(0x10000,31),
305 ERASEINFO(0x08000,1), 302 ERASEINFO(0x08000,1),
@@ -310,13 +307,11 @@ static const struct amd_flash_info jedec_table[] = {
310 .mfr_id = MANUFACTURER_AMD, 307 .mfr_id = MANUFACTURER_AMD,
311 .dev_id = AM29LV160DB, 308 .dev_id = AM29LV160DB,
312 .name = "AMD AM29LV160DB", 309 .name = "AMD AM29LV160DB",
313 .uaddr = { 310 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
314 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 311 .uaddr = MTD_UADDR_0x0AAA_0x0555,
315 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 312 .dev_size = SIZE_2MiB,
316 }, 313 .cmd_set = P_ID_AMD_STD,
317 .DevSize = SIZE_2MiB, 314 .nr_regions = 4,
318 .CmdSet = P_ID_AMD_STD,
319 .NumEraseRegions= 4,
320 .regions = { 315 .regions = {
321 ERASEINFO(0x04000,1), 316 ERASEINFO(0x04000,1),
322 ERASEINFO(0x02000,2), 317 ERASEINFO(0x02000,2),
@@ -327,13 +322,11 @@ static const struct amd_flash_info jedec_table[] = {
327 .mfr_id = MANUFACTURER_AMD, 322 .mfr_id = MANUFACTURER_AMD,
328 .dev_id = AM29LV400BB, 323 .dev_id = AM29LV400BB,
329 .name = "AMD AM29LV400BB", 324 .name = "AMD AM29LV400BB",
330 .uaddr = { 325 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
331 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 326 .uaddr = MTD_UADDR_0x0AAA_0x0555,
332 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 327 .dev_size = SIZE_512KiB,
333 }, 328 .cmd_set = P_ID_AMD_STD,
334 .DevSize = SIZE_512KiB, 329 .nr_regions = 4,
335 .CmdSet = P_ID_AMD_STD,
336 .NumEraseRegions= 4,
337 .regions = { 330 .regions = {
338 ERASEINFO(0x04000,1), 331 ERASEINFO(0x04000,1),
339 ERASEINFO(0x02000,2), 332 ERASEINFO(0x02000,2),
@@ -344,13 +337,11 @@ static const struct amd_flash_info jedec_table[] = {
344 .mfr_id = MANUFACTURER_AMD, 337 .mfr_id = MANUFACTURER_AMD,
345 .dev_id = AM29LV400BT, 338 .dev_id = AM29LV400BT,
346 .name = "AMD AM29LV400BT", 339 .name = "AMD AM29LV400BT",
347 .uaddr = { 340 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
348 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 341 .uaddr = MTD_UADDR_0x0AAA_0x0555,
349 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 342 .dev_size = SIZE_512KiB,
350 }, 343 .cmd_set = P_ID_AMD_STD,
351 .DevSize = SIZE_512KiB, 344 .nr_regions = 4,
352 .CmdSet = P_ID_AMD_STD,
353 .NumEraseRegions= 4,
354 .regions = { 345 .regions = {
355 ERASEINFO(0x10000,7), 346 ERASEINFO(0x10000,7),
356 ERASEINFO(0x08000,1), 347 ERASEINFO(0x08000,1),
@@ -361,13 +352,11 @@ static const struct amd_flash_info jedec_table[] = {
361 .mfr_id = MANUFACTURER_AMD, 352 .mfr_id = MANUFACTURER_AMD,
362 .dev_id = AM29LV800BB, 353 .dev_id = AM29LV800BB,
363 .name = "AMD AM29LV800BB", 354 .name = "AMD AM29LV800BB",
364 .uaddr = { 355 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
365 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 356 .uaddr = MTD_UADDR_0x0AAA_0x0555,
366 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 357 .dev_size = SIZE_1MiB,
367 }, 358 .cmd_set = P_ID_AMD_STD,
368 .DevSize = SIZE_1MiB, 359 .nr_regions = 4,
369 .CmdSet = P_ID_AMD_STD,
370 .NumEraseRegions= 4,
371 .regions = { 360 .regions = {
372 ERASEINFO(0x04000,1), 361 ERASEINFO(0x04000,1),
373 ERASEINFO(0x02000,2), 362 ERASEINFO(0x02000,2),
@@ -379,13 +368,11 @@ static const struct amd_flash_info jedec_table[] = {
379 .mfr_id = MANUFACTURER_AMD, 368 .mfr_id = MANUFACTURER_AMD,
380 .dev_id = AM29DL800BB, 369 .dev_id = AM29DL800BB,
381 .name = "AMD AM29DL800BB", 370 .name = "AMD AM29DL800BB",
382 .uaddr = { 371 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
383 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 372 .uaddr = MTD_UADDR_0x0AAA_0x0555,
384 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 373 .dev_size = SIZE_1MiB,
385 }, 374 .cmd_set = P_ID_AMD_STD,
386 .DevSize = SIZE_1MiB, 375 .nr_regions = 6,
387 .CmdSet = P_ID_AMD_STD,
388 .NumEraseRegions= 6,
389 .regions = { 376 .regions = {
390 ERASEINFO(0x04000,1), 377 ERASEINFO(0x04000,1),
391 ERASEINFO(0x08000,1), 378 ERASEINFO(0x08000,1),
@@ -398,13 +385,11 @@ static const struct amd_flash_info jedec_table[] = {
398 .mfr_id = MANUFACTURER_AMD, 385 .mfr_id = MANUFACTURER_AMD,
399 .dev_id = AM29DL800BT, 386 .dev_id = AM29DL800BT,
400 .name = "AMD AM29DL800BT", 387 .name = "AMD AM29DL800BT",
401 .uaddr = { 388 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
402 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 389 .uaddr = MTD_UADDR_0x0AAA_0x0555,
403 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 390 .dev_size = SIZE_1MiB,
404 }, 391 .cmd_set = P_ID_AMD_STD,
405 .DevSize = SIZE_1MiB, 392 .nr_regions = 6,
406 .CmdSet = P_ID_AMD_STD,
407 .NumEraseRegions= 6,
408 .regions = { 393 .regions = {
409 ERASEINFO(0x10000,14), 394 ERASEINFO(0x10000,14),
410 ERASEINFO(0x04000,1), 395 ERASEINFO(0x04000,1),
@@ -417,13 +402,11 @@ static const struct amd_flash_info jedec_table[] = {
417 .mfr_id = MANUFACTURER_AMD, 402 .mfr_id = MANUFACTURER_AMD,
418 .dev_id = AM29F800BB, 403 .dev_id = AM29F800BB,
419 .name = "AMD AM29F800BB", 404 .name = "AMD AM29F800BB",
420 .uaddr = { 405 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
421 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 406 .uaddr = MTD_UADDR_0x0AAA_0x0555,
422 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 407 .dev_size = SIZE_1MiB,
423 }, 408 .cmd_set = P_ID_AMD_STD,
424 .DevSize = SIZE_1MiB, 409 .nr_regions = 4,
425 .CmdSet = P_ID_AMD_STD,
426 .NumEraseRegions= 4,
427 .regions = { 410 .regions = {
428 ERASEINFO(0x04000,1), 411 ERASEINFO(0x04000,1),
429 ERASEINFO(0x02000,2), 412 ERASEINFO(0x02000,2),
@@ -434,13 +417,11 @@ static const struct amd_flash_info jedec_table[] = {
434 .mfr_id = MANUFACTURER_AMD, 417 .mfr_id = MANUFACTURER_AMD,
435 .dev_id = AM29LV800BT, 418 .dev_id = AM29LV800BT,
436 .name = "AMD AM29LV800BT", 419 .name = "AMD AM29LV800BT",
437 .uaddr = { 420 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
438 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 421 .uaddr = MTD_UADDR_0x0AAA_0x0555,
439 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 422 .dev_size = SIZE_1MiB,
440 }, 423 .cmd_set = P_ID_AMD_STD,
441 .DevSize = SIZE_1MiB, 424 .nr_regions = 4,
442 .CmdSet = P_ID_AMD_STD,
443 .NumEraseRegions= 4,
444 .regions = { 425 .regions = {
445 ERASEINFO(0x10000,15), 426 ERASEINFO(0x10000,15),
446 ERASEINFO(0x08000,1), 427 ERASEINFO(0x08000,1),
@@ -451,13 +432,11 @@ static const struct amd_flash_info jedec_table[] = {
451 .mfr_id = MANUFACTURER_AMD, 432 .mfr_id = MANUFACTURER_AMD,
452 .dev_id = AM29F800BT, 433 .dev_id = AM29F800BT,
453 .name = "AMD AM29F800BT", 434 .name = "AMD AM29F800BT",
454 .uaddr = { 435 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
455 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 436 .uaddr = MTD_UADDR_0x0AAA_0x0555,
456 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 437 .dev_size = SIZE_1MiB,
457 }, 438 .cmd_set = P_ID_AMD_STD,
458 .DevSize = SIZE_1MiB, 439 .nr_regions = 4,
459 .CmdSet = P_ID_AMD_STD,
460 .NumEraseRegions= 4,
461 .regions = { 440 .regions = {
462 ERASEINFO(0x10000,15), 441 ERASEINFO(0x10000,15),
463 ERASEINFO(0x08000,1), 442 ERASEINFO(0x08000,1),
@@ -468,12 +447,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29F017D,
 		.name = "AMD AM29F017D",
-		.uaddr = {
-			[0] = MTD_UADDR_DONT_CARE /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_DONT_CARE,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,32),
 		}
@@ -481,12 +459,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29F016D,
 		.name = "AMD AM29F016D",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,32),
 		}
@@ -494,12 +471,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29F080,
 		.name = "AMD AM29F080",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,16),
 		}
@@ -507,12 +483,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29F040,
 		.name = "AMD AM29F040",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,8),
 		}
@@ -520,12 +495,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29LV040B,
 		.name = "AMD AM29LV040B",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,8),
 		}
@@ -533,12 +507,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_AMD,
 		.dev_id = AM29F002T,
 		.name = "AMD AM29F002T",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_256KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_256KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x10000,3),
 			ERASEINFO(0x08000,1),
@@ -549,12 +522,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT49BV512,
 		.name = "Atmel AT49BV512",
-		.uaddr = {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize = SIZE_64KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x5555_0x2AAA,
+		.dev_size = SIZE_64KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,1)
 		}
@@ -562,12 +534,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT29LV512,
 		.name = "Atmel AT29LV512",
-		.uaddr = {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize = SIZE_64KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x5555_0x2AAA,
+		.dev_size = SIZE_64KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x80,256),
 			ERASEINFO(0x80,256)
@@ -576,13 +547,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT49BV16X,
 		.name = "Atmel AT49BV16X",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,31)
@@ -591,13 +560,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT49BV16XT,
 		.name = "Atmel AT49BV16XT",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x02000,8)
@@ -606,13 +573,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT49BV32X,
 		.name = "Atmel AT49BV32X",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,63)
@@ -621,13 +586,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_ATMEL,
 		.dev_id = AT49BV32XT,
 		.name = "Atmel AT49BV32XT",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000,63),
 			ERASEINFO(0x02000,8)
@@ -636,12 +599,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29F040C,
 		.name = "Fujitsu MBM29F040C",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,8)
 		}
@@ -649,13 +611,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29F800BA,
 		.name = "Fujitsu MBM29F800BA",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -666,12 +626,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV650UE,
 		.name = "Fujitsu MBM29LV650UE",
-		.uaddr = {
-			[0] = MTD_UADDR_DONT_CARE /* x16 */
-		},
-		.DevSize = SIZE_8MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_DONT_CARE,
+		.dev_size = SIZE_8MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,128)
 		}
@@ -679,13 +638,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV320TE,
 		.name = "Fujitsu MBM29LV320TE",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000,63),
 			ERASEINFO(0x02000,8)
@@ -694,13 +651,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV320BE,
 		.name = "Fujitsu MBM29LV320BE",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,63)
@@ -709,13 +664,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV160TE,
 		.name = "Fujitsu MBM29LV160TE",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -726,13 +679,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV160BE,
 		.name = "Fujitsu MBM29LV160BE",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -743,13 +694,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV800BA,
 		.name = "Fujitsu MBM29LV800BA",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -760,13 +709,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV800TA,
 		.name = "Fujitsu MBM29LV800TA",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x10000,15),
 			ERASEINFO(0x08000,1),
@@ -777,13 +724,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV400BC,
 		.name = "Fujitsu MBM29LV400BC",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -794,13 +739,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_FUJITSU,
 		.dev_id = MBM29LV400TC,
 		.name = "Fujitsu MBM29LV400TC",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0AAA_0x0555,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x10000,7),
 			ERASEINFO(0x08000,1),
@@ -811,12 +754,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_HYUNDAI,
 		.dev_id = HY29F002T,
 		.name = "Hyundai HY29F002T",
-		.uaddr = {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize = SIZE_256KiB,
-		.CmdSet = P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_0x0555_0x02AA,
+		.dev_size = SIZE_256KiB,
+		.cmd_set = P_ID_AMD_STD,
+		.nr_regions = 4,
 		.regions = {
 			ERASEINFO(0x10000,3),
 			ERASEINFO(0x08000,1),
@@ -827,12 +769,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F004B3B,
 		.name = "Intel 28F004B3B",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 7),
@@ -841,12 +782,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F004B3T,
 		.name = "Intel 28F004B3T",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 7),
 			ERASEINFO(0x02000, 8),
@@ -855,13 +795,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F400B3B,
 		.name = "Intel 28F400B3B",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 7),
@@ -870,13 +808,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F400B3T,
 		.name = "Intel 28F400B3T",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 7),
 			ERASEINFO(0x02000, 8),
@@ -885,12 +821,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F008B3B,
 		.name = "Intel 28F008B3B",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 15),
@@ -899,12 +834,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F008B3T,
 		.name = "Intel 28F008B3T",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 15),
 			ERASEINFO(0x02000, 8),
@@ -913,12 +847,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F008S5,
 		.name = "Intel 28F008S5",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_EXT,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,16),
 		}
@@ -926,12 +859,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F016S5,
 		.name = "Intel 28F016S5",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_EXT,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,32),
 		}
@@ -939,12 +871,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F008SA,
 		.name = "Intel 28F008SA",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000, 16),
 		}
@@ -952,12 +883,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F800B3B,
 		.name = "Intel 28F800B3B",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 15),
@@ -966,12 +896,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F800B3T,
 		.name = "Intel 28F800B3T",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_1MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 15),
 			ERASEINFO(0x02000, 8),
@@ -980,12 +909,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F016B3B,
 		.name = "Intel 28F016B3B",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 31),
@@ -994,12 +922,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F016S3,
 		.name = "Intel I28F016S3",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000, 32),
 		}
@@ -1007,12 +934,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F016B3T,
 		.name = "Intel 28F016B3T",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 31),
 			ERASEINFO(0x02000, 8),
@@ -1021,12 +947,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F160B3B,
 		.name = "Intel 28F160B3B",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 31),
@@ -1035,12 +960,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F160B3T,
 		.name = "Intel 28F160B3T",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_2MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_2MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 31),
 			ERASEINFO(0x02000, 8),
@@ -1049,12 +973,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F320B3B,
 		.name = "Intel 28F320B3B",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 63),
@@ -1063,12 +986,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F320B3T,
 		.name = "Intel 28F320B3T",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_4MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_4MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 63),
 			ERASEINFO(0x02000, 8),
@@ -1077,12 +999,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F640B3B,
 		.name = "Intel 28F640B3B",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_8MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_8MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 127),
@@ -1091,12 +1012,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I28F640B3T,
 		.name = "Intel 28F640B3T",
-		.uaddr = {
-			[1] = MTD_UADDR_UNNECESSARY, /* x16 */
-		},
-		.DevSize = SIZE_8MiB,
-		.CmdSet = P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes = CFI_DEVICETYPE_X16,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_8MiB,
+		.cmd_set = P_ID_INTEL_STD,
+		.nr_regions = 2,
 		.regions = {
 			ERASEINFO(0x10000, 127),
 			ERASEINFO(0x02000, 8),
@@ -1105,12 +1025,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I82802AB,
 		.name = "Intel 82802AB",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
-		.DevSize = SIZE_512KiB,
-		.CmdSet = P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_512KiB,
+		.cmd_set = P_ID_INTEL_EXT,
+		.nr_regions = 1,
 		.regions = {
 			ERASEINFO(0x10000,8),
 		}
@@ -1118,12 +1037,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id = MANUFACTURER_INTEL,
 		.dev_id = I82802AC,
 		.name = "Intel 82802AC",
-		.uaddr = {
-			[0] = MTD_UADDR_UNNECESSARY, /* x8 */
-		},
+		.devtypes = CFI_DEVICETYPE_X8,
+		.uaddr = MTD_UADDR_UNNECESSARY,
+		.dev_size = SIZE_1MiB,
1124 .DevSize = SIZE_1MiB, 1043 .cmd_set = P_ID_INTEL_EXT,
1125 .CmdSet = P_ID_INTEL_EXT, 1044 .nr_regions = 1,
1126 .NumEraseRegions= 1,
1127 .regions = { 1045 .regions = {
1128 ERASEINFO(0x10000,16), 1046 ERASEINFO(0x10000,16),
1129 } 1047 }
@@ -1131,12 +1049,11 @@ static const struct amd_flash_info jedec_table[] = {
1131 .mfr_id = MANUFACTURER_MACRONIX, 1049 .mfr_id = MANUFACTURER_MACRONIX,
1132 .dev_id = MX29LV040C, 1050 .dev_id = MX29LV040C,
1133 .name = "Macronix MX29LV040C", 1051 .name = "Macronix MX29LV040C",
1134 .uaddr = { 1052 .devtypes = CFI_DEVICETYPE_X8,
1135 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1053 .uaddr = MTD_UADDR_0x0555_0x02AA,
1136 }, 1054 .dev_size = SIZE_512KiB,
1137 .DevSize = SIZE_512KiB, 1055 .cmd_set = P_ID_AMD_STD,
1138 .CmdSet = P_ID_AMD_STD, 1056 .nr_regions = 1,
1139 .NumEraseRegions= 1,
1140 .regions = { 1057 .regions = {
1141 ERASEINFO(0x10000,8), 1058 ERASEINFO(0x10000,8),
1142 } 1059 }
@@ -1144,13 +1061,11 @@ static const struct amd_flash_info jedec_table[] = {
1144 .mfr_id = MANUFACTURER_MACRONIX, 1061 .mfr_id = MANUFACTURER_MACRONIX,
1145 .dev_id = MX29LV160T, 1062 .dev_id = MX29LV160T,
1146 .name = "MXIC MX29LV160T", 1063 .name = "MXIC MX29LV160T",
1147 .uaddr = { 1064 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1148 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1065 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1149 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1066 .dev_size = SIZE_2MiB,
1150 }, 1067 .cmd_set = P_ID_AMD_STD,
1151 .DevSize = SIZE_2MiB, 1068 .nr_regions = 4,
1152 .CmdSet = P_ID_AMD_STD,
1153 .NumEraseRegions= 4,
1154 .regions = { 1069 .regions = {
1155 ERASEINFO(0x10000,31), 1070 ERASEINFO(0x10000,31),
1156 ERASEINFO(0x08000,1), 1071 ERASEINFO(0x08000,1),
@@ -1161,13 +1076,11 @@ static const struct amd_flash_info jedec_table[] = {
1161 .mfr_id = MANUFACTURER_NEC, 1076 .mfr_id = MANUFACTURER_NEC,
1162 .dev_id = UPD29F064115, 1077 .dev_id = UPD29F064115,
1163 .name = "NEC uPD29F064115", 1078 .name = "NEC uPD29F064115",
1164 .uaddr = { 1079 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1165 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1080 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1166 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1081 .dev_size = SIZE_8MiB,
1167 }, 1082 .cmd_set = P_ID_AMD_STD,
1168 .DevSize = SIZE_8MiB, 1083 .nr_regions = 3,
1169 .CmdSet = P_ID_AMD_STD,
1170 .NumEraseRegions= 3,
1171 .regions = { 1084 .regions = {
1172 ERASEINFO(0x2000,8), 1085 ERASEINFO(0x2000,8),
1173 ERASEINFO(0x10000,126), 1086 ERASEINFO(0x10000,126),
@@ -1177,13 +1090,11 @@ static const struct amd_flash_info jedec_table[] = {
1177 .mfr_id = MANUFACTURER_MACRONIX, 1090 .mfr_id = MANUFACTURER_MACRONIX,
1178 .dev_id = MX29LV160B, 1091 .dev_id = MX29LV160B,
1179 .name = "MXIC MX29LV160B", 1092 .name = "MXIC MX29LV160B",
1180 .uaddr = { 1093 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1181 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1094 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1182 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1095 .dev_size = SIZE_2MiB,
1183 }, 1096 .cmd_set = P_ID_AMD_STD,
1184 .DevSize = SIZE_2MiB, 1097 .nr_regions = 4,
1185 .CmdSet = P_ID_AMD_STD,
1186 .NumEraseRegions= 4,
1187 .regions = { 1098 .regions = {
1188 ERASEINFO(0x04000,1), 1099 ERASEINFO(0x04000,1),
1189 ERASEINFO(0x02000,2), 1100 ERASEINFO(0x02000,2),
@@ -1194,12 +1105,11 @@ static const struct amd_flash_info jedec_table[] = {
1194 .mfr_id = MANUFACTURER_MACRONIX, 1105 .mfr_id = MANUFACTURER_MACRONIX,
1195 .dev_id = MX29F040, 1106 .dev_id = MX29F040,
1196 .name = "Macronix MX29F040", 1107 .name = "Macronix MX29F040",
1197 .uaddr = { 1108 .devtypes = CFI_DEVICETYPE_X8,
1198 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1109 .uaddr = MTD_UADDR_0x0555_0x02AA,
1199 }, 1110 .dev_size = SIZE_512KiB,
1200 .DevSize = SIZE_512KiB, 1111 .cmd_set = P_ID_AMD_STD,
1201 .CmdSet = P_ID_AMD_STD, 1112 .nr_regions = 1,
1202 .NumEraseRegions= 1,
1203 .regions = { 1113 .regions = {
1204 ERASEINFO(0x10000,8), 1114 ERASEINFO(0x10000,8),
1205 } 1115 }
@@ -1207,12 +1117,11 @@ static const struct amd_flash_info jedec_table[] = {
1207 .mfr_id = MANUFACTURER_MACRONIX, 1117 .mfr_id = MANUFACTURER_MACRONIX,
1208 .dev_id = MX29F016, 1118 .dev_id = MX29F016,
1209 .name = "Macronix MX29F016", 1119 .name = "Macronix MX29F016",
1210 .uaddr = { 1120 .devtypes = CFI_DEVICETYPE_X8,
1211 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1121 .uaddr = MTD_UADDR_0x0555_0x02AA,
1212 }, 1122 .dev_size = SIZE_2MiB,
1213 .DevSize = SIZE_2MiB, 1123 .cmd_set = P_ID_AMD_STD,
1214 .CmdSet = P_ID_AMD_STD, 1124 .nr_regions = 1,
1215 .NumEraseRegions= 1,
1216 .regions = { 1125 .regions = {
1217 ERASEINFO(0x10000,32), 1126 ERASEINFO(0x10000,32),
1218 } 1127 }
@@ -1220,12 +1129,11 @@ static const struct amd_flash_info jedec_table[] = {
1220 .mfr_id = MANUFACTURER_MACRONIX, 1129 .mfr_id = MANUFACTURER_MACRONIX,
1221 .dev_id = MX29F004T, 1130 .dev_id = MX29F004T,
1222 .name = "Macronix MX29F004T", 1131 .name = "Macronix MX29F004T",
1223 .uaddr = { 1132 .devtypes = CFI_DEVICETYPE_X8,
1224 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1133 .uaddr = MTD_UADDR_0x0555_0x02AA,
1225 }, 1134 .dev_size = SIZE_512KiB,
1226 .DevSize = SIZE_512KiB, 1135 .cmd_set = P_ID_AMD_STD,
1227 .CmdSet = P_ID_AMD_STD, 1136 .nr_regions = 4,
1228 .NumEraseRegions= 4,
1229 .regions = { 1137 .regions = {
1230 ERASEINFO(0x10000,7), 1138 ERASEINFO(0x10000,7),
1231 ERASEINFO(0x08000,1), 1139 ERASEINFO(0x08000,1),
@@ -1236,12 +1144,11 @@ static const struct amd_flash_info jedec_table[] = {
1236 .mfr_id = MANUFACTURER_MACRONIX, 1144 .mfr_id = MANUFACTURER_MACRONIX,
1237 .dev_id = MX29F004B, 1145 .dev_id = MX29F004B,
1238 .name = "Macronix MX29F004B", 1146 .name = "Macronix MX29F004B",
1239 .uaddr = { 1147 .devtypes = CFI_DEVICETYPE_X8,
1240 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1148 .uaddr = MTD_UADDR_0x0555_0x02AA,
1241 }, 1149 .dev_size = SIZE_512KiB,
1242 .DevSize = SIZE_512KiB, 1150 .cmd_set = P_ID_AMD_STD,
1243 .CmdSet = P_ID_AMD_STD, 1151 .nr_regions = 4,
1244 .NumEraseRegions= 4,
1245 .regions = { 1152 .regions = {
1246 ERASEINFO(0x04000,1), 1153 ERASEINFO(0x04000,1),
1247 ERASEINFO(0x02000,2), 1154 ERASEINFO(0x02000,2),
@@ -1252,12 +1159,11 @@ static const struct amd_flash_info jedec_table[] = {
1252 .mfr_id = MANUFACTURER_MACRONIX, 1159 .mfr_id = MANUFACTURER_MACRONIX,
1253 .dev_id = MX29F002T, 1160 .dev_id = MX29F002T,
1254 .name = "Macronix MX29F002T", 1161 .name = "Macronix MX29F002T",
1255 .uaddr = { 1162 .devtypes = CFI_DEVICETYPE_X8,
1256 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1163 .uaddr = MTD_UADDR_0x0555_0x02AA,
1257 }, 1164 .dev_size = SIZE_256KiB,
1258 .DevSize = SIZE_256KiB, 1165 .cmd_set = P_ID_AMD_STD,
1259 .CmdSet = P_ID_AMD_STD, 1166 .nr_regions = 4,
1260 .NumEraseRegions= 4,
1261 .regions = { 1167 .regions = {
1262 ERASEINFO(0x10000,3), 1168 ERASEINFO(0x10000,3),
1263 ERASEINFO(0x08000,1), 1169 ERASEINFO(0x08000,1),
@@ -1268,12 +1174,11 @@ static const struct amd_flash_info jedec_table[] = {
1268 .mfr_id = MANUFACTURER_PMC, 1174 .mfr_id = MANUFACTURER_PMC,
1269 .dev_id = PM49FL002, 1175 .dev_id = PM49FL002,
1270 .name = "PMC Pm49FL002", 1176 .name = "PMC Pm49FL002",
1271 .uaddr = { 1177 .devtypes = CFI_DEVICETYPE_X8,
1272 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1178 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1273 }, 1179 .dev_size = SIZE_256KiB,
1274 .DevSize = SIZE_256KiB, 1180 .cmd_set = P_ID_AMD_STD,
1275 .CmdSet = P_ID_AMD_STD, 1181 .nr_regions = 1,
1276 .NumEraseRegions= 1,
1277 .regions = { 1182 .regions = {
1278 ERASEINFO( 0x01000, 64 ) 1183 ERASEINFO( 0x01000, 64 )
1279 } 1184 }
@@ -1281,12 +1186,11 @@ static const struct amd_flash_info jedec_table[] = {
1281 .mfr_id = MANUFACTURER_PMC, 1186 .mfr_id = MANUFACTURER_PMC,
1282 .dev_id = PM49FL004, 1187 .dev_id = PM49FL004,
1283 .name = "PMC Pm49FL004", 1188 .name = "PMC Pm49FL004",
1284 .uaddr = { 1189 .devtypes = CFI_DEVICETYPE_X8,
1285 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1190 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1286 }, 1191 .dev_size = SIZE_512KiB,
1287 .DevSize = SIZE_512KiB, 1192 .cmd_set = P_ID_AMD_STD,
1288 .CmdSet = P_ID_AMD_STD, 1193 .nr_regions = 1,
1289 .NumEraseRegions= 1,
1290 .regions = { 1194 .regions = {
1291 ERASEINFO( 0x01000, 128 ) 1195 ERASEINFO( 0x01000, 128 )
1292 } 1196 }
@@ -1294,12 +1198,11 @@ static const struct amd_flash_info jedec_table[] = {
1294 .mfr_id = MANUFACTURER_PMC, 1198 .mfr_id = MANUFACTURER_PMC,
1295 .dev_id = PM49FL008, 1199 .dev_id = PM49FL008,
1296 .name = "PMC Pm49FL008", 1200 .name = "PMC Pm49FL008",
1297 .uaddr = { 1201 .devtypes = CFI_DEVICETYPE_X8,
1298 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1202 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1299 }, 1203 .dev_size = SIZE_1MiB,
1300 .DevSize = SIZE_1MiB, 1204 .cmd_set = P_ID_AMD_STD,
1301 .CmdSet = P_ID_AMD_STD, 1205 .nr_regions = 1,
1302 .NumEraseRegions= 1,
1303 .regions = { 1206 .regions = {
1304 ERASEINFO( 0x01000, 256 ) 1207 ERASEINFO( 0x01000, 256 )
1305 } 1208 }
@@ -1307,25 +1210,23 @@ static const struct amd_flash_info jedec_table[] = {
1307 .mfr_id = MANUFACTURER_SHARP, 1210 .mfr_id = MANUFACTURER_SHARP,
1308 .dev_id = LH28F640BF, 1211 .dev_id = LH28F640BF,
1309 .name = "LH28F640BF", 1212 .name = "LH28F640BF",
1310 .uaddr = { 1213 .devtypes = CFI_DEVICETYPE_X8,
1311 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1214 .uaddr = MTD_UADDR_UNNECESSARY,
1312 }, 1215 .dev_size = SIZE_4MiB,
1313 .DevSize = SIZE_4MiB, 1216 .cmd_set = P_ID_INTEL_STD,
1314 .CmdSet = P_ID_INTEL_STD, 1217 .nr_regions = 1,
1315 .NumEraseRegions= 1, 1218 .regions = {
1316 .regions = {
1317 ERASEINFO(0x40000,16), 1219 ERASEINFO(0x40000,16),
1318 } 1220 }
1319 }, { 1221 }, {
1320 .mfr_id = MANUFACTURER_SST, 1222 .mfr_id = MANUFACTURER_SST,
1321 .dev_id = SST39LF512, 1223 .dev_id = SST39LF512,
1322 .name = "SST 39LF512", 1224 .name = "SST 39LF512",
1323 .uaddr = { 1225 .devtypes = CFI_DEVICETYPE_X8,
1324 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1226 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1325 }, 1227 .dev_size = SIZE_64KiB,
1326 .DevSize = SIZE_64KiB, 1228 .cmd_set = P_ID_AMD_STD,
1327 .CmdSet = P_ID_AMD_STD, 1229 .nr_regions = 1,
1328 .NumEraseRegions= 1,
1329 .regions = { 1230 .regions = {
1330 ERASEINFO(0x01000,16), 1231 ERASEINFO(0x01000,16),
1331 } 1232 }
@@ -1333,12 +1234,11 @@ static const struct amd_flash_info jedec_table[] = {
1333 .mfr_id = MANUFACTURER_SST, 1234 .mfr_id = MANUFACTURER_SST,
1334 .dev_id = SST39LF010, 1235 .dev_id = SST39LF010,
1335 .name = "SST 39LF010", 1236 .name = "SST 39LF010",
1336 .uaddr = { 1237 .devtypes = CFI_DEVICETYPE_X8,
1337 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1238 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1338 }, 1239 .dev_size = SIZE_128KiB,
1339 .DevSize = SIZE_128KiB, 1240 .cmd_set = P_ID_AMD_STD,
1340 .CmdSet = P_ID_AMD_STD, 1241 .nr_regions = 1,
1341 .NumEraseRegions= 1,
1342 .regions = { 1242 .regions = {
1343 ERASEINFO(0x01000,32), 1243 ERASEINFO(0x01000,32),
1344 } 1244 }
@@ -1346,36 +1246,33 @@ static const struct amd_flash_info jedec_table[] = {
1346 .mfr_id = MANUFACTURER_SST, 1246 .mfr_id = MANUFACTURER_SST,
1347 .dev_id = SST29EE020, 1247 .dev_id = SST29EE020,
1348 .name = "SST 29EE020", 1248 .name = "SST 29EE020",
1349 .uaddr = { 1249 .devtypes = CFI_DEVICETYPE_X8,
1350 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1250 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1351 }, 1251 .dev_size = SIZE_256KiB,
1352 .DevSize = SIZE_256KiB, 1252 .cmd_set = P_ID_SST_PAGE,
1353 .CmdSet = P_ID_SST_PAGE, 1253 .nr_regions = 1,
1354 .NumEraseRegions= 1, 1254 .regions = {ERASEINFO(0x01000,64),
1355 .regions = {ERASEINFO(0x01000,64), 1255 }
1356 } 1256 }, {
1357 }, {
1358 .mfr_id = MANUFACTURER_SST, 1257 .mfr_id = MANUFACTURER_SST,
1359 .dev_id = SST29LE020, 1258 .dev_id = SST29LE020,
1360 .name = "SST 29LE020", 1259 .name = "SST 29LE020",
1361 .uaddr = { 1260 .devtypes = CFI_DEVICETYPE_X8,
1362 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1261 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1363 }, 1262 .dev_size = SIZE_256KiB,
1364 .DevSize = SIZE_256KiB, 1263 .cmd_set = P_ID_SST_PAGE,
1365 .CmdSet = P_ID_SST_PAGE, 1264 .nr_regions = 1,
1366 .NumEraseRegions= 1, 1265 .regions = {ERASEINFO(0x01000,64),
1367 .regions = {ERASEINFO(0x01000,64), 1266 }
1368 }
1369 }, { 1267 }, {
1370 .mfr_id = MANUFACTURER_SST, 1268 .mfr_id = MANUFACTURER_SST,
1371 .dev_id = SST39LF020, 1269 .dev_id = SST39LF020,
1372 .name = "SST 39LF020", 1270 .name = "SST 39LF020",
1373 .uaddr = { 1271 .devtypes = CFI_DEVICETYPE_X8,
1374 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1272 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1375 }, 1273 .dev_size = SIZE_256KiB,
1376 .DevSize = SIZE_256KiB, 1274 .cmd_set = P_ID_AMD_STD,
1377 .CmdSet = P_ID_AMD_STD, 1275 .nr_regions = 1,
1378 .NumEraseRegions= 1,
1379 .regions = { 1276 .regions = {
1380 ERASEINFO(0x01000,64), 1277 ERASEINFO(0x01000,64),
1381 } 1278 }
@@ -1383,12 +1280,11 @@ static const struct amd_flash_info jedec_table[] = {
1383 .mfr_id = MANUFACTURER_SST, 1280 .mfr_id = MANUFACTURER_SST,
1384 .dev_id = SST39LF040, 1281 .dev_id = SST39LF040,
1385 .name = "SST 39LF040", 1282 .name = "SST 39LF040",
1386 .uaddr = { 1283 .devtypes = CFI_DEVICETYPE_X8,
1387 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1284 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1388 }, 1285 .dev_size = SIZE_512KiB,
1389 .DevSize = SIZE_512KiB, 1286 .cmd_set = P_ID_AMD_STD,
1390 .CmdSet = P_ID_AMD_STD, 1287 .nr_regions = 1,
1391 .NumEraseRegions= 1,
1392 .regions = { 1288 .regions = {
1393 ERASEINFO(0x01000,128), 1289 ERASEINFO(0x01000,128),
1394 } 1290 }
@@ -1396,12 +1292,11 @@ static const struct amd_flash_info jedec_table[] = {
1396 .mfr_id = MANUFACTURER_SST, 1292 .mfr_id = MANUFACTURER_SST,
1397 .dev_id = SST39SF010A, 1293 .dev_id = SST39SF010A,
1398 .name = "SST 39SF010A", 1294 .name = "SST 39SF010A",
1399 .uaddr = { 1295 .devtypes = CFI_DEVICETYPE_X8,
1400 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1296 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1401 }, 1297 .dev_size = SIZE_128KiB,
1402 .DevSize = SIZE_128KiB, 1298 .cmd_set = P_ID_AMD_STD,
1403 .CmdSet = P_ID_AMD_STD, 1299 .nr_regions = 1,
1404 .NumEraseRegions= 1,
1405 .regions = { 1300 .regions = {
1406 ERASEINFO(0x01000,32), 1301 ERASEINFO(0x01000,32),
1407 } 1302 }
@@ -1409,26 +1304,24 @@ static const struct amd_flash_info jedec_table[] = {
1409 .mfr_id = MANUFACTURER_SST, 1304 .mfr_id = MANUFACTURER_SST,
1410 .dev_id = SST39SF020A, 1305 .dev_id = SST39SF020A,
1411 .name = "SST 39SF020A", 1306 .name = "SST 39SF020A",
1412 .uaddr = { 1307 .devtypes = CFI_DEVICETYPE_X8,
1413 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1308 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1414 }, 1309 .dev_size = SIZE_256KiB,
1415 .DevSize = SIZE_256KiB, 1310 .cmd_set = P_ID_AMD_STD,
1416 .CmdSet = P_ID_AMD_STD, 1311 .nr_regions = 1,
1417 .NumEraseRegions= 1,
1418 .regions = { 1312 .regions = {
1419 ERASEINFO(0x01000,64), 1313 ERASEINFO(0x01000,64),
1420 } 1314 }
1421 }, { 1315 }, {
1422 .mfr_id = MANUFACTURER_SST, 1316 .mfr_id = MANUFACTURER_SST,
1423 .dev_id = SST49LF040B, 1317 .dev_id = SST49LF040B,
1424 .name = "SST 49LF040B", 1318 .name = "SST 49LF040B",
1425 .uaddr = { 1319 .devtypes = CFI_DEVICETYPE_X8,
1426 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1320 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1427 }, 1321 .dev_size = SIZE_512KiB,
1428 .DevSize = SIZE_512KiB, 1322 .cmd_set = P_ID_AMD_STD,
1429 .CmdSet = P_ID_AMD_STD, 1323 .nr_regions = 1,
1430 .NumEraseRegions= 1, 1324 .regions = {
1431 .regions = {
1432 ERASEINFO(0x01000,128), 1325 ERASEINFO(0x01000,128),
1433 } 1326 }
1434 }, { 1327 }, {
@@ -1436,12 +1329,11 @@ static const struct amd_flash_info jedec_table[] = {
1436 .mfr_id = MANUFACTURER_SST, 1329 .mfr_id = MANUFACTURER_SST,
1437 .dev_id = SST49LF004B, 1330 .dev_id = SST49LF004B,
1438 .name = "SST 49LF004B", 1331 .name = "SST 49LF004B",
1439 .uaddr = { 1332 .devtypes = CFI_DEVICETYPE_X8,
1440 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1333 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1441 }, 1334 .dev_size = SIZE_512KiB,
1442 .DevSize = SIZE_512KiB, 1335 .cmd_set = P_ID_AMD_STD,
1443 .CmdSet = P_ID_AMD_STD, 1336 .nr_regions = 1,
1444 .NumEraseRegions= 1,
1445 .regions = { 1337 .regions = {
1446 ERASEINFO(0x01000,128), 1338 ERASEINFO(0x01000,128),
1447 } 1339 }
@@ -1449,12 +1341,11 @@ static const struct amd_flash_info jedec_table[] = {
1449 .mfr_id = MANUFACTURER_SST, 1341 .mfr_id = MANUFACTURER_SST,
1450 .dev_id = SST49LF008A, 1342 .dev_id = SST49LF008A,
1451 .name = "SST 49LF008A", 1343 .name = "SST 49LF008A",
1452 .uaddr = { 1344 .devtypes = CFI_DEVICETYPE_X8,
1453 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1345 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1454 }, 1346 .dev_size = SIZE_1MiB,
1455 .DevSize = SIZE_1MiB, 1347 .cmd_set = P_ID_AMD_STD,
1456 .CmdSet = P_ID_AMD_STD, 1348 .nr_regions = 1,
1457 .NumEraseRegions= 1,
1458 .regions = { 1349 .regions = {
1459 ERASEINFO(0x01000,256), 1350 ERASEINFO(0x01000,256),
1460 } 1351 }
@@ -1462,12 +1353,11 @@ static const struct amd_flash_info jedec_table[] = {
1462 .mfr_id = MANUFACTURER_SST, 1353 .mfr_id = MANUFACTURER_SST,
1463 .dev_id = SST49LF030A, 1354 .dev_id = SST49LF030A,
1464 .name = "SST 49LF030A", 1355 .name = "SST 49LF030A",
1465 .uaddr = { 1356 .devtypes = CFI_DEVICETYPE_X8,
1466 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1357 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1467 }, 1358 .dev_size = SIZE_512KiB,
1468 .DevSize = SIZE_512KiB, 1359 .cmd_set = P_ID_AMD_STD,
1469 .CmdSet = P_ID_AMD_STD, 1360 .nr_regions = 1,
1470 .NumEraseRegions= 1,
1471 .regions = { 1361 .regions = {
1472 ERASEINFO(0x01000,96), 1362 ERASEINFO(0x01000,96),
1473 } 1363 }
@@ -1475,12 +1365,11 @@ static const struct amd_flash_info jedec_table[] = {
1475 .mfr_id = MANUFACTURER_SST, 1365 .mfr_id = MANUFACTURER_SST,
1476 .dev_id = SST49LF040A, 1366 .dev_id = SST49LF040A,
1477 .name = "SST 49LF040A", 1367 .name = "SST 49LF040A",
1478 .uaddr = { 1368 .devtypes = CFI_DEVICETYPE_X8,
1479 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1369 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1480 }, 1370 .dev_size = SIZE_512KiB,
1481 .DevSize = SIZE_512KiB, 1371 .cmd_set = P_ID_AMD_STD,
1482 .CmdSet = P_ID_AMD_STD, 1372 .nr_regions = 1,
1483 .NumEraseRegions= 1,
1484 .regions = { 1373 .regions = {
1485 ERASEINFO(0x01000,128), 1374 ERASEINFO(0x01000,128),
1486 } 1375 }
@@ -1488,57 +1377,49 @@ static const struct amd_flash_info jedec_table[] = {
1488 .mfr_id = MANUFACTURER_SST, 1377 .mfr_id = MANUFACTURER_SST,
1489 .dev_id = SST49LF080A, 1378 .dev_id = SST49LF080A,
1490 .name = "SST 49LF080A", 1379 .name = "SST 49LF080A",
1491 .uaddr = { 1380 .devtypes = CFI_DEVICETYPE_X8,
1492 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1381 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1493 }, 1382 .dev_size = SIZE_1MiB,
1494 .DevSize = SIZE_1MiB, 1383 .cmd_set = P_ID_AMD_STD,
1495 .CmdSet = P_ID_AMD_STD, 1384 .nr_regions = 1,
1496 .NumEraseRegions= 1,
1497 .regions = { 1385 .regions = {
1498 ERASEINFO(0x01000,256), 1386 ERASEINFO(0x01000,256),
1499 } 1387 }
1500 }, { 1388 }, {
1501 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1389 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1502 .dev_id = SST39LF160, 1390 .dev_id = SST39LF160,
1503 .name = "SST 39LF160", 1391 .name = "SST 39LF160",
1504 .uaddr = { 1392 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1505 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1393 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1506 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1394 .dev_size = SIZE_2MiB,
1507 }, 1395 .cmd_set = P_ID_AMD_STD,
1508 .DevSize = SIZE_2MiB, 1396 .nr_regions = 2,
1509 .CmdSet = P_ID_AMD_STD, 1397 .regions = {
1510 .NumEraseRegions= 2, 1398 ERASEINFO(0x1000,256),
1511 .regions = { 1399 ERASEINFO(0x1000,256)
1512 ERASEINFO(0x1000,256), 1400 }
1513 ERASEINFO(0x1000,256) 1401 }, {
1514 } 1402 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1515 }, { 1403 .dev_id = SST39VF1601,
1516 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1404 .name = "SST 39VF1601",
1517 .dev_id = SST39VF1601, 1405 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1518 .name = "SST 39VF1601", 1406 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1519 .uaddr = { 1407 .dev_size = SIZE_2MiB,
1520 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1408 .cmd_set = P_ID_AMD_STD,
1521 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1409 .nr_regions = 2,
1522 }, 1410 .regions = {
1523 .DevSize = SIZE_2MiB, 1411 ERASEINFO(0x1000,256),
1524 .CmdSet = P_ID_AMD_STD, 1412 ERASEINFO(0x1000,256)
1525 .NumEraseRegions= 2, 1413 }
1526 .regions = {
1527 ERASEINFO(0x1000,256),
1528 ERASEINFO(0x1000,256)
1529 }
1530
1531 }, { 1414 }, {
1532 .mfr_id = MANUFACTURER_ST, 1415 .mfr_id = MANUFACTURER_ST,
1533 .dev_id = M29F800AB, 1416 .dev_id = M29F800AB,
1534 .name = "ST M29F800AB", 1417 .name = "ST M29F800AB",
1535 .uaddr = { 1418 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1536 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1419 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1537 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1420 .dev_size = SIZE_1MiB,
1538 }, 1421 .cmd_set = P_ID_AMD_STD,
1539 .DevSize = SIZE_1MiB, 1422 .nr_regions = 4,
1540 .CmdSet = P_ID_AMD_STD,
1541 .NumEraseRegions= 4,
1542 .regions = { 1423 .regions = {
1543 ERASEINFO(0x04000,1), 1424 ERASEINFO(0x04000,1),
1544 ERASEINFO(0x02000,2), 1425 ERASEINFO(0x02000,2),
@@ -1549,13 +1430,11 @@ static const struct amd_flash_info jedec_table[] = {
1549 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1430 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1550 .dev_id = M29W800DT, 1431 .dev_id = M29W800DT,
1551 .name = "ST M29W800DT", 1432 .name = "ST M29W800DT",
1552 .uaddr = { 1433 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1553 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1434 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1554 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1435 .dev_size = SIZE_1MiB,
1555 }, 1436 .cmd_set = P_ID_AMD_STD,
1556 .DevSize = SIZE_1MiB, 1437 .nr_regions = 4,
1557 .CmdSet = P_ID_AMD_STD,
1558 .NumEraseRegions= 4,
1559 .regions = { 1438 .regions = {
1560 ERASEINFO(0x10000,15), 1439 ERASEINFO(0x10000,15),
1561 ERASEINFO(0x08000,1), 1440 ERASEINFO(0x08000,1),
@@ -1566,13 +1445,11 @@ static const struct amd_flash_info jedec_table[] = {
1566 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1445 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1567 .dev_id = M29W800DB, 1446 .dev_id = M29W800DB,
1568 .name = "ST M29W800DB", 1447 .name = "ST M29W800DB",
1569 .uaddr = { 1448 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1570 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1449 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1571 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1450 .dev_size = SIZE_1MiB,
1572 }, 1451 .cmd_set = P_ID_AMD_STD,
1573 .DevSize = SIZE_1MiB, 1452 .nr_regions = 4,
1574 .CmdSet = P_ID_AMD_STD,
1575 .NumEraseRegions= 4,
1576 .regions = { 1453 .regions = {
1577 ERASEINFO(0x04000,1), 1454 ERASEINFO(0x04000,1),
1578 ERASEINFO(0x02000,2), 1455 ERASEINFO(0x02000,2),
@@ -1583,13 +1460,11 @@ static const struct amd_flash_info jedec_table[] = {
1583 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1460 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1584 .dev_id = M29W160DT, 1461 .dev_id = M29W160DT,
1585 .name = "ST M29W160DT", 1462 .name = "ST M29W160DT",
1586 .uaddr = { 1463 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1587 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1464 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1588 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1465 .dev_size = SIZE_2MiB,
1589 }, 1466 .cmd_set = P_ID_AMD_STD,
1590 .DevSize = SIZE_2MiB, 1467 .nr_regions = 4,
1591 .CmdSet = P_ID_AMD_STD,
1592 .NumEraseRegions= 4,
1593 .regions = { 1468 .regions = {
1594 ERASEINFO(0x10000,31), 1469 ERASEINFO(0x10000,31),
1595 ERASEINFO(0x08000,1), 1470 ERASEINFO(0x08000,1),
@@ -1600,13 +1475,11 @@ static const struct amd_flash_info jedec_table[] = {
1600 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1475 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1601 .dev_id = M29W160DB, 1476 .dev_id = M29W160DB,
1602 .name = "ST M29W160DB", 1477 .name = "ST M29W160DB",
1603 .uaddr = { 1478 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1604 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1479 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1605 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1480 .dev_size = SIZE_2MiB,
1606 }, 1481 .cmd_set = P_ID_AMD_STD,
1607 .DevSize = SIZE_2MiB, 1482 .nr_regions = 4,
1608 .CmdSet = P_ID_AMD_STD,
1609 .NumEraseRegions= 4,
1610 .regions = { 1483 .regions = {
1611 ERASEINFO(0x04000,1), 1484 ERASEINFO(0x04000,1),
1612 ERASEINFO(0x02000,2), 1485 ERASEINFO(0x02000,2),
@@ -1617,12 +1490,11 @@ static const struct amd_flash_info jedec_table[] = {
1617 .mfr_id = MANUFACTURER_ST, 1490 .mfr_id = MANUFACTURER_ST,
1618 .dev_id = M29W040B, 1491 .dev_id = M29W040B,
1619 .name = "ST M29W040B", 1492 .name = "ST M29W040B",
1620 .uaddr = { 1493 .devtypes = CFI_DEVICETYPE_X8,
1621 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1494 .uaddr = MTD_UADDR_0x0555_0x02AA,
1622 }, 1495 .dev_size = SIZE_512KiB,
1623 .DevSize = SIZE_512KiB, 1496 .cmd_set = P_ID_AMD_STD,
1624 .CmdSet = P_ID_AMD_STD, 1497 .nr_regions = 1,
1625 .NumEraseRegions= 1,
1626 .regions = { 1498 .regions = {
1627 ERASEINFO(0x10000,8), 1499 ERASEINFO(0x10000,8),
1628 } 1500 }
@@ -1630,12 +1502,11 @@ static const struct amd_flash_info jedec_table[] = {
1630 .mfr_id = MANUFACTURER_ST, 1502 .mfr_id = MANUFACTURER_ST,
1631 .dev_id = M50FW040, 1503 .dev_id = M50FW040,
1632 .name = "ST M50FW040", 1504 .name = "ST M50FW040",
1633 .uaddr = { 1505 .devtypes = CFI_DEVICETYPE_X8,
1634 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1506 .uaddr = MTD_UADDR_UNNECESSARY,
1635 }, 1507 .dev_size = SIZE_512KiB,
1636 .DevSize = SIZE_512KiB, 1508 .cmd_set = P_ID_INTEL_EXT,
1637 .CmdSet = P_ID_INTEL_EXT, 1509 .nr_regions = 1,
1638 .NumEraseRegions= 1,
1639 .regions = { 1510 .regions = {
1640 ERASEINFO(0x10000,8), 1511 ERASEINFO(0x10000,8),
1641 } 1512 }
@@ -1643,12 +1514,11 @@ static const struct amd_flash_info jedec_table[] = {
1643 .mfr_id = MANUFACTURER_ST, 1514 .mfr_id = MANUFACTURER_ST,
1644 .dev_id = M50FW080, 1515 .dev_id = M50FW080,
1645 .name = "ST M50FW080", 1516 .name = "ST M50FW080",
1646 .uaddr = { 1517 .devtypes = CFI_DEVICETYPE_X8,
1647 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1518 .uaddr = MTD_UADDR_UNNECESSARY,
1648 }, 1519 .dev_size = SIZE_1MiB,
1649 .DevSize = SIZE_1MiB, 1520 .cmd_set = P_ID_INTEL_EXT,
1650 .CmdSet = P_ID_INTEL_EXT, 1521 .nr_regions = 1,
1651 .NumEraseRegions= 1,
1652 .regions = { 1522 .regions = {
1653 ERASEINFO(0x10000,16), 1523 ERASEINFO(0x10000,16),
1654 } 1524 }
@@ -1656,12 +1526,11 @@ static const struct amd_flash_info jedec_table[] = {
1656 .mfr_id = MANUFACTURER_ST, 1526 .mfr_id = MANUFACTURER_ST,
1657 .dev_id = M50FW016, 1527 .dev_id = M50FW016,
1658 .name = "ST M50FW016", 1528 .name = "ST M50FW016",
1659 .uaddr = { 1529 .devtypes = CFI_DEVICETYPE_X8,
1660 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1530 .uaddr = MTD_UADDR_UNNECESSARY,
1661 }, 1531 .dev_size = SIZE_2MiB,
1662 .DevSize = SIZE_2MiB, 1532 .cmd_set = P_ID_INTEL_EXT,
1663 .CmdSet = P_ID_INTEL_EXT, 1533 .nr_regions = 1,
1664 .NumEraseRegions= 1,
1665 .regions = { 1534 .regions = {
1666 ERASEINFO(0x10000,32), 1535 ERASEINFO(0x10000,32),
1667 } 1536 }
@@ -1669,12 +1538,11 @@ static const struct amd_flash_info jedec_table[] = {
1669 .mfr_id = MANUFACTURER_ST, 1538 .mfr_id = MANUFACTURER_ST,
1670 .dev_id = M50LPW080, 1539 .dev_id = M50LPW080,
1671 .name = "ST M50LPW080", 1540 .name = "ST M50LPW080",
1672 .uaddr = { 1541 .devtypes = CFI_DEVICETYPE_X8,
1673 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1542 .uaddr = MTD_UADDR_UNNECESSARY,
1674 }, 1543 .dev_size = SIZE_1MiB,
1675 .DevSize = SIZE_1MiB, 1544 .cmd_set = P_ID_INTEL_EXT,
1676 .CmdSet = P_ID_INTEL_EXT, 1545 .nr_regions = 1,
1677 .NumEraseRegions= 1,
1678 .regions = { 1546 .regions = {
1679 ERASEINFO(0x10000,16), 1547 ERASEINFO(0x10000,16),
1680 } 1548 }
@@ -1682,13 +1550,11 @@ static const struct amd_flash_info jedec_table[] = {
1682 .mfr_id = MANUFACTURER_TOSHIBA, 1550 .mfr_id = MANUFACTURER_TOSHIBA,
1683 .dev_id = TC58FVT160, 1551 .dev_id = TC58FVT160,
1684 .name = "Toshiba TC58FVT160", 1552 .name = "Toshiba TC58FVT160",
1685 .uaddr = { 1553 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1686 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1554 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1687 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1555 .dev_size = SIZE_2MiB,
1688 }, 1556 .cmd_set = P_ID_AMD_STD,
1689 .DevSize = SIZE_2MiB, 1557 .nr_regions = 4,
1690 .CmdSet = P_ID_AMD_STD,
1691 .NumEraseRegions= 4,
1692 .regions = { 1558 .regions = {
1693 ERASEINFO(0x10000,31), 1559 ERASEINFO(0x10000,31),
1694 ERASEINFO(0x08000,1), 1560 ERASEINFO(0x08000,1),
@@ -1699,13 +1565,11 @@ static const struct amd_flash_info jedec_table[] = {
1699 .mfr_id = MANUFACTURER_TOSHIBA, 1565 .mfr_id = MANUFACTURER_TOSHIBA,
1700 .dev_id = TC58FVB160, 1566 .dev_id = TC58FVB160,
1701 .name = "Toshiba TC58FVB160", 1567 .name = "Toshiba TC58FVB160",
1702 .uaddr = { 1568 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1703 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1569 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1704 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1570 .dev_size = SIZE_2MiB,
1705 }, 1571 .cmd_set = P_ID_AMD_STD,
1706 .DevSize = SIZE_2MiB, 1572 .nr_regions = 4,
1707 .CmdSet = P_ID_AMD_STD,
1708 .NumEraseRegions= 4,
1709 .regions = { 1573 .regions = {
1710 ERASEINFO(0x04000,1), 1574 ERASEINFO(0x04000,1),
1711 ERASEINFO(0x02000,2), 1575 ERASEINFO(0x02000,2),
@@ -1716,13 +1580,11 @@ static const struct amd_flash_info jedec_table[] = {
1716 .mfr_id = MANUFACTURER_TOSHIBA, 1580 .mfr_id = MANUFACTURER_TOSHIBA,
1717 .dev_id = TC58FVB321, 1581 .dev_id = TC58FVB321,
1718 .name = "Toshiba TC58FVB321", 1582 .name = "Toshiba TC58FVB321",
1719 .uaddr = { 1583 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1720 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1584 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1721 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1585 .dev_size = SIZE_4MiB,
1722 }, 1586 .cmd_set = P_ID_AMD_STD,
1723 .DevSize = SIZE_4MiB, 1587 .nr_regions = 2,
1724 .CmdSet = P_ID_AMD_STD,
1725 .NumEraseRegions= 2,
1726 .regions = { 1588 .regions = {
1727 ERASEINFO(0x02000,8), 1589 ERASEINFO(0x02000,8),
1728 ERASEINFO(0x10000,63) 1590 ERASEINFO(0x10000,63)
@@ -1731,13 +1593,11 @@ static const struct amd_flash_info jedec_table[] = {
1731 .mfr_id = MANUFACTURER_TOSHIBA, 1593 .mfr_id = MANUFACTURER_TOSHIBA,
1732 .dev_id = TC58FVT321, 1594 .dev_id = TC58FVT321,
1733 .name = "Toshiba TC58FVT321", 1595 .name = "Toshiba TC58FVT321",
1734 .uaddr = { 1596 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1735 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1597 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1736 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1598 .dev_size = SIZE_4MiB,
1737 }, 1599 .cmd_set = P_ID_AMD_STD,
1738 .DevSize = SIZE_4MiB, 1600 .nr_regions = 2,
1739 .CmdSet = P_ID_AMD_STD,
1740 .NumEraseRegions= 2,
1741 .regions = { 1601 .regions = {
1742 ERASEINFO(0x10000,63), 1602 ERASEINFO(0x10000,63),
1743 ERASEINFO(0x02000,8) 1603 ERASEINFO(0x02000,8)
@@ -1746,13 +1606,11 @@ static const struct amd_flash_info jedec_table[] = {
1746 .mfr_id = MANUFACTURER_TOSHIBA, 1606 .mfr_id = MANUFACTURER_TOSHIBA,
1747 .dev_id = TC58FVB641, 1607 .dev_id = TC58FVB641,
1748 .name = "Toshiba TC58FVB641", 1608 .name = "Toshiba TC58FVB641",
1749 .uaddr = { 1609 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1750 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1610 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1751 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1611 .dev_size = SIZE_8MiB,
1752 }, 1612 .cmd_set = P_ID_AMD_STD,
1753 .DevSize = SIZE_8MiB, 1613 .nr_regions = 2,
1754 .CmdSet = P_ID_AMD_STD,
1755 .NumEraseRegions= 2,
1756 .regions = { 1614 .regions = {
1757 ERASEINFO(0x02000,8), 1615 ERASEINFO(0x02000,8),
1758 ERASEINFO(0x10000,127) 1616 ERASEINFO(0x10000,127)
@@ -1761,13 +1619,11 @@ static const struct amd_flash_info jedec_table[] = {
1761 .mfr_id = MANUFACTURER_TOSHIBA, 1619 .mfr_id = MANUFACTURER_TOSHIBA,
1762 .dev_id = TC58FVT641, 1620 .dev_id = TC58FVT641,
1763 .name = "Toshiba TC58FVT641", 1621 .name = "Toshiba TC58FVT641",
1764 .uaddr = { 1622 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1765 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1623 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1766 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1624 .dev_size = SIZE_8MiB,
1767 }, 1625 .cmd_set = P_ID_AMD_STD,
1768 .DevSize = SIZE_8MiB, 1626 .nr_regions = 2,
1769 .CmdSet = P_ID_AMD_STD,
1770 .NumEraseRegions= 2,
1771 .regions = { 1627 .regions = {
1772 ERASEINFO(0x10000,127), 1628 ERASEINFO(0x10000,127),
1773 ERASEINFO(0x02000,8) 1629 ERASEINFO(0x02000,8)
@@ -1776,12 +1632,11 @@ static const struct amd_flash_info jedec_table[] = {
1776 .mfr_id = MANUFACTURER_WINBOND, 1632 .mfr_id = MANUFACTURER_WINBOND,
1777 .dev_id = W49V002A, 1633 .dev_id = W49V002A,
1778 .name = "Winbond W49V002A", 1634 .name = "Winbond W49V002A",
1779 .uaddr = { 1635 .devtypes = CFI_DEVICETYPE_X8,
1780 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1636 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1781 }, 1637 .dev_size = SIZE_256KiB,
1782 .DevSize = SIZE_256KiB, 1638 .cmd_set = P_ID_AMD_STD,
1783 .CmdSet = P_ID_AMD_STD, 1639 .nr_regions = 4,
1784 .NumEraseRegions= 4,
1785 .regions = { 1640 .regions = {
1786 ERASEINFO(0x10000, 3), 1641 ERASEINFO(0x10000, 3),
1787 ERASEINFO(0x08000, 1), 1642 ERASEINFO(0x08000, 1),
@@ -1791,15 +1646,7 @@ static const struct amd_flash_info jedec_table[] = {
1791 } 1646 }
1792}; 1647};
1793 1648
1794 1649static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1795static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
1796
1797static int jedec_probe_chip(struct map_info *map, __u32 base,
1798 unsigned long *chip_map, struct cfi_private *cfi);
1799
1800static struct mtd_info *jedec_probe(struct map_info *map);
1801
1802static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1803 struct cfi_private *cfi) 1650 struct cfi_private *cfi)
1804{ 1651{
1805 map_word result; 1652 map_word result;
@@ -1810,7 +1657,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1810 return result.x[0] & mask; 1657 return result.x[0] & mask;
1811} 1658}
1812 1659
1813static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1660static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
1814 struct cfi_private *cfi) 1661 struct cfi_private *cfi)
1815{ 1662{
1816 map_word result; 1663 map_word result;
@@ -1821,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1821 return result.x[0] & mask; 1668 return result.x[0] & mask;
1822} 1669}
1823 1670
1824static inline void jedec_reset(u32 base, struct map_info *map, 1671static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1825 struct cfi_private *cfi)
1826{ 1672{
1827 /* Reset */ 1673 /* Reset */
1828 1674
@@ -1832,7 +1678,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1832 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1678 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
1833 * as they will ignore the writes and dont care what address 1679 * as they will ignore the writes and dont care what address
1834 * the F0 is written to */ 1680 * the F0 is written to */
1835 if(cfi->addr_unlock1) { 1681 if (cfi->addr_unlock1) {
1836 DEBUG( MTD_DEBUG_LEVEL3, 1682 DEBUG( MTD_DEBUG_LEVEL3,
1837 "reset unlock called %x %x \n", 1683 "reset unlock called %x %x \n",
1838 cfi->addr_unlock1,cfi->addr_unlock2); 1684 cfi->addr_unlock1,cfi->addr_unlock2);
@@ -1841,7 +1687,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1841 } 1687 }
1842 1688
1843 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1689 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1844 /* Some misdesigned intel chips do not respond for 0xF0 for a reset, 1690 /* Some misdesigned Intel chips do not respond for 0xF0 for a reset,
1845 * so ensure we're in read mode. Send both the Intel and the AMD command 1691 * so ensure we're in read mode. Send both the Intel and the AMD command
1846 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1692 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
1847 * this should be safe. 1693 * this should be safe.
@@ -1851,42 +1697,20 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1851} 1697}
1852 1698
1853 1699
1854static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
1855{
1856 int uaddr_idx;
1857 __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
1858
1859 switch ( device_type ) {
1860 case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
1861 case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
1862 case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
1863 default:
1864 printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
1865 __func__, device_type);
1866 goto uaddr_done;
1867 }
1868
1869 uaddr = finfo->uaddr[uaddr_idx];
1870
1871 if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
1872 /* ASSERT("The unlock addresses for non-8-bit mode
1873 are bollocks. We don't really need an array."); */
1874 uaddr = finfo->uaddr[0];
1875 }
1876
1877 uaddr_done:
1878 return uaddr;
1879}
1880
1881
1882static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1700static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1883{ 1701{
1884 int i,num_erase_regions; 1702 int i,num_erase_regions;
1885 __u8 uaddr; 1703 uint8_t uaddr;
1886 1704
1887 printk("Found: %s\n",jedec_table[index].name); 1705 if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
1706 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
1707 jedec_table[index].name, 4 * (1<<p_cfi->device_type));
1708 return 0;
1709 }
1710
1711 printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
1888 1712
1889 num_erase_regions = jedec_table[index].NumEraseRegions; 1713 num_erase_regions = jedec_table[index].nr_regions;
1890 1714
1891 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1715 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1892 if (!p_cfi->cfiq) { 1716 if (!p_cfi->cfiq) {
@@ -1896,9 +1720,9 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1896 1720
1897 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1721 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1898 1722
1899 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1723 p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
1900 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1724 p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
1901 p_cfi->cfiq->DevSize = jedec_table[index].DevSize; 1725 p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
1902 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1726 p_cfi->cfi_mode = CFI_MODE_JEDEC;
1903 1727
1904 for (i=0; i<num_erase_regions; i++){ 1728 for (i=0; i<num_erase_regions; i++){
@@ -1910,14 +1734,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1910 p_cfi->mfr = jedec_table[index].mfr_id; 1734 p_cfi->mfr = jedec_table[index].mfr_id;
1911 p_cfi->id = jedec_table[index].dev_id; 1735 p_cfi->id = jedec_table[index].dev_id;
1912 1736
1913 uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type); 1737 uaddr = jedec_table[index].uaddr;
1914 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1915 kfree( p_cfi->cfiq );
1916 return 0;
1917 }
1918 1738
1919 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1; 1739 /* The table has unlock addresses in _bytes_, and we try not to let
1920 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2; 1740 our brains explode when we see the datasheets talking about address
1741 lines numbered from A-1 to A18. The CFI table has unlock addresses
1742 in device-words according to the mode the device is connected in */
1743 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
1744 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
1921 1745
1922 return 1; /* ok */ 1746 return 1; /* ok */
1923} 1747}
@@ -1930,14 +1754,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1930 * be perfect - consequently there should be some module parameters that 1754 * be perfect - consequently there should be some module parameters that
1931 * could be manually specified to force the chip info. 1755 * could be manually specified to force the chip info.
1932 */ 1756 */
1933static inline int jedec_match( __u32 base, 1757static inline int jedec_match( uint32_t base,
1934 struct map_info *map, 1758 struct map_info *map,
1935 struct cfi_private *cfi, 1759 struct cfi_private *cfi,
1936 const struct amd_flash_info *finfo ) 1760 const struct amd_flash_info *finfo )
1937{ 1761{
1938 int rc = 0; /* failure until all tests pass */ 1762 int rc = 0; /* failure until all tests pass */
1939 u32 mfr, id; 1763 u32 mfr, id;
1940 __u8 uaddr; 1764 uint8_t uaddr;
1941 1765
1942 /* 1766 /*
1943 * The IDs must match. For X16 and X32 devices operating in 1767 * The IDs must match. For X16 and X32 devices operating in
@@ -1950,8 +1774,8 @@ static inline int jedec_match( __u32 base,
1950 */ 1774 */
1951 switch (cfi->device_type) { 1775 switch (cfi->device_type) {
1952 case CFI_DEVICETYPE_X8: 1776 case CFI_DEVICETYPE_X8:
1953 mfr = (__u8)finfo->mfr_id; 1777 mfr = (uint8_t)finfo->mfr_id;
1954 id = (__u8)finfo->dev_id; 1778 id = (uint8_t)finfo->dev_id;
1955 1779
1956 /* bjd: it seems that if we do this, we can end up 1780 /* bjd: it seems that if we do this, we can end up
1957 * detecting 16bit flashes as an 8bit device, even though 1781 * detecting 16bit flashes as an 8bit device, even though
@@ -1964,12 +1788,12 @@ static inline int jedec_match( __u32 base,
1964 } 1788 }
1965 break; 1789 break;
1966 case CFI_DEVICETYPE_X16: 1790 case CFI_DEVICETYPE_X16:
1967 mfr = (__u16)finfo->mfr_id; 1791 mfr = (uint16_t)finfo->mfr_id;
1968 id = (__u16)finfo->dev_id; 1792 id = (uint16_t)finfo->dev_id;
1969 break; 1793 break;
1970 case CFI_DEVICETYPE_X32: 1794 case CFI_DEVICETYPE_X32:
1971 mfr = (__u16)finfo->mfr_id; 1795 mfr = (uint16_t)finfo->mfr_id;
1972 id = (__u32)finfo->dev_id; 1796 id = (uint32_t)finfo->dev_id;
1973 break; 1797 break;
1974 default: 1798 default:
1975 printk(KERN_WARNING 1799 printk(KERN_WARNING
@@ -1984,25 +1808,25 @@ static inline int jedec_match( __u32 base,
1984 /* the part size must fit in the memory window */ 1808 /* the part size must fit in the memory window */
1985 DEBUG( MTD_DEBUG_LEVEL3, 1809 DEBUG( MTD_DEBUG_LEVEL3,
1986 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 1810 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
1987 __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) ); 1811 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
1988 if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) { 1812 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
1989 DEBUG( MTD_DEBUG_LEVEL3, 1813 DEBUG( MTD_DEBUG_LEVEL3,
1990 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 1814 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
1991 __func__, finfo->mfr_id, finfo->dev_id, 1815 __func__, finfo->mfr_id, finfo->dev_id,
1992 1 << finfo->DevSize ); 1816 1 << finfo->dev_size );
1993 goto match_done; 1817 goto match_done;
1994 } 1818 }
1995 1819
1996 uaddr = finfo_uaddr(finfo, cfi->device_type); 1820 if (! (finfo->devtypes & cfi->device_type))
1997 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1998 goto match_done; 1821 goto match_done;
1999 } 1822
1823 uaddr = finfo->uaddr;
2000 1824
2001 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 1825 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
2002 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 1826 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
2003 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 1827 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
2004 && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 || 1828 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
2005 unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) { 1829 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
2006 DEBUG( MTD_DEBUG_LEVEL3, 1830 DEBUG( MTD_DEBUG_LEVEL3,
2007 "MTD %s(): 0x%.4x 0x%.4x did not match\n", 1831 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
2008 __func__, 1832 __func__,
@@ -2042,7 +1866,7 @@ static inline int jedec_match( __u32 base,
2042 * were truly frobbing a real device. 1866 * were truly frobbing a real device.
2043 */ 1867 */
2044 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 1868 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
2045 if(cfi->addr_unlock1) { 1869 if (cfi->addr_unlock1) {
2046 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1870 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2047 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1871 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
2048 } 1872 }
@@ -2068,8 +1892,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2068 if (MTD_UADDR_UNNECESSARY == uaddr_idx) 1892 if (MTD_UADDR_UNNECESSARY == uaddr_idx)
2069 return 0; 1893 return 0;
2070 1894
2071 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; 1895 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
2072 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; 1896 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
2073 } 1897 }
2074 1898
2075 /* Make certain we aren't probing past the end of map */ 1899 /* Make certain we aren't probing past the end of map */
@@ -2081,19 +1905,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2081 1905
2082 } 1906 }
2083 /* Ensure the unlock addresses we try stay inside the map */ 1907 /* Ensure the unlock addresses we try stay inside the map */
2084 probe_offset1 = cfi_build_cmd_addr( 1908 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
2085 cfi->addr_unlock1, 1909 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
2086 cfi_interleave(cfi),
2087 cfi->device_type);
2088 probe_offset2 = cfi_build_cmd_addr(
2089 cfi->addr_unlock1,
2090 cfi_interleave(cfi),
2091 cfi->device_type);
2092 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 1910 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2093 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 1911 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2094 {
2095 goto retry; 1912 goto retry;
2096 }
2097 1913
2098 /* Reset */ 1914 /* Reset */
2099 jedec_reset(base, map, cfi); 1915 jedec_reset(base, map, cfi);
@@ -2128,8 +1944,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2128 } 1944 }
2129 goto retry; 1945 goto retry;
2130 } else { 1946 } else {
2131 __u16 mfr; 1947 uint16_t mfr;
2132 __u16 id; 1948 uint16_t id;
2133 1949
2134 /* Make sure it is a chip of the same manufacturer and id */ 1950 /* Make sure it is a chip of the same manufacturer and id */
2135 mfr = jedec_read_mfr(map, base, cfi); 1951 mfr = jedec_read_mfr(map, base, cfi);
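Two details of the jedec_probe.c rework above are easy to miss. First, unlock addresses in jedec_table[] are kept as byte addresses and divided by the device type at the point of use; since CFI_DEVICETYPE_X8/X16/X32 encode the device width in bytes (1, 2 and 4 in include/linux/mtd/cfi.h), one table entry now serves both the x8 and x16 wiring of a part. Second, probe_offset2 now uses cfi->addr_unlock2 where the old code reused addr_unlock1. A minimal standalone sketch of the byte-to-device-word arithmetic, assuming those width encodings (the addresses below are illustrative):

	#include <stdio.h>

	#define CFI_DEVICETYPE_X8  1	/* device width in bytes */
	#define CFI_DEVICETYPE_X16 2
	#define CFI_DEVICETYPE_X32 4

	int main(void)
	{
		unsigned int addr1 = 0x0AAA, addr2 = 0x0555;	/* bytes, as stored in the table */
		int types[] = { CFI_DEVICETYPE_X8, CFI_DEVICETYPE_X16, CFI_DEVICETYPE_X32 };

		for (int i = 0; i < 3; i++)
			/* e.g. 0x0AAA/0x0555 bytes become 0x0555/0x02AA device words on a x16 part */
			printf("x%d device: unlock1=0x%.4x unlock2=0x%.4x\n",
			       8 * types[i], addr1 / types[i], addr2 / types[i]);
		return 0;
	}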
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f1637..b44292abd9f7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@offset][<name>][ro]
+ * <partdef> := <size>[@offset][<name>][ro][lk]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
  * <name>    := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
 		s += 2;
 	}
 
+	/* if lk is found do NOT unlock the MTD partition*/
+	if (strncmp(s, "lk", 2) == 0)
+	{
+		mask_flags |= MTD_POWERUP_LOCK;
+		s += 2;
+	}
+
 	/* test if more partitions are following */
 	if (*s == ',')
 	{
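The new lk flag parallels the existing ro flag: both are suffixes of a single <partdef>, concatenated without separators. A hypothetical command line (the mtd-id physmap-flash.0 is illustrative, not from this commit) that keeps the bootloader partition read-only and locked at power-up:

	mtdparts=physmap-flash.0:512k(bootloader)rolk,128k(env),-(rootfs)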
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 90acf57c19bd..846989f292e3 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -632,7 +632,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
 		len = ((from | 0x1ff) + 1) - from;
 
 	/* The ECC will not be calculated correctly if less than 512 is read */
-	if (len != 0x200 && eccbuf)
+	if (len != 0x200)
 		printk(KERN_WARNING
 		       "ECC needs a full sector read (adr: %lx size %lx)\n",
 		       (long) from, (long) len);
@@ -896,7 +896,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
 	/* Let the caller know we completed it */
 	*retlen += len;
 
-	if (eccbuf) {
+	{
 		unsigned char x[8];
 		size_t dummy;
 		int ret;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 2b30b587c6e8..83be3461658f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -748,7 +748,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
 	WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
 
 	/* On interleaved devices the flags for 2nd half 512 are before data */
-	if (eccbuf && before)
+	if (before)
 		fto -= 2;
 
 	/* issue the Serial Data In command to initial the Page Program process */
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda85..99fd210feaec 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
   /* put the flash back into command mode */
   write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
 
-   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
+   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
 }
 
 /*
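The lart.c change fixes a condition that was accidentally constant: FLASH_DEVICE_16mbit_BOTTOM is a non-zero macro, so testing the definition itself rather than comparing it against devtype made the OR-clause always true, and flash_probe() would accept any device ID from the right manufacturer. Adding the missing devtype == comparison restores the intended check.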
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a5ed6d232c35..b35e4813a3a5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 			status = dataflash_waitready(priv->spi);
 
 			/* Check result of the compare operation */
-			if ((status & (1 << 6)) == 1) {
+			if (status & (1 << 6)) {
 				printk(KERN_ERR "%s: compare page %u, err %d\n",
 				       spi->dev.bus_id, pageaddr, status);
 				remaining = 0;
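The mtd_dataflash.c change fixes a test that could never fire: status & (1 << 6) evaluates to either 0 or 0x40, never 1, so comparing it against 1 silently ignored failed compare-after-write operations. Testing the masked value for non-zero makes the error path reachable.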
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a592fc04cf78..12c253664eb2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
 	  Sun Microsystems boardsets.  This driver will require CFI support
 	  in the kernel, so if you did not enable CFI previously, do that now.
 
-config MTD_PNC2000
-	tristate "CFI Flash device mapped on Photron PNC-2000"
-	depends on X86 && MTD_CFI && MTD_PARTITIONS
-	help
-	  PNC-2000 is the name of Network Camera product from PHOTRON
-	  Ltd. in Japan. It uses CFI-compliant flash.
-
 config MTD_SC520CDP
 	tristate "CFI Flash device mapped on AMD SC520 CDP"
 	depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
 	default "4"
 
 config MTD_SHARP_SL
-	bool "ROM mapped on Sharp SL Series"
+	tristate "ROM mapped on Sharp SL Series"
 	depends on ARCH_PXA
 	help
 	  This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 316382a1401b..a9cbe80f99a0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
28obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 28obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
29obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 29obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
30obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o 30obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
31obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
32obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 31obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
33obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 32obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
34obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o 33obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 28c5ffd75233..f00e04efbe28 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -20,11 +20,15 @@
20#include <linux/mtd/map.h> 20#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/mtd/physmap.h> 22#include <linux/mtd/physmap.h>
23#include <linux/mtd/concat.h>
23#include <asm/io.h> 24#include <asm/io.h>
24 25
26#define MAX_RESOURCES 4
27
25struct physmap_flash_info { 28struct physmap_flash_info {
26 struct mtd_info *mtd; 29 struct mtd_info *mtd[MAX_RESOURCES];
27 struct map_info map; 30 struct mtd_info *cmtd;
31 struct map_info map[MAX_RESOURCES];
28 struct resource *res; 32 struct resource *res;
29#ifdef CONFIG_MTD_PARTITIONS 33#ifdef CONFIG_MTD_PARTITIONS
30 int nr_parts; 34 int nr_parts;
@@ -32,11 +36,11 @@ struct physmap_flash_info {
32#endif 36#endif
33}; 37};
34 38
35
36static int physmap_flash_remove(struct platform_device *dev) 39static int physmap_flash_remove(struct platform_device *dev)
37{ 40{
38 struct physmap_flash_info *info; 41 struct physmap_flash_info *info;
39 struct physmap_flash_data *physmap_data; 42 struct physmap_flash_data *physmap_data;
43 int i;
40 44
41 info = platform_get_drvdata(dev); 45 info = platform_get_drvdata(dev);
42 if (info == NULL) 46 if (info == NULL)
@@ -45,24 +49,33 @@ static int physmap_flash_remove(struct platform_device *dev)
45 49
46 physmap_data = dev->dev.platform_data; 50 physmap_data = dev->dev.platform_data;
47 51
48 if (info->mtd != NULL) { 52#ifdef CONFIG_MTD_CONCAT
53 if (info->cmtd != info->mtd[0]) {
54 del_mtd_device(info->cmtd);
55 mtd_concat_destroy(info->cmtd);
56 }
57#endif
58
59 for (i = 0; i < MAX_RESOURCES; i++) {
60 if (info->mtd[i] != NULL) {
49#ifdef CONFIG_MTD_PARTITIONS 61#ifdef CONFIG_MTD_PARTITIONS
50 if (info->nr_parts) { 62 if (info->nr_parts) {
51 del_mtd_partitions(info->mtd); 63 del_mtd_partitions(info->mtd[i]);
52 kfree(info->parts); 64 kfree(info->parts);
53 } else if (physmap_data->nr_parts) { 65 } else if (physmap_data->nr_parts) {
54 del_mtd_partitions(info->mtd); 66 del_mtd_partitions(info->mtd[i]);
55 } else { 67 } else {
56 del_mtd_device(info->mtd); 68 del_mtd_device(info->mtd[i]);
57 } 69 }
58#else 70#else
59 del_mtd_device(info->mtd); 71 del_mtd_device(info->mtd[i]);
60#endif 72#endif
61 map_destroy(info->mtd); 73 map_destroy(info->mtd[i]);
62 } 74 }
63 75
64 if (info->map.virt != NULL) 76 if (info->map[i].virt != NULL)
65 iounmap(info->map.virt); 77 iounmap(info->map[i].virt);
78 }
66 79
67 if (info->res != NULL) { 80 if (info->res != NULL) {
68 release_resource(info->res); 81 release_resource(info->res);
@@ -82,16 +95,14 @@ static int physmap_flash_probe(struct platform_device *dev)
82 struct physmap_flash_data *physmap_data; 95 struct physmap_flash_data *physmap_data;
83 struct physmap_flash_info *info; 96 struct physmap_flash_info *info;
84 const char **probe_type; 97 const char **probe_type;
85 int err; 98 int err = 0;
99 int i;
100 int devices_found = 0;
86 101
87 physmap_data = dev->dev.platform_data; 102 physmap_data = dev->dev.platform_data;
88 if (physmap_data == NULL) 103 if (physmap_data == NULL)
89 return -ENODEV; 104 return -ENODEV;
90 105
91 printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
92 (unsigned long long)(dev->resource->end - dev->resource->start + 1),
93 (unsigned long long)dev->resource->start);
94
95 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 106 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
96 if (info == NULL) { 107 if (info == NULL) {
97 err = -ENOMEM; 108 err = -ENOMEM;
@@ -100,56 +111,83 @@ static int physmap_flash_probe(struct platform_device *dev)
100 111
101 platform_set_drvdata(dev, info); 112 platform_set_drvdata(dev, info);
102 113
103 info->res = request_mem_region(dev->resource->start, 114 for (i = 0; i < dev->num_resources; i++) {
104 dev->resource->end - dev->resource->start + 1, 115 printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
105 dev->dev.bus_id); 116 (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
106 if (info->res == NULL) { 117 (unsigned long long)dev->resource[i].start);
107 dev_err(&dev->dev, "Could not reserve memory region\n"); 118
108 err = -ENOMEM; 119 info->res = request_mem_region(dev->resource[i].start,
109 goto err_out; 120 dev->resource[i].end - dev->resource[i].start + 1,
110 } 121 dev->dev.bus_id);
122 if (info->res == NULL) {
123 dev_err(&dev->dev, "Could not reserve memory region\n");
124 err = -ENOMEM;
125 goto err_out;
126 }
111 127
112 info->map.name = dev->dev.bus_id; 128 info->map[i].name = dev->dev.bus_id;
113 info->map.phys = dev->resource->start; 129 info->map[i].phys = dev->resource[i].start;
114 info->map.size = dev->resource->end - dev->resource->start + 1; 130 info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
115 info->map.bankwidth = physmap_data->width; 131 info->map[i].bankwidth = physmap_data->width;
116 info->map.set_vpp = physmap_data->set_vpp; 132 info->map[i].set_vpp = physmap_data->set_vpp;
133
134 info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
135 if (info->map[i].virt == NULL) {
136 dev_err(&dev->dev, "Failed to ioremap flash region\n");
137 err = -EIO;
138 goto err_out;
139 }
117 140
118 info->map.virt = ioremap(info->map.phys, info->map.size); 141 simple_map_init(&info->map[i]);
119 if (info->map.virt == NULL) {
120 dev_err(&dev->dev, "Failed to ioremap flash region\n");
121 err = -EIO;
122 goto err_out;
123 }
124 142
125 simple_map_init(&info->map); 143 probe_type = rom_probe_types;
144 for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
145 info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
146 if (info->mtd[i] == NULL) {
147 dev_err(&dev->dev, "map_probe failed\n");
148 err = -ENXIO;
149 goto err_out;
150 } else {
151 devices_found++;
152 }
153 info->mtd[i]->owner = THIS_MODULE;
154 }
126 155
127 probe_type = rom_probe_types; 156 if (devices_found == 1) {
128 for (; info->mtd == NULL && *probe_type != NULL; probe_type++) 157 info->cmtd = info->mtd[0];
129 info->mtd = do_map_probe(*probe_type, &info->map); 158 } else if (devices_found > 1) {
130 if (info->mtd == NULL) { 159 /*
131 dev_err(&dev->dev, "map_probe failed\n"); 160 * We detected multiple devices. Concatenate them together.
161 */
162#ifdef CONFIG_MTD_CONCAT
163 info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
164 if (info->cmtd == NULL)
165 err = -ENXIO;
166#else
167 printk(KERN_ERR "physmap-flash: multiple devices "
168 "found but MTD concat support disabled.\n");
132 err = -ENXIO; 169 err = -ENXIO;
133 goto err_out; 170#endif
134 } 171 }
135 info->mtd->owner = THIS_MODULE; 172 if (err)
173 goto err_out;
136 174
137#ifdef CONFIG_MTD_PARTITIONS 175#ifdef CONFIG_MTD_PARTITIONS
138 err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0); 176 err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
139 if (err > 0) { 177 if (err > 0) {
140 add_mtd_partitions(info->mtd, info->parts, err); 178 add_mtd_partitions(info->cmtd, info->parts, err);
141 return 0; 179 return 0;
142 } 180 }
143 181
144 if (physmap_data->nr_parts) { 182 if (physmap_data->nr_parts) {
145 printk(KERN_NOTICE "Using physmap partition information\n"); 183 printk(KERN_NOTICE "Using physmap partition information\n");
146 add_mtd_partitions(info->mtd, physmap_data->parts, 184 add_mtd_partitions(info->cmtd, physmap_data->parts,
147 physmap_data->nr_parts); 185 physmap_data->nr_parts);
148 return 0; 186 return 0;
149 } 187 }
150#endif 188#endif
151 189
152 add_mtd_device(info->mtd); 190 add_mtd_device(info->cmtd);
153 return 0; 191 return 0;
154 192
155err_out: 193err_out:
@@ -162,9 +200,11 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
162{ 200{
163 struct physmap_flash_info *info = platform_get_drvdata(dev); 201 struct physmap_flash_info *info = platform_get_drvdata(dev);
164 int ret = 0; 202 int ret = 0;
203 int i;
165 204
166 if (info) 205 if (info)
167 ret = info->mtd->suspend(info->mtd); 206 for (i = 0; i < MAX_RESOURCES; i++)
207 ret |= info->mtd[i]->suspend(info->mtd[i]);
168 208
169 return ret; 209 return ret;
170} 210}
@@ -172,27 +212,35 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
172static int physmap_flash_resume(struct platform_device *dev) 212static int physmap_flash_resume(struct platform_device *dev)
173{ 213{
174 struct physmap_flash_info *info = platform_get_drvdata(dev); 214 struct physmap_flash_info *info = platform_get_drvdata(dev);
215 int i;
216
175 if (info) 217 if (info)
176 info->mtd->resume(info->mtd); 218 for (i = 0; i < MAX_RESOURCES; i++)
219 info->mtd[i]->resume(info->mtd[i]);
177 return 0; 220 return 0;
178} 221}
179 222
180static void physmap_flash_shutdown(struct platform_device *dev) 223static void physmap_flash_shutdown(struct platform_device *dev)
181{ 224{
182 struct physmap_flash_info *info = platform_get_drvdata(dev); 225 struct physmap_flash_info *info = platform_get_drvdata(dev);
183 if (info && info->mtd->suspend(info->mtd) == 0) 226 int i;
184 info->mtd->resume(info->mtd); 227
228 for (i = 0; i < MAX_RESOURCES; i++)
229 if (info && info->mtd[i]->suspend(info->mtd[i]) == 0)
230 info->mtd[i]->resume(info->mtd[i]);
185} 231}
232#else
233#define physmap_flash_suspend NULL
234#define physmap_flash_resume NULL
235#define physmap_flash_shutdown NULL
186#endif 236#endif
187 237
188static struct platform_driver physmap_flash_driver = { 238static struct platform_driver physmap_flash_driver = {
189 .probe = physmap_flash_probe, 239 .probe = physmap_flash_probe,
190 .remove = physmap_flash_remove, 240 .remove = physmap_flash_remove,
191#ifdef CONFIG_PM
192 .suspend = physmap_flash_suspend, 241 .suspend = physmap_flash_suspend,
193 .resume = physmap_flash_resume, 242 .resume = physmap_flash_resume,
194 .shutdown = physmap_flash_shutdown, 243 .shutdown = physmap_flash_shutdown,
195#endif
196 .driver = { 244 .driver = {
197 .name = "physmap-flash", 245 .name = "physmap-flash",
198 }, 246 },
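The physmap rework above iterates over dev->num_resources (up to MAX_RESOURCES), mapping and probing each region into its own mtd_info, then fuses multiple hits into one logical device named after dev.bus_id. A condensed sketch of that final step, using mtd_concat_create() from <linux/mtd/concat.h> as the code above does:

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

/* Returns the mtd_info to register: the single probed chip, a
 * concatenation of several, or NULL when several chips were found
 * but CONFIG_MTD_CONCAT is unavailable (condensed sketch). */
static struct mtd_info *fuse_chips(struct mtd_info *mtd[], int found,
                                   char *name)
{
        if (found == 1)
                return mtd[0];
#ifdef CONFIG_MTD_CONCAT
        return mtd_concat_create(mtd, found, name);
#else
        return NULL;    /* caller fails the probe with -ENXIO */
#endif
}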
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index aeed9ea79714..49acd4171893 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
80 80
81 return nr_parts; 81 return nr_parts;
82} 82}
83
84static int __devinit parse_partitions(struct of_flash *info,
85 struct of_device *dev)
86{
87 const char *partname;
88 static const char *part_probe_types[]
89 = { "cmdlinepart", "RedBoot", NULL };
90 struct device_node *dp = dev->node, *pp;
91 int nr_parts, i;
92
93 /* First look for RedBoot table or partitions on the command
94 * line, these take precedence over device tree information */
95 nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
96 &info->parts, 0);
97 if (nr_parts > 0) {
98 add_mtd_partitions(info->mtd, info->parts, nr_parts);
99 return 0;
100 }
101
102 /* First count the subnodes */
103 nr_parts = 0;
104 for (pp = dp->child; pp; pp = pp->sibling)
105 nr_parts++;
106
107 if (nr_parts == 0)
108 return parse_obsolete_partitions(dev, info, dp);
109
110 info->parts = kzalloc(nr_parts * sizeof(*info->parts),
111 GFP_KERNEL);
112 if (!info->parts)
113 return -ENOMEM;
114
115 for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
116 const u32 *reg;
117 int len;
118
119 reg = of_get_property(pp, "reg", &len);
120 if (!reg || (len != 2*sizeof(u32))) {
121 dev_err(&dev->dev, "Invalid 'reg' on %s\n",
122 dp->full_name);
123 kfree(info->parts);
124 info->parts = NULL;
125 return -EINVAL;
126 }
127 info->parts[i].offset = reg[0];
128 info->parts[i].size = reg[1];
129
130 partname = of_get_property(pp, "label", &len);
131 if (!partname)
132 partname = of_get_property(pp, "name", &len);
133 info->parts[i].name = (char *)partname;
134
135 if (of_get_property(pp, "read-only", &len))
136 info->parts[i].mask_flags = MTD_WRITEABLE;
137 }
138
139 return nr_parts;
140}
141#else /* MTD_PARTITIONS */ 83#else /* MTD_PARTITIONS */
142#define OF_FLASH_PARTS(info) (0) 84#define OF_FLASH_PARTS(info) (0)
143#define parse_partitions(info, dev) (0) 85#define parse_partitions(info, dev) (0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
212static int __devinit of_flash_probe(struct of_device *dev, 154static int __devinit of_flash_probe(struct of_device *dev,
213 const struct of_device_id *match) 155 const struct of_device_id *match)
214{ 156{
157#ifdef CONFIG_MTD_PARTITIONS
158 static const char *part_probe_types[]
159 = { "cmdlinepart", "RedBoot", NULL };
160#endif
215 struct device_node *dp = dev->node; 161 struct device_node *dp = dev->node;
216 struct resource res; 162 struct resource res;
217 struct of_flash *info; 163 struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
274 } 220 }
275 info->mtd->owner = THIS_MODULE; 221 info->mtd->owner = THIS_MODULE;
276 222
277 err = parse_partitions(info, dev); 223#ifdef CONFIG_MTD_PARTITIONS
224 /* First look for RedBoot table or partitions on the command
225 * line, these take precedence over device tree information */
226 err = parse_mtd_partitions(info->mtd, part_probe_types,
227 &info->parts, 0);
278 if (err < 0) 228 if (err < 0)
279 goto err_out; 229 return err;
230
231#ifdef CONFIG_MTD_OF_PARTS
232 if (err == 0) {
233 err = of_mtd_parse_partitions(&dev->dev, info->mtd,
234 dp, &info->parts);
235 if (err < 0)
236 return err;
237 }
238#endif
239
240 if (err == 0) {
241 err = parse_obsolete_partitions(dev, info, dp);
242 if (err < 0)
243 return err;
244 }
280 245
281 if (err > 0) 246 if (err > 0)
282 add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err); 247 add_mtd_partitions(info->mtd, info->parts, err);
283 else 248 else
249#endif
284 add_mtd_device(info->mtd); 250 add_mtd_device(info->mtd);
285 251
286 return 0; 252 return 0;
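The probe above replaces the old parse_partitions() helper with an inline cascade: command-line and RedBoot parsers run first, device-tree subnodes are consulted next via of_mtd_parse_partitions() when CONFIG_MTD_OF_PARTS is enabled, and the obsolete flat property is the last resort. A condensed sketch of the precedence, as a hypothetical helper wrapping the calls used above:

/* Each stage runs only if the previous one found nothing (returned
 * 0); a negative return aborts, a positive one is the count. */
static int pick_partitions(struct of_flash *info, struct of_device *dev,
                           struct device_node *dp)
{
        static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
        int nr;

        nr = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
        if (nr == 0)
                nr = of_mtd_parse_partitions(&dev->dev, info->mtd,
                                             dp, &info->parts);
#endif
        if (nr == 0)
                nr = parse_obsolete_partitions(dev, info, dp);

        return nr;
}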
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
deleted file mode 100644
index d7e16c2d5c44..000000000000
--- a/drivers/mtd/maps/pnc2000.c
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * pnc2000.c - mapper for Photron PNC-2000 board.
3 *
4 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
5 *
6 * This code is GPL
7 *
8 * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20
21#define WINDOW_ADDR 0xbf000000
22#define WINDOW_SIZE 0x00400000
23
24/*
25 * MAP DRIVER STUFF
26 */
27
28
29static struct map_info pnc_map = {
30 .name = "PNC-2000",
31 .size = WINDOW_SIZE,
32 .bankwidth = 4,
33 .phys = 0xFFFFFFFF,
34 .virt = (void __iomem *)WINDOW_ADDR,
35};
36
37
38/*
39 * MTD 'PARTITIONING' STUFF
40 */
41static struct mtd_partition pnc_partitions[3] = {
42 {
43 .name = "PNC-2000 boot firmware",
44 .size = 0x20000,
45 .offset = 0
46 },
47 {
48 .name = "PNC-2000 kernel",
49 .size = 0x1a0000,
50 .offset = 0x20000
51 },
52 {
53 .name = "PNC-2000 filesystem",
54 .size = 0x240000,
55 .offset = 0x1c0000
56 }
57};
58
59/*
60 * This is the master MTD device for which all the others are just
61 * auto-relocating aliases.
62 */
63static struct mtd_info *mymtd;
64
65static int __init init_pnc2000(void)
66{
67 printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
68
69 simple_map_init(&pnc_map);
70
71 mymtd = do_map_probe("cfi_probe", &pnc_map);
72 if (mymtd) {
73 mymtd->owner = THIS_MODULE;
74 return add_mtd_partitions(mymtd, pnc_partitions, 3);
75 }
76
77 return -ENXIO;
78}
79
80static void __exit cleanup_pnc2000(void)
81{
82 if (mymtd) {
83 del_mtd_partitions(mymtd);
84 map_destroy(mymtd);
85 }
86}
87
88module_init(init_pnc2000);
89module_exit(cleanup_pnc2000);
90
91MODULE_LICENSE("GPL");
92MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
93MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index dcfb85840d1e..0fc5584324e3 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
79 struct cfi_private *cfi = map->fldrv_priv; 79 struct cfi_private *cfi = map->fldrv_priv;
80 80
81 /* barf if this doesn't look right */ 81 /* barf if this doesn't look right */
82 if (cfi->cfiq->InterfaceDesc != 1) { 82 if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n", 83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
84 cfi->cfiq->InterfaceDesc); 84 cfi->cfiq->InterfaceDesc);
85 return -1; 85 return -1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74d9d30edabd..839eed8430a2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
250 250
251 mutex_init(&new->lock);
252 list_add_tail(&new->list, &tr->devs); 251 list_add_tail(&new->list, &tr->devs);
253 added: 252 added:
253 mutex_init(&new->lock);
254 if (!tr->writesect) 254 if (!tr->writesect)
255 new->readonly = 1; 255 new->readonly = 1;
256 256
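Moving mutex_init() below the added: label is a correctness fix, not a cleanup: the path that reuses a freed device slot jumps straight to the label, and with the init above it that path handed out an uninitialized lock. A hypothetical, condensed skeleton of the control flow:

/* Both entry paths now pass through the mutex_init() at the
 * common tail (condensed sketch). */
static int add_blktrans_dev(struct mtd_blktrans_dev *new, bool reused_slot)
{
        if (reused_slot)
                goto added;             /* used to skip mutex_init() */

        /* ... pick a device number, link into the translator's list ... */
added:
        mutex_init(&new->lock);         /* now runs on every path */
        return 0;
}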
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a0cee86464ca..5d3ac512ce16 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
481 { 481 {
482 struct mtd_oob_buf buf; 482 struct mtd_oob_buf buf;
483 struct mtd_oob_ops ops; 483 struct mtd_oob_ops ops;
484 uint32_t retlen;
484 485
485 if(!(file->f_mode & 2)) 486 if(!(file->f_mode & 2))
486 return -EPERM; 487 return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
520 buf.start &= ~(mtd->oobsize - 1); 521 buf.start &= ~(mtd->oobsize - 1);
521 ret = mtd->write_oob(mtd, buf.start, &ops); 522 ret = mtd->write_oob(mtd, buf.start, &ops);
522 523
523 if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen, 524 if (ops.oobretlen > 0xFFFFFFFFU)
524 sizeof(uint32_t))) 525 ret = -EOVERFLOW;
526 retlen = ops.oobretlen;
527 if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
528 &retlen, sizeof(buf.length)))
525 ret = -EFAULT; 529 ret = -EFAULT;
526 530
527 kfree(ops.oobbuf); 531 kfree(ops.oobbuf);
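The MEMWRITEOOB copy-out above had two weaknesses: it relied on the length member sitting exactly sizeof(uint32_t) into the user struct, and it narrowed a size_t into 32 bits unchecked. The fixed sequence in compact form:

uint32_t retlen;

/* range-check before narrowing size_t -> uint32_t */
if (ops.oobretlen > 0xFFFFFFFFU)
        ret = -EOVERFLOW;
retlen = ops.oobretlen;

/* address the user struct's member directly; no layout guessing */
if (copy_to_user(&((struct mtd_oob_buf __user *)argp)->length,
                 &retlen, sizeof(retlen)))
        ret = -EFAULT;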
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e28371..f7e7890e5bc6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
61 61
62 /* Some chips always power up locked. Unlock them now */ 62 /* Some chips always power up locked. Unlock them now */
63 if ((mtd->flags & MTD_WRITEABLE) 63 if ((mtd->flags & MTD_WRITEABLE)
64 && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) { 64 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
65 if (mtd->unlock(mtd, 0, mtd->size)) 65 if (mtd->unlock(mtd, 0, mtd->size))
66 printk(KERN_WARNING 66 printk(KERN_WARNING
67 "%s: unlock failed, " 67 "%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b98..d3cf05012b46 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,26 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/delay.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
31#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
32 35
33#define OOPS_PAGE_SIZE 4096 36#define OOPS_PAGE_SIZE 4096
34 37
35static struct mtdoops_context { 38struct mtdoops_context {
36 int mtd_index; 39 int mtd_index;
37 struct work_struct work; 40 struct work_struct work_erase;
41 struct work_struct work_write;
38 struct mtd_info *mtd; 42 struct mtd_info *mtd;
39 int oops_pages; 43 int oops_pages;
40 int nextpage; 44 int nextpage;
41 int nextcount; 45 int nextcount;
42 46
43 void *oops_buf; 47 void *oops_buf;
48
49 /* writecount and disabling ready are spin lock protected */
50 spinlock_t writecount_lock;
44 int ready; 51 int ready;
45 int writecount; 52 int writecount;
46} oops_cxt; 53} oops_cxt;
@@ -62,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
62 erase.mtd = mtd; 69 erase.mtd = mtd;
63 erase.callback = mtdoops_erase_callback; 70 erase.callback = mtdoops_erase_callback;
64 erase.addr = offset; 71 erase.addr = offset;
65 if (mtd->erasesize < OOPS_PAGE_SIZE) 72 erase.len = mtd->erasesize;
66 erase.len = OOPS_PAGE_SIZE;
67 else
68 erase.len = mtd->erasesize;
69 erase.priv = (u_long)&wait_q; 73 erase.priv = (u_long)&wait_q;
70 74
71 set_current_state(TASK_INTERRUPTIBLE); 75 set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +91,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
87 return 0; 91 return 0;
88} 92}
89 93
90static int mtdoops_inc_counter(struct mtdoops_context *cxt) 94static void mtdoops_inc_counter(struct mtdoops_context *cxt)
91{ 95{
92 struct mtd_info *mtd = cxt->mtd; 96 struct mtd_info *mtd = cxt->mtd;
93 size_t retlen; 97 size_t retlen;
@@ -103,25 +107,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
103 107
104 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 108 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
105 &retlen, (u_char *) &count); 109 &retlen, (u_char *) &count);
106 if ((retlen != 4) || (ret < 0)) { 110 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
107 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 111 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
108 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, 112 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
109 retlen, ret); 113 retlen, ret);
110 return 1; 114 schedule_work(&cxt->work_erase);
115 return;
111 } 116 }
112 117
113 /* See if we need to erase the next block */ 118 /* See if we need to erase the next block */
114 if (count != 0xffffffff) 119 if (count != 0xffffffff) {
115 return 1; 120 schedule_work(&cxt->work_erase);
121 return;
122 }
116 123
117 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", 124 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
118 cxt->nextpage, cxt->nextcount); 125 cxt->nextpage, cxt->nextcount);
119 cxt->ready = 1; 126 cxt->ready = 1;
120 return 0;
121} 127}
122 128
123static void mtdoops_prepare(struct mtdoops_context *cxt) 129/* Scheduled work - when we can't proceed without erasing a block */
130static void mtdoops_workfunc_erase(struct work_struct *work)
124{ 131{
132 struct mtdoops_context *cxt =
133 container_of(work, struct mtdoops_context, work_erase);
125 struct mtd_info *mtd = cxt->mtd; 134 struct mtd_info *mtd = cxt->mtd;
126 int i = 0, j, ret, mod; 135 int i = 0, j, ret, mod;
127 136
@@ -136,8 +145,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
136 cxt->nextpage = 0; 145 cxt->nextpage = 0;
137 } 146 }
138 147
139 while (mtd->block_isbad && 148 while (mtd->block_isbad) {
140 mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) { 149 ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
150 if (!ret)
151 break;
152 if (ret < 0) {
153 printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
154 return;
155 }
141badblock: 156badblock:
142 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 157 printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
143 cxt->nextpage * OOPS_PAGE_SIZE); 158 cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +169,72 @@ badblock:
154 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 169 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
155 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 170 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
156 171
157 if (ret < 0) { 172 if (ret >= 0) {
158 if (mtd->block_markbad) 173 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
159 mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 174 cxt->ready = 1;
160 goto badblock; 175 return;
161 } 176 }
162 177
163 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 178 if (mtd->block_markbad && (ret == -EIO)) {
179 ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
180 if (ret < 0) {
181 printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
182 return;
183 }
184 }
185 goto badblock;
186}
164 187
165 cxt->ready = 1; 188static void mtdoops_write(struct mtdoops_context *cxt, int panic)
189{
190 struct mtd_info *mtd = cxt->mtd;
191 size_t retlen;
192 int ret;
193
194 if (cxt->writecount < OOPS_PAGE_SIZE)
195 memset(cxt->oops_buf + cxt->writecount, 0xff,
196 OOPS_PAGE_SIZE - cxt->writecount);
197
198 if (panic)
199 ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
200 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
201 else
202 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
203 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
204
205 cxt->writecount = 0;
206
207 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
208 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
209 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
210
211 mtdoops_inc_counter(cxt);
166} 212}
167 213
168static void mtdoops_workfunc(struct work_struct *work) 214
215static void mtdoops_workfunc_write(struct work_struct *work)
169{ 216{
170 struct mtdoops_context *cxt = 217 struct mtdoops_context *cxt =
171 container_of(work, struct mtdoops_context, work); 218 container_of(work, struct mtdoops_context, work_write);
172 219
173 mtdoops_prepare(cxt); 220 mtdoops_write(cxt, 0);
174} 221}
175 222
176static int find_next_position(struct mtdoops_context *cxt) 223static void find_next_position(struct mtdoops_context *cxt)
177{ 224{
178 struct mtd_info *mtd = cxt->mtd; 225 struct mtd_info *mtd = cxt->mtd;
179 int page, maxpos = 0; 226 int ret, page, maxpos = 0;
180 u32 count, maxcount = 0xffffffff; 227 u32 count, maxcount = 0xffffffff;
181 size_t retlen; 228 size_t retlen;
182 229
183 for (page = 0; page < cxt->oops_pages; page++) { 230 for (page = 0; page < cxt->oops_pages; page++) {
184 mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 231 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
232 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
233 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
234 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
235 continue;
236 }
237
185 if (count == 0xffffffff) 238 if (count == 0xffffffff)
186 continue; 239 continue;
187 if (maxcount == 0xffffffff) { 240 if (maxcount == 0xffffffff) {
@@ -205,20 +258,19 @@ static int find_next_position(struct mtdoops_context *cxt)
205 cxt->ready = 1; 258 cxt->ready = 1;
206 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n", 259 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
207 cxt->nextpage, cxt->nextcount); 260 cxt->nextpage, cxt->nextcount);
208 return 0; 261 return;
209 } 262 }
210 263
211 cxt->nextpage = maxpos; 264 cxt->nextpage = maxpos;
212 cxt->nextcount = maxcount; 265 cxt->nextcount = maxcount;
213 266
214 return mtdoops_inc_counter(cxt); 267 mtdoops_inc_counter(cxt);
215} 268}
216 269
217 270
218static void mtdoops_notify_add(struct mtd_info *mtd) 271static void mtdoops_notify_add(struct mtd_info *mtd)
219{ 272{
220 struct mtdoops_context *cxt = &oops_cxt; 273 struct mtdoops_context *cxt = &oops_cxt;
221 int ret;
222 274
223 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 275 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
224 return; 276 return;
@@ -229,14 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
229 return; 281 return;
230 } 282 }
231 283
284 if (mtd->erasesize < OOPS_PAGE_SIZE) {
285 printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
286 mtd->index);
287 return;
288 }
289
232 cxt->mtd = mtd; 290 cxt->mtd = mtd;
233 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 291 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
234 292
235 ret = find_next_position(cxt); 293 find_next_position(cxt);
236 if (ret == 1)
237 mtdoops_prepare(cxt);
238 294
239 printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index); 295 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
240} 296}
241 297
242static void mtdoops_notify_remove(struct mtd_info *mtd) 298static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +310,28 @@ static void mtdoops_console_sync(void)
254{ 310{
255 struct mtdoops_context *cxt = &oops_cxt; 311 struct mtdoops_context *cxt = &oops_cxt;
256 struct mtd_info *mtd = cxt->mtd; 312 struct mtd_info *mtd = cxt->mtd;
257 size_t retlen; 313 unsigned long flags;
258 int ret;
259 314
260 if (!cxt->ready || !mtd) 315 if (!cxt->ready || !mtd || cxt->writecount == 0)
261 return; 316 return;
262 317
263 if (cxt->writecount == 0) 318 /*
319 * Once ready is 0 and we've held the lock no further writes to the
320 * buffer will happen
321 */
322 spin_lock_irqsave(&cxt->writecount_lock, flags);
323 if (!cxt->ready) {
324 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
264 return; 325 return;
265 326 }
266 if (cxt->writecount < OOPS_PAGE_SIZE)
267 memset(cxt->oops_buf + cxt->writecount, 0xff,
268 OOPS_PAGE_SIZE - cxt->writecount);
269
270 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
271 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
272 cxt->ready = 0; 327 cxt->ready = 0;
273 cxt->writecount = 0; 328 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
274
275 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
276 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
277 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
278 329
279 ret = mtdoops_inc_counter(cxt); 330 if (mtd->panic_write && in_interrupt())
280 if (ret == 1) 331 /* Interrupt context, we're going to panic so try and log */
281 schedule_work(&cxt->work); 332 mtdoops_write(cxt, 1);
333 else
334 schedule_work(&cxt->work_write);
282} 335}
283 336
284static void 337static void
@@ -286,7 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
286{ 339{
287 struct mtdoops_context *cxt = co->data; 340 struct mtdoops_context *cxt = co->data;
288 struct mtd_info *mtd = cxt->mtd; 341 struct mtd_info *mtd = cxt->mtd;
289 int i; 342 unsigned long flags;
290 343
291 if (!oops_in_progress) { 344 if (!oops_in_progress) {
292 mtdoops_console_sync(); 345 mtdoops_console_sync();
@@ -296,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
296 if (!cxt->ready || !mtd) 349 if (!cxt->ready || !mtd)
297 return; 350 return;
298 351
352 /* Locking on writecount ensures sequential writes to the buffer */
353 spin_lock_irqsave(&cxt->writecount_lock, flags);
354
355 /* Check ready status didn't change whilst waiting for the lock */
356 if (!cxt->ready)
357 return;
358
299 if (cxt->writecount == 0) { 359 if (cxt->writecount == 0) {
300 u32 *stamp = cxt->oops_buf; 360 u32 *stamp = cxt->oops_buf;
301 *stamp = cxt->nextcount; 361 *stamp = cxt->nextcount;
@@ -305,10 +365,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
305 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 365 if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
306 count = OOPS_PAGE_SIZE - cxt->writecount; 366 count = OOPS_PAGE_SIZE - cxt->writecount;
307 367
308 for (i = 0; i < count; i++, s++) 368 memcpy(cxt->oops_buf + cxt->writecount, s, count);
309 *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s; 369 cxt->writecount += count;
310 370
311 cxt->writecount = cxt->writecount + count; 371 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
372
373 if (cxt->writecount == OOPS_PAGE_SIZE)
374 mtdoops_console_sync();
312} 375}
313 376
314static int __init mtdoops_console_setup(struct console *co, char *options) 377static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +397,6 @@ static struct console mtdoops_console = {
334 .write = mtdoops_console_write, 397 .write = mtdoops_console_write,
335 .setup = mtdoops_console_setup, 398 .setup = mtdoops_console_setup,
336 .unblank = mtdoops_console_sync, 399 .unblank = mtdoops_console_sync,
337 .flags = CON_PRINTBUFFER,
338 .index = -1, 400 .index = -1,
339 .data = &oops_cxt, 401 .data = &oops_cxt,
340}; 402};
@@ -347,11 +409,12 @@ static int __init mtdoops_console_init(void)
347 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 409 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
348 410
349 if (!cxt->oops_buf) { 411 if (!cxt->oops_buf) {
350 printk(KERN_ERR "Failed to allocate oops buffer workspace\n"); 412 printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
351 return -ENOMEM; 413 return -ENOMEM;
352 } 414 }
353 415
354 INIT_WORK(&cxt->work, mtdoops_workfunc); 416 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
417 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
355 418
356 register_console(&mtdoops_console); 419 register_console(&mtdoops_console);
357 register_mtd_user(&mtdoops_notifier); 420 register_mtd_user(&mtdoops_notifier);
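The mtdoops rework above splits the old single work item into work_erase and work_write, guards the fill counter with writecount_lock, and gives the sync path a fast exit: in interrupt context during a panic it writes immediately through panic_write(), otherwise it defers to the workqueue. A condensed sketch of that decision, mirroring the code above:

static void oops_sync(struct mtdoops_context *cxt)
{
        unsigned long flags;

        /* claim the buffer exactly once; ready = 0 stops concurrent
         * console writes from appending any further */
        spin_lock_irqsave(&cxt->writecount_lock, flags);
        if (!cxt->ready) {
                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                return;
        }
        cxt->ready = 0;
        spin_unlock_irqrestore(&cxt->writecount_lock, flags);

        if (cxt->mtd->panic_write && in_interrupt())
                mtdoops_write(cxt, 1);          /* panicking: cannot sleep */
        else
                schedule_work(&cxt->work_write);/* normal: sleepable path */
}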
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6174a97d7902..c66902df3171 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -151,6 +151,20 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
151 len, retlen, buf); 151 len, retlen, buf);
152} 152}
153 153
154static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
155 size_t *retlen, const u_char *buf)
156{
157 struct mtd_part *part = PART(mtd);
158 if (!(mtd->flags & MTD_WRITEABLE))
159 return -EROFS;
160 if (to >= mtd->size)
161 len = 0;
162 else if (to + len > mtd->size)
163 len = mtd->size - to;
164 return part->master->panic_write (part->master, to + part->offset,
165 len, retlen, buf);
166}
167
154static int part_write_oob(struct mtd_info *mtd, loff_t to, 168static int part_write_oob(struct mtd_info *mtd, loff_t to,
155 struct mtd_oob_ops *ops) 169 struct mtd_oob_ops *ops)
156{ 170{
@@ -352,6 +366,9 @@ int add_mtd_partitions(struct mtd_info *master,
352 slave->mtd.read = part_read; 366 slave->mtd.read = part_read;
353 slave->mtd.write = part_write; 367 slave->mtd.write = part_write;
354 368
369 if (master->panic_write)
370 slave->mtd.panic_write = part_panic_write;
371
355 if(master->point && master->unpoint){ 372 if(master->point && master->unpoint){
356 slave->mtd.point = part_point; 373 slave->mtd.point = part_point;
357 slave->mtd.unpoint = part_unpoint; 374 slave->mtd.unpoint = part_unpoint;
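Since part_panic_write is only wired up when the master device implements panic_write, a capability check on a partition mirrors one on the whole chip; that is what lets mtdoops test mtd->panic_write without caring whether its target is a partition. A sketch of such a caller (hypothetical function):

/* Works identically on a partition and on a bare chip: the hook is
 * non-NULL only when the underlying driver provides it. */
static int emergency_log(struct mtd_info *mtd, const u_char *buf, size_t len)
{
        size_t retlen;

        if (!mtd->panic_write)
                return -EOPNOTSUPP;     /* fall back to a deferred write */
        return mtd->panic_write(mtd, 0, len, &retlen, buf);
}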
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 246d4512f64b..4a3c6759492b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
93 93
94config MTD_NAND_BF5XX 94config MTD_NAND_BF5XX
95 tristate "Blackfin on-chip NAND Flash Controller driver" 95 tristate "Blackfin on-chip NAND Flash Controller driver"
96 depends on BF54x && MTD_NAND 96 depends on (BF54x || BF52x) && MTD_NAND
97 help 97 help
98 This enables the Blackfin on-chip NAND flash controller 98 This enables the Blackfin on-chip NAND flash controller
99 99
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
283 tristate "Support for NAND Flash on CM-X270 modules" 283 tristate "Support for NAND Flash on CM-X270 modules"
284 depends on MTD_NAND && MACH_ARMCORE 284 depends on MTD_NAND && MACH_ARMCORE
285 285
286config MTD_NAND_PASEMI
287 tristate "NAND support for PA Semi PWRficient"
288 depends on MTD_NAND && PPC_PASEMI
289 help
290 Enables support for NAND Flash interface on PA Semi PWRficient
291 based boards
286 292
287config MTD_NAND_NANDSIM 293config MTD_NAND_NANDSIM
288 tristate "Support for NAND Flash Simulator" 294 tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,22 @@ config MTD_ALAUDA
306 These two (and possibly other) Alauda-based cardreaders for 312 These two (and possibly other) Alauda-based cardreaders for
307 SmartMedia and xD allow raw flash access. 313 SmartMedia and xD allow raw flash access.
308 314
315config MTD_NAND_ORION
316 tristate "NAND Flash support for Marvell Orion SoC"
317 depends on ARCH_ORION && MTD_NAND
318 help
319 This enables the NAND flash controller on Orion machines.
320
321 No board specific support is done by this driver, each board
322 must advertise a platform_device for the driver to attach.
323
324config MTD_NAND_FSL_ELBC
325 tristate "NAND support for Freescale eLBC controllers"
326 depends on MTD_NAND && PPC_OF
327 help
328 Various Freescale chips, including the 8313, include a NAND Flash
329 Controller Module with built-in hardware ECC capabilities.
330 Enabling this option will enable you to use this to control
331 external NAND devices.
332
309endif # MTD_NAND 333endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3ad6c0165da3..80d575eeee96 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -29,5 +29,8 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
31obj-$(CONFIG_MTD_ALAUDA) += alauda.o 31obj-$(CONFIG_MTD_ALAUDA) += alauda.o
32obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
33obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
34obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
32 35
33nand-objs := nand_base.o nand_bbt.o 36nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e0..c9fb2acf4056 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
156 } 156 }
157 157
158#ifdef CONFIG_MTD_PARTITIONS 158#ifdef CONFIG_MTD_PARTITIONS
159 if (host->board->partition_info)
160 partitions = host->board->partition_info(mtd->size, &num_partitions);
161#ifdef CONFIG_MTD_CMDLINE_PARTS 159#ifdef CONFIG_MTD_CMDLINE_PARTS
162 else { 160 mtd->name = "at91_nand";
163 mtd->name = "at91_nand"; 161 num_partitions = parse_mtd_partitions(mtd, part_probes,
164 num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 162 &partitions, 0);
165 }
166#endif 163#endif
164 if (num_partitions <= 0 && host->board->partition_info)
165 partitions = host->board->partition_info(mtd->size,
166 &num_partitions);
167 167
168 if ((!partitions) || (num_partitions == 0)) { 168 if ((!partitions) || (num_partitions == 0)) {
169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n"); 169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a52f3a737c39..747042ab094a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
74static int hardware_ecc; 74static int hardware_ecc;
75#endif 75#endif
76 76
77static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0}; 77static unsigned short bfin_nfc_pin_req[] =
78 {P_NAND_CE,
79 P_NAND_RB,
80 P_NAND_D0,
81 P_NAND_D1,
82 P_NAND_D2,
83 P_NAND_D3,
84 P_NAND_D4,
85 P_NAND_D5,
86 P_NAND_D6,
87 P_NAND_D7,
88 P_NAND_WE,
89 P_NAND_RE,
90 P_NAND_CLE,
91 P_NAND_ALE,
92 0};
78 93
79/* 94/*
80 * Data structures for bf5xx nand flash controller driver 95 * Data structures for bf5xx nand flash controller driver
@@ -278,7 +293,6 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
278 u16 ecc0, ecc1; 293 u16 ecc0, ecc1;
279 u32 code[2]; 294 u32 code[2];
280 u8 *p; 295 u8 *p;
281 int bytes = 3, i;
282 296
283 /* first 4 bytes ECC code for 256 page size */ 297 /* first 4 bytes ECC code for 256 page size */
284 ecc0 = bfin_read_NFC_ECC0(); 298 ecc0 = bfin_read_NFC_ECC0();
@@ -288,19 +302,24 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
288 302
289 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); 303 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
290 304
305 /* first 3 bytes in ecc_code for 256 page size */
306 p = (u8 *) code;
307 memcpy(ecc_code, p, 3);
308
291 /* second 4 bytes ECC code for 512 page size */ 309 /* second 4 bytes ECC code for 512 page size */
292 if (page_size == 512) { 310 if (page_size == 512) {
293 ecc0 = bfin_read_NFC_ECC2(); 311 ecc0 = bfin_read_NFC_ECC2();
294 ecc1 = bfin_read_NFC_ECC3(); 312 ecc1 = bfin_read_NFC_ECC3();
295 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 313 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
296 bytes = 6; 314
315 /* second 3 bytes in ecc_code for second 256
316 * bytes of 512 page size
317 */
318 p = (u8 *) (code + 1);
319 memcpy((ecc_code + 3), p, 3);
297 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]); 320 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
298 } 321 }
299 322
300 p = (u8 *)code;
301 for (i = 0; i < bytes; i++)
302 ecc_code[i] = p[i];
303
304 return 0; 323 return 0;
305} 324}
306 325
@@ -507,12 +526,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
507 526
508 init_completion(&info->dma_completion); 527 init_completion(&info->dma_completion);
509 528
529#ifdef CONFIG_BF54x
510 /* Setup DMAC1 channel mux for NFC which shared with SDH */ 530 /* Setup DMAC1 channel mux for NFC which shared with SDH */
511 val = bfin_read_DMAC1_PERIMUX(); 531 val = bfin_read_DMAC1_PERIMUX();
512 val &= 0xFFFE; 532 val &= 0xFFFE;
513 bfin_write_DMAC1_PERIMUX(val); 533 bfin_write_DMAC1_PERIMUX(val);
514 SSYNC(); 534 SSYNC();
515 535#endif
516 /* Request NFC DMA channel */ 536 /* Request NFC DMA channel */
517 ret = request_dma(CH_NFC, "BF5XX NFC driver"); 537 ret = request_dma(CH_NFC, "BF5XX NFC driver");
518 if (ret < 0) { 538 if (ret < 0) {
@@ -744,9 +764,6 @@ static int bf5xx_nand_resume(struct platform_device *dev)
744{ 764{
745 struct bf5xx_nand_info *info = platform_get_drvdata(dev); 765 struct bf5xx_nand_info *info = platform_get_drvdata(dev);
746 766
747 if (info)
748 bf5xx_nand_hw_init(info);
749
750 return 0; 767 return 0;
751} 768}
752 769
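The bf5xx ECC path above packs the two hardware parity words per 256-byte half into one code word and exports its low three bytes, now with an explicit memcpy per half instead of the old byte loop. A standalone rendering of the packing (assumes a little-endian host, which Blackfin is):

#include <stdint.h>
#include <string.h>

/* Pack the two parity words exactly as above and export the low
 * three bytes of the packed code. */
static void pack_ecc(uint16_t ecc0, uint16_t ecc1, uint8_t out[3])
{
        uint32_t code = (ecc0 & 0x3FF) | ((uint32_t)(ecc1 & 0x3FF) << 11);

        /* little-endian memcpy picks up code bits 0..23 */
        memcpy(out, &code, 3);
}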
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 1e811715211a..da6ceaa80ba1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
11#undef DEBUG 11#undef DEBUG
12#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
13#include <linux/mtd/nand.h> 13#include <linux/mtd/nand.h>
14#include <linux/mtd/partitions.h>
14#include <linux/rslib.h> 15#include <linux/rslib.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
@@ -52,6 +53,7 @@
52 53
53struct cafe_priv { 54struct cafe_priv {
54 struct nand_chip nand; 55 struct nand_chip nand;
56 struct mtd_partition *parts;
55 struct pci_dev *pdev; 57 struct pci_dev *pdev;
56 void __iomem *mmio; 58 void __iomem *mmio;
57 struct rs_control *rs; 59 struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
84static int timing[3]; 86static int timing[3];
85module_param_array(timing, int, &numtimings, 0644); 87module_param_array(timing, int, &numtimings, 0644);
86 88
89#ifdef CONFIG_MTD_PARTITIONS
90static const char *part_probes[] = { "RedBoot", NULL };
91#endif
92
87/* Hrm. Why isn't this already conditional on something in the struct device? */ 93/* Hrm. Why isn't this already conditional on something in the struct device? */
88#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) 94#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
89 95
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
620{ 626{
621 struct mtd_info *mtd; 627 struct mtd_info *mtd;
622 struct cafe_priv *cafe; 628 struct cafe_priv *cafe;
629 struct mtd_partition *parts;
623 uint32_t ctrl; 630 uint32_t ctrl;
631 int nr_parts;
624 int err = 0; 632 int err = 0;
625 633
626 /* Very old versions shared the same PCI ident for all three 634 /* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
787 goto out_irq; 795 goto out_irq;
788 796
789 pci_set_drvdata(pdev, mtd); 797 pci_set_drvdata(pdev, mtd);
798
799 /* We register the whole device first, separate from the partitions */
790 add_mtd_device(mtd); 800 add_mtd_device(mtd);
801
802#ifdef CONFIG_MTD_PARTITIONS
803 nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
804 if (nr_parts > 0) {
805 cafe->parts = parts;
806 dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
807 add_mtd_partitions(mtd, parts, nr_parts);
808 }
809#endif
791 goto out; 810 goto out;
792 811
793 out_irq: 812 out_irq:
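cafe_nand above now registers the whole chip in its own right and then, when CONFIG_MTD_PARTITIONS is set, layers on any RedBoot table the parser finds, keeping the parts pointer in cafe_priv. Condensed:

add_mtd_device(mtd);                    /* whole device, always exposed */

#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
if (nr_parts > 0) {
        cafe->parts = parts;            /* retained in driver state */
        add_mtd_partitions(mtd, parts, nr_parts);
}
#endif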
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 000000000000..b025dfe0b274
--- /dev/null
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,1244 @@
1/* Freescale Enhanced Local Bus Controller NAND driver
2 *
3 * Copyright (c) 2006-2007 Freescale Semiconductor
4 *
5 * Authors: Nick Spence <nick.spence@freescale.com>,
6 * Scott Wood <scottwood@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/string.h>
28#include <linux/ioport.h>
29#include <linux/of_platform.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32
33#include <linux/mtd/mtd.h>
34#include <linux/mtd/nand.h>
35#include <linux/mtd/nand_ecc.h>
36#include <linux/mtd/partitions.h>
37
38#include <asm/io.h>
39
40
41#define MAX_BANKS 8
42#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
43#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
44
45struct elbc_bank {
46 __be32 br; /**< Base Register */
47#define BR_BA 0xFFFF8000
48#define BR_BA_SHIFT 15
49#define BR_PS 0x00001800
50#define BR_PS_SHIFT 11
51#define BR_PS_8 0x00000800 /* Port Size 8 bit */
52#define BR_PS_16 0x00001000 /* Port Size 16 bit */
53#define BR_PS_32 0x00001800 /* Port Size 32 bit */
54#define BR_DECC 0x00000600
55#define BR_DECC_SHIFT 9
56#define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */
57#define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */
58#define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */
59#define BR_WP 0x00000100
60#define BR_WP_SHIFT 8
61#define BR_MSEL 0x000000E0
62#define BR_MSEL_SHIFT 5
63#define BR_MS_GPCM 0x00000000 /* GPCM */
64#define BR_MS_FCM 0x00000020 /* FCM */
65#define BR_MS_SDRAM 0x00000060 /* SDRAM */
66#define BR_MS_UPMA 0x00000080 /* UPMA */
67#define BR_MS_UPMB 0x000000A0 /* UPMB */
68#define BR_MS_UPMC 0x000000C0 /* UPMC */
69#define BR_V 0x00000001
70#define BR_V_SHIFT 0
71#define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
72
73 __be32 or; /**< Option Register */
74#define OR0 0x5004
75#define OR1 0x500C
76#define OR2 0x5014
77#define OR3 0x501C
78#define OR4 0x5024
79#define OR5 0x502C
80#define OR6 0x5034
81#define OR7 0x503C
82
83#define OR_FCM_AM 0xFFFF8000
84#define OR_FCM_AM_SHIFT 15
85#define OR_FCM_BCTLD 0x00001000
86#define OR_FCM_BCTLD_SHIFT 12
87#define OR_FCM_PGS 0x00000400
88#define OR_FCM_PGS_SHIFT 10
89#define OR_FCM_CSCT 0x00000200
90#define OR_FCM_CSCT_SHIFT 9
91#define OR_FCM_CST 0x00000100
92#define OR_FCM_CST_SHIFT 8
93#define OR_FCM_CHT 0x00000080
94#define OR_FCM_CHT_SHIFT 7
95#define OR_FCM_SCY 0x00000070
96#define OR_FCM_SCY_SHIFT 4
97#define OR_FCM_SCY_1 0x00000010
98#define OR_FCM_SCY_2 0x00000020
99#define OR_FCM_SCY_3 0x00000030
100#define OR_FCM_SCY_4 0x00000040
101#define OR_FCM_SCY_5 0x00000050
102#define OR_FCM_SCY_6 0x00000060
103#define OR_FCM_SCY_7 0x00000070
104#define OR_FCM_RST 0x00000008
105#define OR_FCM_RST_SHIFT 3
106#define OR_FCM_TRLX 0x00000004
107#define OR_FCM_TRLX_SHIFT 2
108#define OR_FCM_EHTR 0x00000002
109#define OR_FCM_EHTR_SHIFT 1
110};
111
112struct elbc_regs {
113 struct elbc_bank bank[8];
114 u8 res0[0x28];
115 __be32 mar; /**< UPM Address Register */
116 u8 res1[0x4];
117 __be32 mamr; /**< UPMA Mode Register */
118 __be32 mbmr; /**< UPMB Mode Register */
119 __be32 mcmr; /**< UPMC Mode Register */
120 u8 res2[0x8];
121 __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */
122 __be32 mdr; /**< UPM Data Register */
123 u8 res3[0x4];
124 __be32 lsor; /**< Special Operation Initiation Register */
125 __be32 lsdmr; /**< SDRAM Mode Register */
126 u8 res4[0x8];
127 __be32 lurt; /**< UPM Refresh Timer */
128 __be32 lsrt; /**< SDRAM Refresh Timer */
129 u8 res5[0x8];
130 __be32 ltesr; /**< Transfer Error Status Register */
131#define LTESR_BM 0x80000000
132#define LTESR_FCT 0x40000000
133#define LTESR_PAR 0x20000000
134#define LTESR_WP 0x04000000
135#define LTESR_ATMW 0x00800000
136#define LTESR_ATMR 0x00400000
137#define LTESR_CS 0x00080000
138#define LTESR_CC 0x00000001
139#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
140 __be32 ltedr; /**< Transfer Error Disable Register */
141 __be32 lteir; /**< Transfer Error Interrupt Register */
142 __be32 lteatr; /**< Transfer Error Attributes Register */
143 __be32 ltear; /**< Transfer Error Address Register */
144 u8 res6[0xC];
145 __be32 lbcr; /**< Configuration Register */
146#define LBCR_LDIS 0x80000000
147#define LBCR_LDIS_SHIFT 31
148#define LBCR_BCTLC 0x00C00000
149#define LBCR_BCTLC_SHIFT 22
150#define LBCR_AHD 0x00200000
151#define LBCR_LPBSE 0x00020000
152#define LBCR_LPBSE_SHIFT 17
153#define LBCR_EPAR 0x00010000
154#define LBCR_EPAR_SHIFT 16
155#define LBCR_BMT 0x0000FF00
156#define LBCR_BMT_SHIFT 8
157#define LBCR_INIT 0x00040000
158 __be32 lcrr; /**< Clock Ratio Register */
159#define LCRR_DBYP 0x80000000
160#define LCRR_DBYP_SHIFT 31
161#define LCRR_BUFCMDC 0x30000000
162#define LCRR_BUFCMDC_SHIFT 28
163#define LCRR_ECL 0x03000000
164#define LCRR_ECL_SHIFT 24
165#define LCRR_EADC 0x00030000
166#define LCRR_EADC_SHIFT 16
167#define LCRR_CLKDIV 0x0000000F
168#define LCRR_CLKDIV_SHIFT 0
169 u8 res7[0x8];
170 __be32 fmr; /**< Flash Mode Register */
171#define FMR_CWTO 0x0000F000
172#define FMR_CWTO_SHIFT 12
173#define FMR_BOOT 0x00000800
174#define FMR_ECCM 0x00000100
175#define FMR_AL 0x00000030
176#define FMR_AL_SHIFT 4
177#define FMR_OP 0x00000003
178#define FMR_OP_SHIFT 0
179 __be32 fir; /**< Flash Instruction Register */
180#define FIR_OP0 0xF0000000
181#define FIR_OP0_SHIFT 28
182#define FIR_OP1 0x0F000000
183#define FIR_OP1_SHIFT 24
184#define FIR_OP2 0x00F00000
185#define FIR_OP2_SHIFT 20
186#define FIR_OP3 0x000F0000
187#define FIR_OP3_SHIFT 16
188#define FIR_OP4 0x0000F000
189#define FIR_OP4_SHIFT 12
190#define FIR_OP5 0x00000F00
191#define FIR_OP5_SHIFT 8
192#define FIR_OP6 0x000000F0
193#define FIR_OP6_SHIFT 4
194#define FIR_OP7 0x0000000F
195#define FIR_OP7_SHIFT 0
196#define FIR_OP_NOP 0x0 /* No operation and end of sequence */
197#define FIR_OP_CA 0x1 /* Issue current column address */
198#define FIR_OP_PA 0x2 /* Issue current block+page address */
199#define FIR_OP_UA 0x3 /* Issue user defined address */
200#define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */
201#define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */
202#define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */
203#define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */
204#define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */
205#define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */
206#define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */
207#define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */
208#define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */
209#define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */
210#define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */
211#define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */
212 __be32 fcr; /**< Flash Command Register */
213#define FCR_CMD0 0xFF000000
214#define FCR_CMD0_SHIFT 24
215#define FCR_CMD1 0x00FF0000
216#define FCR_CMD1_SHIFT 16
217#define FCR_CMD2 0x0000FF00
218#define FCR_CMD2_SHIFT 8
219#define FCR_CMD3 0x000000FF
220#define FCR_CMD3_SHIFT 0
221 __be32 fbar; /**< Flash Block Address Register */
222#define FBAR_BLK 0x00FFFFFF
223 __be32 fpar; /**< Flash Page Address Register */
224#define FPAR_SP_PI 0x00007C00
225#define FPAR_SP_PI_SHIFT 10
226#define FPAR_SP_MS 0x00000200
227#define FPAR_SP_CI 0x000001FF
228#define FPAR_SP_CI_SHIFT 0
229#define FPAR_LP_PI 0x0003F000
230#define FPAR_LP_PI_SHIFT 12
231#define FPAR_LP_MS 0x00000800
232#define FPAR_LP_CI 0x000007FF
233#define FPAR_LP_CI_SHIFT 0
234 __be32 fbcr; /**< Flash Byte Count Register */
235#define FBCR_BC 0x00000FFF
236 u8 res11[0x8];
237 u8 res8[0xF00];
238};
239
240struct fsl_elbc_ctrl;
241
242/* mtd information per set */
243
244struct fsl_elbc_mtd {
245 struct mtd_info mtd;
246 struct nand_chip chip;
247 struct fsl_elbc_ctrl *ctrl;
248
249 struct device *dev;
250 int bank; /* Chip select bank number */
251 u8 __iomem *vbase; /* Chip select base virtual address */
252 int page_size; /* NAND page size (0=512, 1=2048) */
253 unsigned int fmr; /* FCM Flash Mode Register value */
254};
255
256/* overview of the fsl elbc controller */
257
258struct fsl_elbc_ctrl {
259 struct nand_hw_control controller;
260 struct fsl_elbc_mtd *chips[MAX_BANKS];
261
262 /* device info */
263 struct device *dev;
264 struct elbc_regs __iomem *regs;
265 int irq;
266 wait_queue_head_t irq_wait;
267 unsigned int irq_status; /* status read from LTESR by irq handler */
268 u8 __iomem *addr; /* Address of assigned FCM buffer */
269 unsigned int page; /* Last page written to / read from */
270 unsigned int read_bytes; /* Number of bytes read during command */
271 unsigned int column; /* Saved column from SEQIN */
272 unsigned int index; /* Pointer to next byte to 'read' */
273 unsigned int status; /* status read from LTESR after last op */
274 unsigned int mdr; /* UPM/FCM Data Register value */
275 unsigned int use_mdr; /* Non zero if the MDR is to be set */
276 unsigned int oob; /* Non zero if operating on OOB data */
277 char *oob_poi; /* Place to write ECC after read back */
278};
279
280/* These map to the positions used by the FCM hardware ECC generator */
281
282/* Small Page FLASH with FMR[ECCM] = 0 */
283static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
284 .eccbytes = 3,
285 .eccpos = {6, 7, 8},
286 .oobfree = { {0, 5}, {9, 7} },
287 .oobavail = 12,
288};
289
290/* Small Page FLASH with FMR[ECCM] = 1 */
291static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
292 .eccbytes = 3,
293 .eccpos = {8, 9, 10},
294 .oobfree = { {0, 5}, {6, 2}, {11, 5} },
295 .oobavail = 12,
296};
297
298/* Large Page FLASH with FMR[ECCM] = 0 */
299static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
300 .eccbytes = 12,
301 .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
302 .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
303 .oobavail = 48,
304};
305
306/* Large Page FLASH with FMR[ECCM] = 1 */
307static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
308 .eccbytes = 12,
309 .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
310 .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
311 .oobavail = 48,
312};
313
314/*=================================*/
315
316/*
317 * Set up the FCM hardware block and page address fields, and the fcm
318 * structure addr field to point to the correct FCM buffer in memory
319 */
320static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
321{
322 struct nand_chip *chip = mtd->priv;
323 struct fsl_elbc_mtd *priv = chip->priv;
324 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
325 struct elbc_regs __iomem *lbc = ctrl->regs;
326 int buf_num;
327
328 ctrl->page = page_addr;
329
330 out_be32(&lbc->fbar,
331 page_addr >> (chip->phys_erase_shift - chip->page_shift));
332
333 if (priv->page_size) {
334 out_be32(&lbc->fpar,
335 ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
336 (oob ? FPAR_LP_MS : 0) | column);
337 buf_num = (page_addr & 1) << 2;
338 } else {
339 out_be32(&lbc->fpar,
340 ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
341 (oob ? FPAR_SP_MS : 0) | column);
342 buf_num = page_addr & 7;
343 }
344
345 ctrl->addr = priv->vbase + buf_num * 1024;
346 ctrl->index = column;
347
348	/* for OOB data, point to the second half of the buffer */
349 if (oob)
350 ctrl->index += priv->page_size ? 2048 : 512;
351
352 dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
353 "index %x, pes %d ps %d\n",
354 buf_num, ctrl->addr, priv->vbase, ctrl->index,
355 chip->phys_erase_shift, chip->page_shift);
356}
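A worked example may help here. The sketch below (standalone, not part of the driver; the page address is invented) redoes the large-page branch of set_addr() with the FPAR_LP_* values defined above:

#include <stdio.h>
#include <stdint.h>

#define FPAR_LP_PI       0x0003F000
#define FPAR_LP_PI_SHIFT 12
#define FPAR_LP_MS       0x00000800

int main(void)
{
	uint32_t page_addr = 0x1235;	/* invented example page */
	int column = 6, oob = 1;

	uint32_t fpar = ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
			(oob ? FPAR_LP_MS : 0) | column;
	int buf_num = (page_addr & 1) << 2;	/* odd pages land in buffer 4 */

	/* prints fpar=0x00035806 buf_num=4 offset=0x1000 */
	printf("fpar=0x%08x buf_num=%d offset=0x%x\n",
	       fpar, buf_num, buf_num * 1024);
	return 0;
}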
357
358/*
359 * execute FCM command and wait for it to complete
360 */
361static int fsl_elbc_run_command(struct mtd_info *mtd)
362{
363 struct nand_chip *chip = mtd->priv;
364 struct fsl_elbc_mtd *priv = chip->priv;
365 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
366 struct elbc_regs __iomem *lbc = ctrl->regs;
367
368 /* Setup the FMR[OP] to execute without write protection */
369 out_be32(&lbc->fmr, priv->fmr | 3);
370 if (ctrl->use_mdr)
371 out_be32(&lbc->mdr, ctrl->mdr);
372
373 dev_vdbg(ctrl->dev,
374 "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
375 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
376 dev_vdbg(ctrl->dev,
377 "fsl_elbc_run_command: fbar=%08x fpar=%08x "
378 "fbcr=%08x bank=%d\n",
379 in_be32(&lbc->fbar), in_be32(&lbc->fpar),
380 in_be32(&lbc->fbcr), priv->bank);
381
382 /* execute special operation */
383 out_be32(&lbc->lsor, priv->bank);
384
385 /* wait for FCM complete flag or timeout */
386 ctrl->irq_status = 0;
387 wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
388 FCM_TIMEOUT_MSECS * HZ/1000);
389 ctrl->status = ctrl->irq_status;
390
391 /* store mdr value in case it was needed */
392 if (ctrl->use_mdr)
393 ctrl->mdr = in_be32(&lbc->mdr);
394
395 ctrl->use_mdr = 0;
396
397 dev_vdbg(ctrl->dev,
398 "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
399 ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
400
401	/* returns 0 on success, otherwise non-zero */
402 return ctrl->status == LTESR_CC ? 0 : -EIO;
403}
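One aside on the timeout above: FCM_TIMEOUT_MSECS * HZ/1000 is integer arithmetic, so with HZ=100 any timeout below 10 ms truncates to zero jiffies. Assuming FCM_TIMEOUT_MSECS is simply a millisecond count (it is defined earlier in this file), the stock conversion helper expresses the same wait without that pitfall:

	wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
			   msecs_to_jiffies(FCM_TIMEOUT_MSECS));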
404
405static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
406{
407 struct fsl_elbc_mtd *priv = chip->priv;
408 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
409 struct elbc_regs __iomem *lbc = ctrl->regs;
410
411 if (priv->page_size) {
412 out_be32(&lbc->fir,
413 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
414 (FIR_OP_CA << FIR_OP1_SHIFT) |
415 (FIR_OP_PA << FIR_OP2_SHIFT) |
416 (FIR_OP_CW1 << FIR_OP3_SHIFT) |
417 (FIR_OP_RBW << FIR_OP4_SHIFT));
418
419 out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
420 (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
421 } else {
422 out_be32(&lbc->fir,
423 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
424 (FIR_OP_CA << FIR_OP1_SHIFT) |
425 (FIR_OP_PA << FIR_OP2_SHIFT) |
426 (FIR_OP_RBW << FIR_OP3_SHIFT));
427
428 if (oob)
429 out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
430 else
431 out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
432 }
433}
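The FIR values built above are packed sequences of 4-bit FCM opcodes. A helper could make that explicit; the sketch below is hypothetical (not a driver function) and assumes FIR_OP0 sits in bits 31:28 with each later slot 4 bits lower, which the FIR_OPn_SHIFT spacing used in this file suggests:

/* Hypothetical: pack up to 8 FCM opcodes into a FIR register value. */
static u32 fir_pack(const u8 *op, int n)
{
	u32 fir = 0;
	int i;

	for (i = 0; i < n && i < 8; i++)
		fir |= (u32)op[i] << (28 - 4 * i);	/* OP0 in bits 31:28 */
	return fir;
}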
434
435/* cmdfunc - send commands to the FCM */
436static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
437 int column, int page_addr)
438{
439 struct nand_chip *chip = mtd->priv;
440 struct fsl_elbc_mtd *priv = chip->priv;
441 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
442 struct elbc_regs __iomem *lbc = ctrl->regs;
443
444 ctrl->use_mdr = 0;
445
446 /* clear the read buffer */
447 ctrl->read_bytes = 0;
448 if (command != NAND_CMD_PAGEPROG)
449 ctrl->index = 0;
450
451 switch (command) {
452 /* READ0 and READ1 read the entire buffer to use hardware ECC. */
453 case NAND_CMD_READ1:
454 column += 256;
455
456 /* fall-through */
457 case NAND_CMD_READ0:
458 dev_dbg(ctrl->dev,
459 "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
460 " 0x%x, column: 0x%x.\n", page_addr, column);
461
462
463 out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
464 set_addr(mtd, 0, page_addr, 0);
465
466 ctrl->read_bytes = mtd->writesize + mtd->oobsize;
467 ctrl->index += column;
468
469 fsl_elbc_do_read(chip, 0);
470 fsl_elbc_run_command(mtd);
471 return;
472
473 /* READOOB reads only the OOB because no ECC is performed. */
474 case NAND_CMD_READOOB:
475 dev_vdbg(ctrl->dev,
476 "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
477 " 0x%x, column: 0x%x.\n", page_addr, column);
478
479 out_be32(&lbc->fbcr, mtd->oobsize - column);
480 set_addr(mtd, column, page_addr, 1);
481
482 ctrl->read_bytes = mtd->writesize + mtd->oobsize;
483
484 fsl_elbc_do_read(chip, 1);
485 fsl_elbc_run_command(mtd);
486 return;
487
488 /* READID must read all 5 possible bytes while CEB is active */
489 case NAND_CMD_READID:
490 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
491
492 out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
493 (FIR_OP_UA << FIR_OP1_SHIFT) |
494 (FIR_OP_RBW << FIR_OP2_SHIFT));
495 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
496 /* 5 bytes for manuf, device and exts */
497 out_be32(&lbc->fbcr, 5);
498 ctrl->read_bytes = 5;
499 ctrl->use_mdr = 1;
500 ctrl->mdr = 0;
501
502 set_addr(mtd, 0, 0, 0);
503 fsl_elbc_run_command(mtd);
504 return;
505
506 /* ERASE1 stores the block and page address */
507 case NAND_CMD_ERASE1:
508 dev_vdbg(ctrl->dev,
509 "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
510 "page_addr: 0x%x.\n", page_addr);
511 set_addr(mtd, 0, page_addr, 0);
512 return;
513
514 /* ERASE2 uses the block and page address from ERASE1 */
515 case NAND_CMD_ERASE2:
516 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
517
518 out_be32(&lbc->fir,
519 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
520 (FIR_OP_PA << FIR_OP1_SHIFT) |
521 (FIR_OP_CM1 << FIR_OP2_SHIFT));
522
523 out_be32(&lbc->fcr,
524 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
525 (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
526
527 out_be32(&lbc->fbcr, 0);
528 ctrl->read_bytes = 0;
529
530 fsl_elbc_run_command(mtd);
531 return;
532
533 /* SEQIN sets up the addr buffer and all registers except the length */
534 case NAND_CMD_SEQIN: {
535 __be32 fcr;
536 dev_vdbg(ctrl->dev,
537 "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
538 "page_addr: 0x%x, column: 0x%x.\n",
539 page_addr, column);
540
541 ctrl->column = column;
542 ctrl->oob = 0;
543
544 fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
545 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
546
547 if (priv->page_size) {
548 out_be32(&lbc->fir,
549 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
550 (FIR_OP_CA << FIR_OP1_SHIFT) |
551 (FIR_OP_PA << FIR_OP2_SHIFT) |
552 (FIR_OP_WB << FIR_OP3_SHIFT) |
553 (FIR_OP_CW1 << FIR_OP4_SHIFT));
554
555 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
556 } else {
557 out_be32(&lbc->fir,
558 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
559 (FIR_OP_CM2 << FIR_OP1_SHIFT) |
560 (FIR_OP_CA << FIR_OP2_SHIFT) |
561 (FIR_OP_PA << FIR_OP3_SHIFT) |
562 (FIR_OP_WB << FIR_OP4_SHIFT) |
563 (FIR_OP_CW1 << FIR_OP5_SHIFT));
564
565 if (column >= mtd->writesize) {
566 /* OOB area --> READOOB */
567 column -= mtd->writesize;
568 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
569 ctrl->oob = 1;
570 } else if (column < 256) {
571 /* First 256 bytes --> READ0 */
572 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
573 } else {
574 /* Second 256 bytes --> READ1 */
575 fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
576 }
577 }
578
579 out_be32(&lbc->fcr, fcr);
580 set_addr(mtd, column, page_addr, ctrl->oob);
581 return;
582 }
583
584 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
585 case NAND_CMD_PAGEPROG: {
586 int full_page;
587 dev_vdbg(ctrl->dev,
588 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
589 "writing %d bytes.\n", ctrl->index);
590
591 /* if the write did not start at 0 or is not a full page
592 * then set the exact length, otherwise use a full page
593 * write so the HW generates the ECC.
594 */
595 if (ctrl->oob || ctrl->column != 0 ||
596 ctrl->index != mtd->writesize + mtd->oobsize) {
597 out_be32(&lbc->fbcr, ctrl->index);
598 full_page = 0;
599 } else {
600 out_be32(&lbc->fbcr, 0);
601 full_page = 1;
602 }
603
604 fsl_elbc_run_command(mtd);
605
606 /* Read back the page in order to fill in the ECC for the
607 * caller. Is this really needed?
608 */
609 if (full_page && ctrl->oob_poi) {
610 out_be32(&lbc->fbcr, 3);
611 set_addr(mtd, 6, page_addr, 1);
612
613 ctrl->read_bytes = mtd->writesize + 9;
614
615 fsl_elbc_do_read(chip, 1);
616 fsl_elbc_run_command(mtd);
617
618 memcpy_fromio(ctrl->oob_poi + 6,
619 &ctrl->addr[ctrl->index], 3);
620 ctrl->index += 3;
621 }
622
623 ctrl->oob_poi = NULL;
624 return;
625 }
626
627 /* CMD_STATUS must read the status byte while CEB is active */
628 /* Note - it does not wait for the ready line */
629 case NAND_CMD_STATUS:
630 out_be32(&lbc->fir,
631 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
632 (FIR_OP_RBW << FIR_OP1_SHIFT));
633 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
634 out_be32(&lbc->fbcr, 1);
635 set_addr(mtd, 0, 0, 0);
636 ctrl->read_bytes = 1;
637
638 fsl_elbc_run_command(mtd);
639
640 /* The chip always seems to report that it is
641 * write-protected, even when it is not.
642 */
643 setbits8(ctrl->addr, NAND_STATUS_WP);
644 return;
645
646 /* RESET without waiting for the ready line */
647 case NAND_CMD_RESET:
648 dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
649 out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
650 out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
651 fsl_elbc_run_command(mtd);
652 return;
653
654 default:
655 dev_err(ctrl->dev,
656 "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
657 command);
658 }
659}
660
661static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
662{
663 /* The hardware does not seem to support multiple
664 * chips per bank.
665 */
666}
667
668/*
669 * Write buf to the FCM Controller Data Buffer
670 */
671static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
672{
673 struct nand_chip *chip = mtd->priv;
674 struct fsl_elbc_mtd *priv = chip->priv;
675 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
676 unsigned int bufsize = mtd->writesize + mtd->oobsize;
677
678 if (len < 0) {
679		dev_err(ctrl->dev, "write_buf of %d bytes\n", len);
680 ctrl->status = 0;
681 return;
682 }
683
684 if ((unsigned int)len > bufsize - ctrl->index) {
685 dev_err(ctrl->dev,
686 "write_buf beyond end of buffer "
687 "(%d requested, %u available)\n",
688 len, bufsize - ctrl->index);
689 len = bufsize - ctrl->index;
690 }
691
692 memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
693 ctrl->index += len;
694}
695
696/*
697 * Read a byte from the FCM hardware buffer if it has any data left;
698 * all FCM transfers are buffered, so past the end we report an error.
699 */
700static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
701{
702 struct nand_chip *chip = mtd->priv;
703 struct fsl_elbc_mtd *priv = chip->priv;
704 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
705
706 /* If there are still bytes in the FCM, then use the next byte. */
707 if (ctrl->index < ctrl->read_bytes)
708 return in_8(&ctrl->addr[ctrl->index++]);
709
710 dev_err(ctrl->dev, "read_byte beyond end of buffer\n");
711 return ERR_BYTE;
712}
713
714/*
715 * Read from the FCM Controller Data Buffer
716 */
717static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
718{
719 struct nand_chip *chip = mtd->priv;
720 struct fsl_elbc_mtd *priv = chip->priv;
721 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
722 int avail;
723
724 if (len < 0)
725 return;
726
727 avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
728 memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
729 ctrl->index += avail;
730
731 if (len > avail)
732 dev_err(ctrl->dev,
733 "read_buf beyond end of buffer "
734 "(%d requested, %d available)\n",
735 len, avail);
736}
737
738/*
739 * Verify buffer against the FCM Controller Data Buffer
740 */
741static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
742{
743 struct nand_chip *chip = mtd->priv;
744 struct fsl_elbc_mtd *priv = chip->priv;
745 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
746 int i;
747
748 if (len < 0) {
749		dev_err(ctrl->dev, "verify_buf of %d bytes\n", len);
750 return -EINVAL;
751 }
752
753 if ((unsigned int)len > ctrl->read_bytes - ctrl->index) {
754 dev_err(ctrl->dev,
755 "verify_buf beyond end of buffer "
756 "(%d requested, %u available)\n",
757 len, ctrl->read_bytes - ctrl->index);
758
759 ctrl->index = ctrl->read_bytes;
760 return -EINVAL;
761 }
762
763 for (i = 0; i < len; i++)
764 if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i])
765 break;
766
767 ctrl->index += len;
768 return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
769}
770
771/* This function is called after Program and Erase Operations to
772 * check for success or failure.
773 */
774static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
775{
776 struct fsl_elbc_mtd *priv = chip->priv;
777 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
778 struct elbc_regs __iomem *lbc = ctrl->regs;
779
780 if (ctrl->status != LTESR_CC)
781 return NAND_STATUS_FAIL;
782
783 /* Use READ_STATUS command, but wait for the device to be ready */
784 ctrl->use_mdr = 0;
785 out_be32(&lbc->fir,
786 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
787 (FIR_OP_RBW << FIR_OP1_SHIFT));
788 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
789 out_be32(&lbc->fbcr, 1);
790 set_addr(mtd, 0, 0, 0);
791 ctrl->read_bytes = 1;
792
793 fsl_elbc_run_command(mtd);
794
795 if (ctrl->status != LTESR_CC)
796 return NAND_STATUS_FAIL;
797
798 /* The chip always seems to report that it is
799 * write-protected, even when it is not.
800 */
801 setbits8(ctrl->addr, NAND_STATUS_WP);
802 return fsl_elbc_read_byte(mtd);
803}
804
805static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
806{
807 struct nand_chip *chip = mtd->priv;
808 struct fsl_elbc_mtd *priv = chip->priv;
809 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
810 struct elbc_regs __iomem *lbc = ctrl->regs;
811 unsigned int al;
812
813 /* calculate FMR Address Length field */
814 al = 0;
815 if (chip->pagemask & 0xffff0000)
816 al++;
817 if (chip->pagemask & 0xff000000)
818 al++;
819
820 /* add to ECCM mode set in fsl_elbc_init */
821 priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
822 (al << FMR_AL_SHIFT);
823
824 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n",
825 chip->numchips);
826 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n",
827 chip->chipsize);
828 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
829 chip->pagemask);
830 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
831 chip->chip_delay);
832 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
833 chip->badblockpos);
834 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
835 chip->chip_shift);
836 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n",
837 chip->page_shift);
838 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
839 chip->phys_erase_shift);
840 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
841 chip->ecclayout);
842 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
843 chip->ecc.mode);
844 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
845 chip->ecc.steps);
846 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
847 chip->ecc.bytes);
848 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
849 chip->ecc.total);
850 dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
851 chip->ecc.layout);
852 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
853 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size);
854 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
855 mtd->erasesize);
856 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n",
857 mtd->writesize);
858 dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
859 mtd->oobsize);
860
861 /* adjust Option Register and ECC to match Flash page size */
862 if (mtd->writesize == 512) {
863 priv->page_size = 0;
864 clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS);
865 } else if (mtd->writesize == 2048) {
866 priv->page_size = 1;
867 setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
868 /* adjust ecc setup if needed */
869 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
870 BR_DECC_CHK_GEN) {
871 chip->ecc.size = 512;
872 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
873 &fsl_elbc_oob_lp_eccm1 :
874 &fsl_elbc_oob_lp_eccm0;
875 mtd->ecclayout = chip->ecc.layout;
876 mtd->oobavail = chip->ecc.layout->oobavail;
877 }
878 } else {
879 dev_err(ctrl->dev,
880 "fsl_elbc_init: page size %d is not supported\n",
881 mtd->writesize);
882 return -1;
883 }
884
885 /* The default u-boot configuration on MPC8313ERDB causes errors;
886 * more delay is needed. This should be safe for other boards
887 * as well.
888 */
889 setbits32(&lbc->bank[priv->bank].or, 0x70);
890 return 0;
891}
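To make the FMR[AL] computation above concrete, here is a standalone check with an assumed geometry (a 512MiB chip with 2KiB pages has 2^18 pages, so pagemask = 0x3ffff):

#include <stdio.h>

int main(void)
{
	unsigned int pagemask = 0x3ffff;	/* assumed example geometry */
	int al = 0;

	if (pagemask & 0xffff0000)
		al++;
	if (pagemask & 0xff000000)
		al++;

	printf("FMR[AL] = %d\n", al);	/* prints 1: one extra address byte */
	return 0;
}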
892
893static int fsl_elbc_read_page(struct mtd_info *mtd,
894 struct nand_chip *chip,
895 uint8_t *buf)
896{
897 fsl_elbc_read_buf(mtd, buf, mtd->writesize);
898 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
899
900 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
901 mtd->ecc_stats.failed++;
902
903 return 0;
904}
905
906/* ECC will be calculated automatically, and errors will be detected in
907 * waitfunc.
908 */
909static void fsl_elbc_write_page(struct mtd_info *mtd,
910 struct nand_chip *chip,
911 const uint8_t *buf)
912{
913 struct fsl_elbc_mtd *priv = chip->priv;
914 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
915
916 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
917 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
918
919 ctrl->oob_poi = chip->oob_poi;
920}
921
922static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
923{
924 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
925 struct elbc_regs __iomem *lbc = ctrl->regs;
926 struct nand_chip *chip = &priv->chip;
927
928 dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
929
930 /* Fill in fsl_elbc_mtd structure */
931 priv->mtd.priv = chip;
932 priv->mtd.owner = THIS_MODULE;
933 priv->fmr = 0; /* rest filled in later */
934
935 /* fill in nand_chip structure */
936 /* set up function call table */
937 chip->read_byte = fsl_elbc_read_byte;
938 chip->write_buf = fsl_elbc_write_buf;
939 chip->read_buf = fsl_elbc_read_buf;
940 chip->verify_buf = fsl_elbc_verify_buf;
941 chip->select_chip = fsl_elbc_select_chip;
942 chip->cmdfunc = fsl_elbc_cmdfunc;
943 chip->waitfunc = fsl_elbc_wait;
944
945 /* set up nand options */
946 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
947
948 chip->controller = &ctrl->controller;
949 chip->priv = priv;
950
951 chip->ecc.read_page = fsl_elbc_read_page;
952 chip->ecc.write_page = fsl_elbc_write_page;
953
954 /* If CS Base Register selects full hardware ECC then use it */
955 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
956 BR_DECC_CHK_GEN) {
957 chip->ecc.mode = NAND_ECC_HW;
958 /* put in small page settings and adjust later if needed */
959 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
960 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
961 chip->ecc.size = 512;
962 chip->ecc.bytes = 3;
963 } else {
964 /* otherwise fall back to default software ECC */
965 chip->ecc.mode = NAND_ECC_SOFT;
966 }
967
968 return 0;
969}
970
971static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
972{
973 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
974
975 nand_release(&priv->mtd);
976
977 if (priv->vbase)
978 iounmap(priv->vbase);
979
980 ctrl->chips[priv->bank] = NULL;
981 kfree(priv);
982
983 return 0;
984}
985
986static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
987 struct device_node *node)
988{
989 struct elbc_regs __iomem *lbc = ctrl->regs;
990 struct fsl_elbc_mtd *priv;
991 struct resource res;
992#ifdef CONFIG_MTD_PARTITIONS
993 static const char *part_probe_types[]
994 = { "cmdlinepart", "RedBoot", NULL };
995 struct mtd_partition *parts;
996#endif
997 int ret;
998 int bank;
999
1000 /* get, allocate and map the memory resource */
1001 ret = of_address_to_resource(node, 0, &res);
1002 if (ret) {
1003 dev_err(ctrl->dev, "failed to get resource\n");
1004 return ret;
1005 }
1006
1007 /* find which chip select it is connected to */
1008 for (bank = 0; bank < MAX_BANKS; bank++)
1009 if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
1010 (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
1011 (in_be32(&lbc->bank[bank].br) &
1012 in_be32(&lbc->bank[bank].or) & BR_BA)
1013 == res.start)
1014 break;
1015
1016 if (bank >= MAX_BANKS) {
1017 dev_err(ctrl->dev, "address did not match any chip selects\n");
1018 return -ENODEV;
1019 }
1020
1021 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1022 if (!priv)
1023 return -ENOMEM;
1024
1025 ctrl->chips[bank] = priv;
1026 priv->bank = bank;
1027 priv->ctrl = ctrl;
1028 priv->dev = ctrl->dev;
1029
1030 priv->vbase = ioremap(res.start, res.end - res.start + 1);
1031 if (!priv->vbase) {
1032 dev_err(ctrl->dev, "failed to map chip region\n");
1033 ret = -ENOMEM;
1034 goto err;
1035 }
1036
1037 ret = fsl_elbc_chip_init(priv);
1038 if (ret)
1039 goto err;
1040
1041 ret = nand_scan_ident(&priv->mtd, 1);
1042 if (ret)
1043 goto err;
1044
1045 ret = fsl_elbc_chip_init_tail(&priv->mtd);
1046 if (ret)
1047 goto err;
1048
1049 ret = nand_scan_tail(&priv->mtd);
1050 if (ret)
1051 goto err;
1052
1053#ifdef CONFIG_MTD_PARTITIONS
1054 /* First look for RedBoot table or partitions on the command
1055 * line, these take precedence over device tree information */
1056 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
1057 if (ret < 0)
1058 goto err;
1059
1060#ifdef CONFIG_MTD_OF_PARTS
1061 if (ret == 0) {
1062 ret = of_mtd_parse_partitions(priv->dev, &priv->mtd,
1063 node, &parts);
1064 if (ret < 0)
1065 goto err;
1066 }
1067#endif
1068
1069 if (ret > 0)
1070 add_mtd_partitions(&priv->mtd, parts, ret);
1071 else
1072#endif
1073 add_mtd_device(&priv->mtd);
1074
1075	printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
1076	       (unsigned long long)res.start, priv->bank);
1077 return 0;
1078
1079err:
1080 fsl_elbc_chip_remove(priv);
1081 return ret;
1082}
1083
1084static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
1085{
1086 struct elbc_regs __iomem *lbc = ctrl->regs;
1087
1088 /* clear event registers */
1089 setbits32(&lbc->ltesr, LTESR_NAND_MASK);
1090 out_be32(&lbc->lteatr, 0);
1091
1092 /* Enable interrupts for any detected events */
1093 out_be32(&lbc->lteir, LTESR_NAND_MASK);
1094
1095 ctrl->read_bytes = 0;
1096 ctrl->index = 0;
1097 ctrl->addr = NULL;
1098
1099 return 0;
1100}
1101
1102static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev)
1103{
1104 struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
1105 int i;
1106
1107 for (i = 0; i < MAX_BANKS; i++)
1108 if (ctrl->chips[i])
1109 fsl_elbc_chip_remove(ctrl->chips[i]);
1110
1111 if (ctrl->irq)
1112 free_irq(ctrl->irq, ctrl);
1113
1114 if (ctrl->regs)
1115 iounmap(ctrl->regs);
1116
1117 dev_set_drvdata(&ofdev->dev, NULL);
1118 kfree(ctrl);
1119 return 0;
1120}
1121
1122/* NOTE: This interrupt is also used to report other localbus events,
1123 * such as transaction errors on other chipselects. If we want to
1124 * capture those, we'll need to move the IRQ code into a shared
1125 * LBC driver.
1126 */
1127
1128static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
1129{
1130 struct fsl_elbc_ctrl *ctrl = data;
1131 struct elbc_regs __iomem *lbc = ctrl->regs;
1132 __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
1133
1134 if (status) {
1135 out_be32(&lbc->ltesr, status);
1136 out_be32(&lbc->lteatr, 0);
1137
1138 ctrl->irq_status = status;
1139 smp_wmb();
1140 wake_up(&ctrl->irq_wait);
1141
1142 return IRQ_HANDLED;
1143 }
1144
1145 return IRQ_NONE;
1146}
1147
1148/* fsl_elbc_ctrl_probe
1149 *
1150 * Called by the device layer when it finds a device matching
1151 * one this driver can handle. This code allocates all of
1152 * the resources needed for the controller only. The
1153 * resources for the NAND banks themselves are allocated
1154 * in the chip probe function.
1155*/
1156
1157static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
1158 const struct of_device_id *match)
1159{
1160 struct device_node *child;
1161 struct fsl_elbc_ctrl *ctrl;
1162 int ret;
1163
1164 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1165 if (!ctrl)
1166 return -ENOMEM;
1167
1168 dev_set_drvdata(&ofdev->dev, ctrl);
1169
1170 spin_lock_init(&ctrl->controller.lock);
1171 init_waitqueue_head(&ctrl->controller.wq);
1172 init_waitqueue_head(&ctrl->irq_wait);
1173
1174 ctrl->regs = of_iomap(ofdev->node, 0);
1175 if (!ctrl->regs) {
1176 dev_err(&ofdev->dev, "failed to get memory region\n");
1177 ret = -ENODEV;
1178 goto err;
1179 }
1180
1181 ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
1182 if (ctrl->irq == NO_IRQ) {
1183 dev_err(&ofdev->dev, "failed to get irq resource\n");
1184 ret = -ENODEV;
1185 goto err;
1186 }
1187
1188 ctrl->dev = &ofdev->dev;
1189
1190 ret = fsl_elbc_ctrl_init(ctrl);
1191 if (ret < 0)
1192 goto err;
1193
1194 ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl);
1195 if (ret != 0) {
1196 dev_err(&ofdev->dev, "failed to install irq (%d)\n",
1197 ctrl->irq);
1198		/* ret already holds the error code from request_irq */
1199 goto err;
1200 }
1201
1202 for_each_child_of_node(ofdev->node, child)
1203 if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
1204 fsl_elbc_chip_probe(ctrl, child);
1205
1206 return 0;
1207
1208err:
1209 fsl_elbc_ctrl_remove(ofdev);
1210 return ret;
1211}
1212
1213static const struct of_device_id fsl_elbc_match[] = {
1214 {
1215 .compatible = "fsl,elbc",
1216 },
1217 {}
1218};
1219
1220static struct of_platform_driver fsl_elbc_ctrl_driver = {
1221 .driver = {
1222 .name = "fsl-elbc",
1223 },
1224 .match_table = fsl_elbc_match,
1225 .probe = fsl_elbc_ctrl_probe,
1226 .remove = __devexit_p(fsl_elbc_ctrl_remove),
1227};
1228
1229static int __init fsl_elbc_init(void)
1230{
1231 return of_register_platform_driver(&fsl_elbc_ctrl_driver);
1232}
1233
1234static void __exit fsl_elbc_exit(void)
1235{
1236 of_unregister_platform_driver(&fsl_elbc_ctrl_driver);
1237}
1238
1239module_init(fsl_elbc_init);
1240module_exit(fsl_elbc_exit);
1241
1242MODULE_LICENSE("GPL");
1243MODULE_AUTHOR("Freescale");
1244MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ddd4fc019042..7acb1a0e7409 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
 		chip->ecc.write_oob = nand_write_oob_std;

 	case NAND_ECC_HW_SYNDROME:
-		if (!chip->ecc.calculate || !chip->ecc.correct ||
-		    !chip->ecc.hwctl) {
+		if ((!chip->ecc.calculate || !chip->ecc.correct ||
+		     !chip->ecc.hwctl) &&
+		    (!chip->ecc.read_page ||
+		     chip->ecc.read_page == nand_read_page_hwecc ||
+		     !chip->ecc.write_page ||
+		     chip->ecc.write_page == nand_write_page_hwecc)) {
 			printk(KERN_WARNING "No ECC functions supplied, "
 			       "Hardware ECC not possible\n");
 			BUG();
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 000000000000..9162cca0182b
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,171 @@
1/*
2 * drivers/mtd/nand/orion_nand.c
3 *
4 * NAND support for Marvell Orion SoC platforms
5 *
6 * Tzachi Perelstein <tzachi@marvell.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/nand.h>
18#include <linux/mtd/partitions.h>
19#include <asm/io.h>
20#include <asm/sizes.h>
21#include <asm/arch/platform.h>
22#include <asm/arch/hardware.h>
23
24#ifdef CONFIG_MTD_CMDLINE_PARTS
25static const char *part_probes[] = { "cmdlinepart", NULL };
26#endif
27
28static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
29{
30 struct nand_chip *nc = mtd->priv;
31 struct orion_nand_data *board = nc->priv;
32 u32 offs;
33
34 if (cmd == NAND_CMD_NONE)
35 return;
36
37 if (ctrl & NAND_CLE)
38 offs = (1 << board->cle);
39 else if (ctrl & NAND_ALE)
40 offs = (1 << board->ale);
41 else
42 return;
43
44 if (nc->options & NAND_BUSWIDTH_16)
45 offs <<= 1;
46
47 writeb(cmd, nc->IO_ADDR_W + offs);
48}
49
50static int __init orion_nand_probe(struct platform_device *pdev)
51{
52 struct mtd_info *mtd;
53 struct nand_chip *nc;
54 struct orion_nand_data *board;
55 void __iomem *io_base;
56 int ret = 0;
57#ifdef CONFIG_MTD_PARTITIONS
58 struct mtd_partition *partitions = NULL;
59 int num_part = 0;
60#endif
61
62 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
63 if (!nc) {
64 printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
65 ret = -ENOMEM;
66 goto no_res;
67 }
68 mtd = (struct mtd_info *)(nc + 1);
69
70 io_base = ioremap(pdev->resource[0].start,
71 pdev->resource[0].end - pdev->resource[0].start + 1);
72 if (!io_base) {
73 printk(KERN_ERR "orion_nand: ioremap failed\n");
74 ret = -EIO;
75 goto no_res;
76 }
77
78 board = pdev->dev.platform_data;
79
80 mtd->priv = nc;
81 mtd->owner = THIS_MODULE;
82
83 nc->priv = board;
84 nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
85 nc->cmd_ctrl = orion_nand_cmd_ctrl;
86 nc->ecc.mode = NAND_ECC_SOFT;
87
88 if (board->width == 16)
89 nc->options |= NAND_BUSWIDTH_16;
90
91 platform_set_drvdata(pdev, mtd);
92
93 if (nand_scan(mtd, 1)) {
94 ret = -ENXIO;
95 goto no_dev;
96 }
97
98#ifdef CONFIG_MTD_PARTITIONS
99#ifdef CONFIG_MTD_CMDLINE_PARTS
100 mtd->name = "orion_nand";
101 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
102#endif
103 /* If cmdline partitions have been passed, let them be used */
104 if (num_part <= 0) {
105 num_part = board->nr_parts;
106 partitions = board->parts;
107 }
108
109 if (partitions && num_part > 0)
110 ret = add_mtd_partitions(mtd, partitions, num_part);
111 else
112 ret = add_mtd_device(mtd);
113#else
114 ret = add_mtd_device(mtd);
115#endif
116
117 if (ret) {
118 nand_release(mtd);
119 goto no_dev;
120 }
121
122 return 0;
123
124no_dev:
125 platform_set_drvdata(pdev, NULL);
126 iounmap(io_base);
127no_res:
128 kfree(nc);
129
130 return ret;
131}
132
133static int __devexit orion_nand_remove(struct platform_device *pdev)
134{
135 struct mtd_info *mtd = platform_get_drvdata(pdev);
136 struct nand_chip *nc = mtd->priv;
137
138 nand_release(mtd);
139
140 iounmap(nc->IO_ADDR_W);
141
142 kfree(nc);
143
144 return 0;
145}
146
147static struct platform_driver orion_nand_driver = {
148 .probe = orion_nand_probe,
149 .remove = orion_nand_remove,
150 .driver = {
151 .name = "orion_nand",
152 .owner = THIS_MODULE,
153 },
154};
155
156static int __init orion_nand_init(void)
157{
158 return platform_driver_register(&orion_nand_driver);
159}
160
161static void __exit orion_nand_exit(void)
162{
163 platform_driver_unregister(&orion_nand_driver);
164}
165
166module_init(orion_nand_init);
167module_exit(orion_nand_exit);
168
169MODULE_LICENSE("GPL");
170MODULE_AUTHOR("Tzachi Perelstein");
171MODULE_DESCRIPTION("NAND glue for Orion platforms");
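Board support only has to register a platform device named "orion_nand" whose platform_data fills in struct orion_nand_data. A hedged sketch (partition layout and values invented; the field names follow their uses in orion_nand_cmd_ctrl() and orion_nand_probe() above):

static struct mtd_partition example_nand_parts[] = {
	{ .name = "u-boot", .offset = 0,     .size = SZ_1M },
	{ .name = "rootfs", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
};

static struct orion_nand_data example_nand_data = {
	.cle      = 0,			/* address line wired to CLE */
	.ale      = 1,			/* address line wired to ALE */
	.width    = 8,			/* bus width in bits */
	.parts    = example_nand_parts,
	.nr_parts = ARRAY_SIZE(example_nand_parts),
};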
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 000000000000..75c899039023
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Author: Egor Martovetsky <egor@pasemi.com>
5 * Maintained by: Olof Johansson <olof@lixom.net>
6 *
7 * Driver for the PWRficient onchip NAND flash interface
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#undef DEBUG
24
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/of_platform.h>
32#include <linux/platform_device.h>
33#include <linux/pci.h>
34
35#include <asm/io.h>
36
37#define LBICTRL_LPCCTL_NR 0x00004000
38#define CLE_PIN_CTL 15
39#define ALE_PIN_CTL 14
40
41static unsigned int lpcctl;
42static struct mtd_info *pasemi_nand_mtd;
43static const char driver_name[] = "pasemi-nand";
44
45static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
46{
47 struct nand_chip *chip = mtd->priv;
48
49 while (len > 0x800) {
50 memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
51 buf += 0x800;
52 len -= 0x800;
53 }
54 memcpy_fromio(buf, chip->IO_ADDR_R, len);
55}
56
57static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
58{
59 struct nand_chip *chip = mtd->priv;
60
61 while (len > 0x800) {
62 memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
63 buf += 0x800;
64 len -= 0x800;
65 }
66 memcpy_toio(chip->IO_ADDR_R, buf, len);
67}
68
69static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
70 unsigned int ctrl)
71{
72 struct nand_chip *chip = mtd->priv;
73
74 if (cmd == NAND_CMD_NONE)
75 return;
76
77 if (ctrl & NAND_CLE)
78 out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
79 else
80 out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
81
82 /* Push out posted writes */
83 eieio();
84 inl(lpcctl);
85}
86
87int pasemi_device_ready(struct mtd_info *mtd)
88{
89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
90}
91
92static int __devinit pasemi_nand_probe(struct of_device *ofdev,
93 const struct of_device_id *match)
94{
95 struct pci_dev *pdev;
96 struct device_node *np = ofdev->node;
97 struct resource res;
98 struct nand_chip *chip;
99 int err = 0;
100
101 err = of_address_to_resource(np, 0, &res);
102
103 if (err)
104 return -EINVAL;
105
106 /* We only support one device at the moment */
107 if (pasemi_nand_mtd)
108 return -ENODEV;
109
110 pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
111
112 /* Allocate memory for MTD device structure and private data */
113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
114 sizeof(struct nand_chip), GFP_KERNEL);
115 if (!pasemi_nand_mtd) {
116 printk(KERN_WARNING
117 "Unable to allocate PASEMI NAND MTD device structure\n");
118 err = -ENOMEM;
119 goto out;
120 }
121
122 /* Get pointer to private data */
123 chip = (struct nand_chip *)&pasemi_nand_mtd[1];
124
125 /* Link the private data with the MTD structure */
126 pasemi_nand_mtd->priv = chip;
127 pasemi_nand_mtd->owner = THIS_MODULE;
128
129 chip->IO_ADDR_R = of_iomap(np, 0);
130 chip->IO_ADDR_W = chip->IO_ADDR_R;
131
132 if (!chip->IO_ADDR_R) {
133 err = -EIO;
134 goto out_mtd;
135 }
136
137 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
138 if (!pdev) {
139 err = -ENODEV;
140 goto out_ior;
141 }
142
143 lpcctl = pci_resource_start(pdev, 0);
144
145 if (!request_region(lpcctl, 4, driver_name)) {
146 err = -EBUSY;
147 goto out_ior;
148 }
149
150 chip->cmd_ctrl = pasemi_hwcontrol;
151 chip->dev_ready = pasemi_device_ready;
152 chip->read_buf = pasemi_read_buf;
153 chip->write_buf = pasemi_write_buf;
154 chip->chip_delay = 0;
155 chip->ecc.mode = NAND_ECC_SOFT;
156
157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
159
160	/* Scan to find existence of the device */
161 if (nand_scan(pasemi_nand_mtd, 1)) {
162 err = -ENXIO;
163 goto out_lpc;
164 }
165
166 if (add_mtd_device(pasemi_nand_mtd)) {
167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
168 err = -ENODEV;
169 goto out_lpc;
170 }
171
172 printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
173 res.start, lpcctl);
174
175 return 0;
176
177 out_lpc:
178 release_region(lpcctl, 4);
179 out_ior:
180 iounmap(chip->IO_ADDR_R);
181 out_mtd:
182 kfree(pasemi_nand_mtd);
183 out:
184 return err;
185}
186
187static int __devexit pasemi_nand_remove(struct of_device *ofdev)
188{
189 struct nand_chip *chip;
190
191 if (!pasemi_nand_mtd)
192 return 0;
193
194 chip = pasemi_nand_mtd->priv;
195
196 /* Release resources, unregister device */
197 nand_release(pasemi_nand_mtd);
198
199 release_region(lpcctl, 4);
200
201 iounmap(chip->IO_ADDR_R);
202
203 /* Free the MTD device structure */
204 kfree(pasemi_nand_mtd);
205
206 pasemi_nand_mtd = NULL;
207
208 return 0;
209}
210
211static struct of_device_id pasemi_nand_match[] =
212{
213 {
214 .compatible = "pasemi,localbus-nand",
215 },
216 {},
217};
218
219MODULE_DEVICE_TABLE(of, pasemi_nand_match);
220
221static struct of_platform_driver pasemi_nand_driver =
222{
223 .name = (char*)driver_name,
224 .match_table = pasemi_nand_match,
225 .probe = pasemi_nand_probe,
226 .remove = pasemi_nand_remove,
227};
228
229static int __init pasemi_nand_init(void)
230{
231 return of_register_platform_driver(&pasemi_nand_driver);
232}
233module_init(pasemi_nand_init);
234
235static void __exit pasemi_nand_exit(void)
236{
237 of_unregister_platform_driver(&pasemi_nand_driver);
238}
239module_exit(pasemi_nand_exit);
240
241MODULE_LICENSE("GPL");
242MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
243MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
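Like orion_nand above, this driver asserts CLE and ALE through address bits instead of GPIOs: the latch pins are wired to low-order address lines, so writing the command byte to base + (1 << pin) strobes the right latch for that one bus cycle. A minimal restatement of the idiom (hypothetical helper; the pin numbers come from the CLE_PIN_CTL/ALE_PIN_CTL defines above):

static inline void latch_write(void __iomem *base, int pin, u8 cmd)
{
	out_8(base + (1 << pin), cmd);	/* pin 15 = CLE, pin 14 = ALE here */
}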
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index cd725fc5e813..f6d5c2adc4fd 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -110,7 +110,9 @@ out:
 static int __devexit plat_nand_remove(struct platform_device *pdev)
 {
 	struct plat_nand_data *data = platform_get_drvdata(pdev);
+#ifdef CONFIG_MTD_PARTITIONS
 	struct platform_nand_data *pdata = pdev->dev.platform_data;
+#endif
 
 	nand_release(&data->mtd);
 #ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2bd0737572c6..9260ad947524 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
 	int			sel_bit;
 	int			mtd_count;
 
+	unsigned long		save_nfconf;
+
 	enum s3c_cpu_type	cpu_type;
 };
 
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
 	    ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
 		/* calculate the bit position of the error */
 
-		bit = (diff2 >> 2) & 1;
-		bit |= (diff2 >> 3) & 2;
-		bit |= (diff2 >> 4) & 4;
+		bit = ((diff2 >> 3) & 1) |
+		      ((diff2 >> 4) & 2) |
+		      ((diff2 >> 5) & 4);
 
 		/* calculate the byte position of the error */
 
-		byte = (diff1 << 1) & 0x80;
-		byte |= (diff1 << 2) & 0x40;
-		byte |= (diff1 << 3) & 0x20;
-		byte |= (diff1 << 4) & 0x10;
-
-		byte |= (diff0 >> 3) & 0x08;
-		byte |= (diff0 >> 2) & 0x04;
-		byte |= (diff0 >> 1) & 0x02;
-		byte |= (diff0 >> 0) & 0x01;
-
-		byte |= (diff2 << 8) & 0x100;
+		byte = ((diff2 << 7) & 0x100) |
+		       ((diff1 << 0) & 0x80) |
+		       ((diff1 << 1) & 0x40) |
+		       ((diff1 << 2) & 0x20) |
+		       ((diff1 << 3) & 0x10) |
+		       ((diff0 >> 4) & 0x08) |
+		       ((diff0 >> 3) & 0x04) |
+		       ((diff0 >> 2) & 0x02) |
+		       ((diff0 >> 1) & 0x01);
 
 		dev_dbg(info->device, "correcting error bit %d, byte %d\n",
 			bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
 	if ((diff0 & ~(1<<fls(diff0))) == 0)
 		return 1;
 
-	return 0;
+	return -1;
 }
 
 /* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
 
 	if (info) {
+		info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
+
+		/* For the moment, we must ensure nFCE is high during
+		 * the time we are suspended. This really should be
+		 * handled by suspending the MTDs we are using, but
+		 * that is currently not the case. */
+
+		writel(info->save_nfconf | info->sel_bit,
+		       info->regs + S3C2410_NFCONF);
+
 		if (!allow_clk_stop(info))
 			clk_disable(info->clk);
 	}
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 static int s3c24xx_nand_resume(struct platform_device *dev)
 {
 	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+	unsigned long nfconf;
 
 	if (info) {
 		clk_enable(info->clk);
 		s3c2410_nand_inithw(info, dev);
 
+		/* Restore the state of the nFCE line. */
+
+		nfconf = readl(info->regs + S3C2410_NFCONF);
+		nfconf &= ~info->sel_bit;
+		nfconf |= info->save_nfconf & info->sel_bit;
+		writel(nfconf, info->regs + S3C2410_NFCONF);
+
 		if (allow_clk_stop(info))
 			clk_disable(info->clk);
 	}
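The rewritten bit/byte extraction in the hunk above is easy to sanity-check in isolation. A standalone sketch with one invented syndrome (each diff byte satisfies the single-bit-error test ((diff ^ (diff >> 1)) & 0x55) == 0x55):

#include <stdio.h>

int main(void)
{
	unsigned diff0 = 0x56, diff1 = 0x56, diff2 = 0x56;	/* invented */

	int bit  = ((diff2 >> 3) & 1) |
		   ((diff2 >> 4) & 2) |
		   ((diff2 >> 5) & 4);

	int byte = ((diff2 << 7) & 0x100) |
		   ((diff1 << 0) & 0x80) |
		   ((diff1 << 1) & 0x40) |
		   ((diff1 << 2) & 0x20) |
		   ((diff1 << 3) & 0x10) |
		   ((diff0 >> 4) & 0x08) |
		   ((diff0 >> 3) & 0x04) |
		   ((diff0 >> 2) & 0x02) |
		   ((diff0 >> 1) & 0x01);

	printf("error at byte %d, bit %d\n", byte, bit);	/* byte 273, bit 0 */
	return 0;
}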
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000000..f86e06934cd8
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
1/*
2 * Flash partitions described by the OF (or flattened) device tree
3 *
4 * Copyright (C) 2006 MontaVista Software Inc.
5 * Author: Vitaly Wool <vwool@ru.mvista.com>
6 *
7 * Revised to handle newer style flash binding by:
8 * Copyright (C) 2007 David Gibson, IBM Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/of.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/partitions.h>
21
22int __devinit of_mtd_parse_partitions(struct device *dev,
23 struct mtd_info *mtd,
24 struct device_node *node,
25 struct mtd_partition **pparts)
26{
27 const char *partname;
28 struct device_node *pp;
29 int nr_parts, i;
30
31 /* First count the subnodes */
32 pp = NULL;
33 nr_parts = 0;
34 while ((pp = of_get_next_child(node, pp)))
35 nr_parts++;
36
37 if (nr_parts == 0)
38 return 0;
39
40 *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
41 if (!*pparts)
42 return -ENOMEM;
43
44 pp = NULL;
45 i = 0;
46 while ((pp = of_get_next_child(node, pp))) {
47 const u32 *reg;
48 int len;
49
50 reg = of_get_property(pp, "reg", &len);
51 if (!reg || (len != 2 * sizeof(u32))) {
52 of_node_put(pp);
53 dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
54 kfree(*pparts);
55 *pparts = NULL;
56 return -EINVAL;
57 }
58 (*pparts)[i].offset = reg[0];
59 (*pparts)[i].size = reg[1];
60
61 partname = of_get_property(pp, "label", &len);
62 if (!partname)
63 partname = of_get_property(pp, "name", &len);
64 (*pparts)[i].name = (char *)partname;
65
66 if (of_get_property(pp, "read-only", &len))
67 (*pparts)[i].mask_flags = MTD_WRITEABLE;
68
69 i++;
70 }
71
72 return nr_parts;
73}
74EXPORT_SYMBOL(of_mtd_parse_partitions);
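As a usage illustration (labels, offsets, and sizes invented): for a flash node with two subnodes, say partition@0 { label = "u-boot"; reg = <0x0 0x40000>; read-only; }; and partition@40000 { label = "kernel"; reg = <0x40000 0x3c0000>; };, of_mtd_parse_partitions() would return 2 and fill *pparts equivalently to:

static struct mtd_partition example_parts[] = {
	{
		.name       = "u-boot",
		.offset     = 0x000000,
		.size       = 0x040000,
		.mask_flags = MTD_WRITEABLE,	/* from the "read-only" property */
	},
	{
		.name   = "kernel",
		.offset = 0x040000,
		.size   = 0x3c0000,
	},
};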
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1b0b32011415..8d7d21be1541 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/mtd/mtd.h>
@@ -170,6 +171,18 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
 }
 
 /**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id	OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+	int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+	return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
  * onenand_command - [DEFAULT] Send command to OneNAND device
  * @param mtd		MTD device structure
  * @param cmd		the command to be sent
@@ -182,8 +195,7 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
 {
 	struct onenand_chip *this = mtd->priv;
-	int value, readcmd = 0, block_cmd = 0;
-	int block, page;
+	int value, block, page;
 
 	/* Address translation */
 	switch (cmd) {
@@ -198,7 +210,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 	case ONENAND_CMD_ERASE:
 	case ONENAND_CMD_BUFFERRAM:
 	case ONENAND_CMD_OTP_ACCESS:
-		block_cmd = 1;
 		block = (int) (addr >> this->erase_shift);
 		page = -1;
 		break;
@@ -240,11 +251,9 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		value = onenand_block_address(this, block);
 		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
 
-		if (block_cmd) {
-			/* Select DataRAM for DDP */
-			value = onenand_bufferram_address(this, block);
-			this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
-		}
+		/* Select DataRAM for DDP */
+		value = onenand_bufferram_address(this, block);
+		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
 	}
 
 	if (page != -1) {
@@ -256,7 +265,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 	case ONENAND_CMD_READ:
 	case ONENAND_CMD_READOOB:
 		dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
-		readcmd = 1;
 		break;
 
 	default:
@@ -273,12 +281,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		/* Write 'BSA, BSC' of DataRAM */
 		value = onenand_buffer_address(dataram, sectors, count);
 		this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
-
-		if (readcmd) {
-			/* Select DataRAM for DDP */
-			value = onenand_bufferram_address(this, block);
-			this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
-		}
 	}
 
 	/* Interrupt clear */
@@ -855,6 +857,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 			this->command(mtd, ONENAND_CMD_READ, from, writesize);
 			ret = this->wait(mtd, FL_READING);
 			onenand_update_bufferram(mtd, from, !ret);
+			if (ret == -EBADMSG)
+				ret = 0;
 		}
 	}
 
@@ -913,6 +917,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 			/* Now wait for load */
 			ret = this->wait(mtd, FL_READING);
 			onenand_update_bufferram(mtd, from, !ret);
+			if (ret == -EBADMSG)
+				ret = 0;
 		}
 
 	/*
@@ -923,12 +929,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	ops->retlen = read;
 	ops->oobretlen = oobread;
 
-	if (mtd->ecc_stats.failed - stats.failed)
-		return -EBADMSG;
-
 	if (ret)
 		return ret;
 
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
 	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
 }
 
@@ -944,6 +950,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 				struct mtd_oob_ops *ops)
 {
 	struct onenand_chip *this = mtd->priv;
+	struct mtd_ecc_stats stats;
 	int read = 0, thislen, column, oobsize;
 	size_t len = ops->ooblen;
 	mtd_oob_mode_t mode = ops->mode;
@@ -977,6 +984,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 		return -EINVAL;
 	}
 
+	stats = mtd->ecc_stats;
+
 	while (read < len) {
 		cond_resched();
 
@@ -988,18 +997,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 		onenand_update_bufferram(mtd, from, 0);
 
 		ret = this->wait(mtd, FL_READING);
-		/* First copy data and check return value for ECC handling */
+		if (ret && ret != -EBADMSG) {
+			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+			break;
+		}
 
 		if (mode == MTD_OOB_AUTO)
 			onenand_transfer_auto_oob(mtd, buf, column, thislen);
 		else
 			this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
 
-		if (ret) {
-			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
-			break;
-		}
-
 		read += thislen;
 
 		if (read == len)
@@ -1016,7 +1023,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	}
 
 	ops->oobretlen = read;
-	return ret;
+
+	if (ret)
+		return ret;
+
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
+	return 0;
 }
 
 /**
@@ -1106,12 +1120,10 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
 	interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
 	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
 
+	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
 		printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
-		/* Initial bad block case */
-		if (ctrl & ONENAND_CTRL_LOAD)
-			return ONENAND_BBT_READ_ERROR;
-		return ONENAND_BBT_READ_FATAL_ERROR;
+		return ONENAND_BBT_READ_ERROR;
 	}
 
 	if (interrupt & ONENAND_INT_READ) {
@@ -1206,7 +1218,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
 {
 	struct onenand_chip *this = mtd->priv;
-	char oobbuf[64];
+	u_char *oob_buf = this->oob_buf;
 	int status, i;
 
 	this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
@@ -1215,9 +1227,9 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
 	if (status)
 		return status;
 
-	this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+	this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
 	for (i = 0; i < mtd->oobsize; i++)
-		if (buf[i] != 0xFF && buf[i] != oobbuf[i])
+		if (buf[i] != 0xFF && buf[i] != oob_buf[i])
 			return -EBADMSG;
 
 	return 0;
@@ -1273,6 +1285,112 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
1273 1285
1274#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0) 1286#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
1275 1287
1288static void onenand_panic_wait(struct mtd_info *mtd)
1289{
1290 struct onenand_chip *this = mtd->priv;
1291 unsigned int interrupt;
1292 int i;
1293
1294 for (i = 0; i < 2000; i++) {
1295 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1296 if (interrupt & ONENAND_INT_MASTER)
1297 break;
1298 udelay(10);
1299 }
1300}
1301
1302/**
1303 * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
1304 * @param mtd MTD device structure
1305 * @param to offset to write to
1306 * @param len number of bytes to write
1307 * @param retlen pointer to variable to store the number of written bytes
1308 * @param buf the data to write
1309 *
1310 * Write with ECC
1311 */
1312static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1313 size_t *retlen, const u_char *buf)
1314{
1315 struct onenand_chip *this = mtd->priv;
1316 int column, subpage;
1317 int written = 0;
1318 int ret = 0;
1319
1320 if (this->state == FL_PM_SUSPENDED)
1321 return -EBUSY;
1322
1323 /* Wait for any existing operation to clear */
1324 onenand_panic_wait(mtd);
1325
1326 DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
1327 (unsigned int) to, (int) len);
1328
1329 /* Initialize retlen, in case of early exit */
1330 *retlen = 0;
1331
1332 /* Do not allow writes past end of device */
1333 if (unlikely((to + len) > mtd->size)) {
1334 printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
1335 return -EINVAL;
1336 }
1337
1338 /* Reject writes, which are not page aligned */
1339 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
1340 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
1341 return -EINVAL;
1342 }
1343
1344 column = to & (mtd->writesize - 1);
1345
1346 /* Loop until all data write */
1347 while (written < len) {
1348 int thislen = min_t(int, mtd->writesize - column, len - written);
1349 u_char *wbuf = (u_char *) buf;
1350
1351 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
1352
1353 /* Partial page write */
1354 subpage = thislen < mtd->writesize;
1355 if (subpage) {
1356 memset(this->page_buf, 0xff, mtd->writesize);
1357 memcpy(this->page_buf + column, buf, thislen);
1358 wbuf = this->page_buf;
1359 }
1360
1361 this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
1362 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
1363
1364 this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
1365
1366 onenand_panic_wait(mtd);
1367
1368 /* In partial page write we don't update bufferram */
1369 onenand_update_bufferram(mtd, to, !ret && !subpage);
1370 if (ONENAND_IS_2PLANE(this)) {
1371 ONENAND_SET_BUFFERRAM1(this);
1372 onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
1373 }
1374
1375 if (ret) {
1376 printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
1377 break;
1378 }
1379
1380 written += thislen;
1381
1382 if (written == len)
1383 break;
1384
1385 column = 0;
1386 to += thislen;
1387 buf += thislen;
1388 }
1389
1390 *retlen = written;
1391 return ret;
1392}
1393
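As a usage illustration: the new mtd->panic_write hook is meant to be callable with interrupts disabled, which is how the mtdoops changes elsewhere in this series use it. A minimal sketch, assuming the driver registered the hook (this patch does so for OneNAND); the helper name and fixed offset are illustrative, not part of the patch:

	static void dump_log_panic(struct mtd_info *mtd, loff_t to,
				   const u_char *log, size_t len)
	{
		size_t retlen = 0;
		int err;

		if (!mtd->panic_write)
			return;	/* no panic-safe path on this device */

		err = mtd->panic_write(mtd, to, len, &retlen, log);
		if (err || retlen != len)
			printk(KERN_ERR "panic write failed: %d\n", err);
	}
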
1276/** 1394/**
1277 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer 1395 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
1278 * @param mtd MTD device structure 1396 * @param mtd MTD device structure
@@ -1419,7 +1537,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1419 } 1537 }
1420 1538
1421 /* Only check verify write turn on */ 1539 /* Only check verify write turn on */
1422 ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen); 1540 ret = onenand_verify(mtd, buf, to, thislen);
1423 if (ret) { 1541 if (ret) {
1424 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1542 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
1425 break; 1543 break;
@@ -1435,9 +1553,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1435 buf += thislen; 1553 buf += thislen;
1436 } 1554 }
1437 1555
1438 /* Deselect and wake up anyone waiting on the device */
1439 onenand_release_device(mtd);
1440
1441 ops->retlen = written; 1556 ops->retlen = written;
1442 1557
1443 return ret; 1558 return ret;
@@ -2148,7 +2263,7 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2148 2263
2149 *retlen = 0; 2264 *retlen = 0;
2150 2265
2151 density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2266 density = onenand_get_density(this->device_id);
2152 if (density < ONENAND_DEVICE_DENSITY_512Mb) 2267 if (density < ONENAND_DEVICE_DENSITY_512Mb)
2153 otp_pages = 20; 2268 otp_pages = 20;
2154 else 2269 else
@@ -2299,7 +2414,8 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2299static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 2414static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2300 size_t len) 2415 size_t len)
2301{ 2416{
2302 unsigned char oob_buf[64]; 2417 struct onenand_chip *this = mtd->priv;
2418 u_char *oob_buf = this->oob_buf;
2303 size_t retlen; 2419 size_t retlen;
2304 int ret; 2420 int ret;
2305 2421
@@ -2339,7 +2455,7 @@ static void onenand_check_features(struct mtd_info *mtd)
2339 unsigned int density, process; 2455 unsigned int density, process;
2340 2456
2341 /* Lock scheme depends on density and process */ 2457 /* Lock scheme depends on density and process */
2342 density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2458 density = onenand_get_density(this->device_id);
2343 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 2459 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
2344 2460
2345 /* Lock scheme */ 2461 /* Lock scheme */
@@ -2388,7 +2504,7 @@ static void onenand_print_device_info(int device, int version)
2388 vcc = device & ONENAND_DEVICE_VCC_MASK; 2504 vcc = device & ONENAND_DEVICE_VCC_MASK;
2389 demuxed = device & ONENAND_DEVICE_IS_DEMUX; 2505 demuxed = device & ONENAND_DEVICE_IS_DEMUX;
2390 ddp = device & ONENAND_DEVICE_IS_DDP; 2506 ddp = device & ONENAND_DEVICE_IS_DDP;
2391 density = device >> ONENAND_DEVICE_DENSITY_SHIFT; 2507 density = onenand_get_density(device);
2392 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n", 2508 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
2393 demuxed ? "" : "Muxed ", 2509 demuxed ? "" : "Muxed ",
2394 ddp ? "(DDP)" : "", 2510 ddp ? "(DDP)" : "",
@@ -2480,7 +2596,7 @@ static int onenand_probe(struct mtd_info *mtd)
2480 this->device_id = dev_id; 2596 this->device_id = dev_id;
2481 this->version_id = ver_id; 2597 this->version_id = ver_id;
2482 2598
2483 density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2599 density = onenand_get_density(dev_id);
2484 this->chipsize = (16 << density) << 20; 2600 this->chipsize = (16 << density) << 20;
2485 /* Set density mask. it is used for DDP */ 2601 /* Set density mask. it is used for DDP */
2486 if (ONENAND_IS_DDP(this)) 2602 if (ONENAND_IS_DDP(this))
@@ -2664,6 +2780,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
2664 mtd->write = onenand_write; 2780 mtd->write = onenand_write;
2665 mtd->read_oob = onenand_read_oob; 2781 mtd->read_oob = onenand_read_oob;
2666 mtd->write_oob = onenand_write_oob; 2782 mtd->write_oob = onenand_write_oob;
2783 mtd->panic_write = onenand_panic_write;
2667#ifdef CONFIG_MTD_ONENAND_OTP 2784#ifdef CONFIG_MTD_ONENAND_OTP
2668 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 2785 mtd->get_fact_prot_info = onenand_get_fact_prot_info;
2669 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg; 2786 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index a61351f88ec0..47474903263c 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
59 static char nullstring[] = "unallocated"; 59 static char nullstring[] = "unallocated";
60#endif 60#endif
61 61
62 if ( directory < 0 ) {
63 offset = master->size + directory * master->erasesize;
64 while (master->block_isbad &&
65 master->block_isbad(master, offset)) {
66 if (!offset) {
67 nogood:
68 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
69 return -EIO;
70 }
71 offset -= master->erasesize;
72 }
73 } else {
74 offset = directory * master->erasesize;
75 while (master->block_isbad &&
76 master->block_isbad(master, offset)) {
77 offset += master->erasesize;
78 if (offset == master->size)
79 goto nogood;
80 }
81 }
62 buf = vmalloc(master->erasesize); 82 buf = vmalloc(master->erasesize);
63 83
64 if (!buf) 84 if (!buf)
65 return -ENOMEM; 85 return -ENOMEM;
66 86
67 if ( directory < 0 )
68 offset = master->size + directory*master->erasesize;
69 else
70 offset = directory*master->erasesize;
71
72 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 87 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
73 master->name, offset); 88 master->name, offset);
74 89
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1a..6ac81e35355c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
21 */ 21 */
22 22
23/* 23/*
24 * This file includes UBI initialization and building of UBI devices. At the 24 * This file includes UBI initialization and building of UBI devices.
25 * moment UBI devices may only be added while UBI is initialized, but dynamic 25 *
26 * device add/remove functionality is planned. Also, at the moment we only 26 * When UBI is initialized, it attaches all the MTD devices specified as the
27 * attach UBI devices by scanning, which will become a bottleneck when flashes 27 * module load parameters or the kernel boot parameters. If MTD devices were
 28 * reach certain large size. Then one may improve UBI and add other methods. 28 * not specified, UBI does not attach any MTD device, but it is possible to do
29 * later using the "UBI control device".
30 *
31 * At the moment we only attach UBI devices by scanning, which will become a
 32 * bottleneck when flashes reach a certain large size. Then one may improve UBI
33 * and add other methods, although it does not seem to be easy to do.
29 */ 34 */
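For reference, the parameter format this comment refers to (taken from the MODULE_PARM_DESC text updated at the end of this patch; the device name is illustrative):

	ubi.mtd=content,1984 ubi.mtd=4          (kernel command line, UBI built in)
	modprobe ubi mtd=content,1984 mtd=4     (UBI built as a module)

The optional second field is the VID header offset; the former third field (data offset) is removed by this patch.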
30 35
31#include <linux/err.h> 36#include <linux/err.h>
@@ -33,7 +38,9 @@
33#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
34#include <linux/stringify.h> 39#include <linux/stringify.h>
35#include <linux/stat.h> 40#include <linux/stat.h>
41#include <linux/miscdevice.h>
36#include <linux/log2.h> 42#include <linux/log2.h>
43#include <linux/kthread.h>
37#include "ubi.h" 44#include "ubi.h"
38 45
39/* Maximum length of the 'mtd=' parameter */ 46/* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
43 * struct mtd_dev_param - MTD device parameter description data structure. 50 * struct mtd_dev_param - MTD device parameter description data structure.
44 * @name: MTD device name or number string 51 * @name: MTD device name or number string
45 * @vid_hdr_offs: VID header offset 52 * @vid_hdr_offs: VID header offset
46 * @data_offs: data offset
47 */ 53 */
48struct mtd_dev_param 54struct mtd_dev_param
49{ 55{
50 char name[MTD_PARAM_LEN_MAX]; 56 char name[MTD_PARAM_LEN_MAX];
51 int vid_hdr_offs; 57 int vid_hdr_offs;
52 int data_offs;
53}; 58};
54 59
55/* Numbers of elements set in the @mtd_dev_param array */ 60/* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,27 @@ static int mtd_devs = 0;
58/* MTD devices specification parameters */ 63/* MTD devices specification parameters */
59static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 64static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
60 65
61/* Number of UBI devices in system */ 66/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
62int ubi_devices_cnt; 67struct class *ubi_class;
68
69/* Slab cache for wear-leveling entries */
70struct kmem_cache *ubi_wl_entry_slab;
71
72/* UBI control character device */
73static struct miscdevice ubi_ctrl_cdev = {
74 .minor = MISC_DYNAMIC_MINOR,
75 .name = "ubi_ctrl",
76 .fops = &ubi_ctrl_cdev_operations,
77};
63 78
64/* All UBI devices in system */ 79/* All UBI devices in system */
65struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 80static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
66 81
67/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 82/* Serializes UBI devices creations and removals */
68struct class *ubi_class; 83DEFINE_MUTEX(ubi_devices_mutex);
84
85/* Protects @ubi_devices and @ubi->ref_count */
86static DEFINE_SPINLOCK(ubi_devices_lock);
69 87
70/* "Show" method for files in '/<sysfs>/class/ubi/' */ 88/* "Show" method for files in '/<sysfs>/class/ubi/' */
71static ssize_t ubi_version_show(struct class *class, char *buf) 89static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +119,150 @@ static struct device_attribute dev_min_io_size =
101 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); 119 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
102static struct device_attribute dev_bgt_enabled = 120static struct device_attribute dev_bgt_enabled =
103 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 121 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
122static struct device_attribute dev_mtd_num =
123 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
124
125/**
126 * ubi_get_device - get UBI device.
127 * @ubi_num: UBI device number
128 *
129 * This function returns UBI device description object for UBI device number
130 * @ubi_num, or %NULL if the device does not exist. This function increases the
131 * device reference count to prevent removal of the device. In other words, the
132 * device cannot be removed if its reference count is not zero.
133 */
134struct ubi_device *ubi_get_device(int ubi_num)
135{
136 struct ubi_device *ubi;
137
138 spin_lock(&ubi_devices_lock);
139 ubi = ubi_devices[ubi_num];
140 if (ubi) {
141 ubi_assert(ubi->ref_count >= 0);
142 ubi->ref_count += 1;
143 get_device(&ubi->dev);
144 }
145 spin_unlock(&ubi_devices_lock);
146
147 return ubi;
148}
149
150/**
151 * ubi_put_device - drop an UBI device reference.
152 * @ubi: UBI device description object
153 */
154void ubi_put_device(struct ubi_device *ubi)
155{
156 spin_lock(&ubi_devices_lock);
157 ubi->ref_count -= 1;
158 put_device(&ubi->dev);
159 spin_unlock(&ubi_devices_lock);
160}
161
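A sketch of the intended get/put pairing; the work done while the reference is held is illustrative:

	static int use_ubi_device(int ubi_num)
	{
		struct ubi_device *ubi = ubi_get_device(ubi_num);

		if (!ubi)
			return -ENODEV;	/* removed, or never existed */
		/* Safe to dereference @ubi here: the elevated ref_count
		 * makes ubi_detach_mtd_dev() fail with -EBUSY. */
		ubi_put_device(ubi);
		return 0;
	}
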
162/**
163 * ubi_get_by_major - get UBI device description object by character device
164 * major number.
165 * @major: major number
166 *
167 * This function is similar to 'ubi_get_device()', but it searches the device
168 * by its major number.
169 */
170struct ubi_device *ubi_get_by_major(int major)
171{
172 int i;
173 struct ubi_device *ubi;
174
175 spin_lock(&ubi_devices_lock);
176 for (i = 0; i < UBI_MAX_DEVICES; i++) {
177 ubi = ubi_devices[i];
178 if (ubi && MAJOR(ubi->cdev.dev) == major) {
179 ubi_assert(ubi->ref_count >= 0);
180 ubi->ref_count += 1;
181 get_device(&ubi->dev);
182 spin_unlock(&ubi_devices_lock);
183 return ubi;
184 }
185 }
186 spin_unlock(&ubi_devices_lock);
187
188 return NULL;
189}
190
191/**
192 * ubi_major2num - get UBI device number by character device major number.
193 * @major: major number
194 *
 195 * This function searches for the UBI device number by its major number. If the UBI
196 * device was not found, this function returns -ENODEV, otherwise the UBI device
197 * number is returned.
198 */
199int ubi_major2num(int major)
200{
201 int i, ubi_num = -ENODEV;
202
203 spin_lock(&ubi_devices_lock);
204 for (i = 0; i < UBI_MAX_DEVICES; i++) {
205 struct ubi_device *ubi = ubi_devices[i];
206
207 if (ubi && MAJOR(ubi->cdev.dev) == major) {
208 ubi_num = ubi->ubi_num;
209 break;
210 }
211 }
212 spin_unlock(&ubi_devices_lock);
213
214 return ubi_num;
215}
104 216
105/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ 217/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
106static ssize_t dev_attribute_show(struct device *dev, 218static ssize_t dev_attribute_show(struct device *dev,
107 struct device_attribute *attr, char *buf) 219 struct device_attribute *attr, char *buf)
108{ 220{
109 const struct ubi_device *ubi; 221 ssize_t ret;
222 struct ubi_device *ubi;
110 223
224 /*
225 * The below code looks weird, but it actually makes sense. We get the
226 * UBI device reference from the contained 'struct ubi_device'. But it
227 * is unclear if the device was removed or not yet. Indeed, if the
228 * device was removed before we increased its reference count,
 229 * 'ubi_get_device()' will return NULL and we fail with -ENODEV.
230 *
231 * Remember, 'struct ubi_device' is freed in the release function, so
232 * we still can use 'ubi->ubi_num'.
233 */
111 ubi = container_of(dev, struct ubi_device, dev); 234 ubi = container_of(dev, struct ubi_device, dev);
235 ubi = ubi_get_device(ubi->ubi_num);
236 if (!ubi)
237 return -ENODEV;
238
112 if (attr == &dev_eraseblock_size) 239 if (attr == &dev_eraseblock_size)
113 return sprintf(buf, "%d\n", ubi->leb_size); 240 ret = sprintf(buf, "%d\n", ubi->leb_size);
114 else if (attr == &dev_avail_eraseblocks) 241 else if (attr == &dev_avail_eraseblocks)
115 return sprintf(buf, "%d\n", ubi->avail_pebs); 242 ret = sprintf(buf, "%d\n", ubi->avail_pebs);
116 else if (attr == &dev_total_eraseblocks) 243 else if (attr == &dev_total_eraseblocks)
117 return sprintf(buf, "%d\n", ubi->good_peb_count); 244 ret = sprintf(buf, "%d\n", ubi->good_peb_count);
118 else if (attr == &dev_volumes_count) 245 else if (attr == &dev_volumes_count)
119 return sprintf(buf, "%d\n", ubi->vol_count); 246 ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
120 else if (attr == &dev_max_ec) 247 else if (attr == &dev_max_ec)
121 return sprintf(buf, "%d\n", ubi->max_ec); 248 ret = sprintf(buf, "%d\n", ubi->max_ec);
122 else if (attr == &dev_reserved_for_bad) 249 else if (attr == &dev_reserved_for_bad)
123 return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 250 ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
124 else if (attr == &dev_bad_peb_count) 251 else if (attr == &dev_bad_peb_count)
125 return sprintf(buf, "%d\n", ubi->bad_peb_count); 252 ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
126 else if (attr == &dev_max_vol_count) 253 else if (attr == &dev_max_vol_count)
127 return sprintf(buf, "%d\n", ubi->vtbl_slots); 254 ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
128 else if (attr == &dev_min_io_size) 255 else if (attr == &dev_min_io_size)
129 return sprintf(buf, "%d\n", ubi->min_io_size); 256 ret = sprintf(buf, "%d\n", ubi->min_io_size);
130 else if (attr == &dev_bgt_enabled) 257 else if (attr == &dev_bgt_enabled)
131 return sprintf(buf, "%d\n", ubi->thread_enabled); 258 ret = sprintf(buf, "%d\n", ubi->thread_enabled);
259 else if (attr == &dev_mtd_num)
260 ret = sprintf(buf, "%d\n", ubi->mtd->index);
132 else 261 else
133 BUG(); 262 ret = -EINVAL;
134 263
135 return 0; 264 ubi_put_device(ubi);
265 return ret;
136} 266}
137 267
138/* Fake "release" method for UBI devices */ 268/* Fake "release" method for UBI devices */
@@ -150,68 +280,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
150 int err; 280 int err;
151 281
152 ubi->dev.release = dev_release; 282 ubi->dev.release = dev_release;
153 ubi->dev.devt = MKDEV(ubi->major, 0); 283 ubi->dev.devt = ubi->cdev.dev;
154 ubi->dev.class = ubi_class; 284 ubi->dev.class = ubi_class;
155 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 285 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
156 err = device_register(&ubi->dev); 286 err = device_register(&ubi->dev);
157 if (err) 287 if (err)
158 goto out; 288 return err;
159 289
160 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 290 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
161 if (err) 291 if (err)
162 goto out_unregister; 292 return err;
163 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 293 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
164 if (err) 294 if (err)
165 goto out_eraseblock_size; 295 return err;
166 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); 296 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
167 if (err) 297 if (err)
168 goto out_avail_eraseblocks; 298 return err;
169 err = device_create_file(&ubi->dev, &dev_volumes_count); 299 err = device_create_file(&ubi->dev, &dev_volumes_count);
170 if (err) 300 if (err)
171 goto out_total_eraseblocks; 301 return err;
172 err = device_create_file(&ubi->dev, &dev_max_ec); 302 err = device_create_file(&ubi->dev, &dev_max_ec);
173 if (err) 303 if (err)
174 goto out_volumes_count; 304 return err;
175 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 305 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
176 if (err) 306 if (err)
177 goto out_volumes_max_ec; 307 return err;
178 err = device_create_file(&ubi->dev, &dev_bad_peb_count); 308 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
179 if (err) 309 if (err)
180 goto out_reserved_for_bad; 310 return err;
181 err = device_create_file(&ubi->dev, &dev_max_vol_count); 311 err = device_create_file(&ubi->dev, &dev_max_vol_count);
182 if (err) 312 if (err)
183 goto out_bad_peb_count; 313 return err;
184 err = device_create_file(&ubi->dev, &dev_min_io_size); 314 err = device_create_file(&ubi->dev, &dev_min_io_size);
185 if (err) 315 if (err)
186 goto out_max_vol_count; 316 return err;
187 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 317 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
188 if (err) 318 if (err)
189 goto out_min_io_size; 319 return err;
190 320 err = device_create_file(&ubi->dev, &dev_mtd_num);
191 return 0;
192
193out_min_io_size:
194 device_remove_file(&ubi->dev, &dev_min_io_size);
195out_max_vol_count:
196 device_remove_file(&ubi->dev, &dev_max_vol_count);
197out_bad_peb_count:
198 device_remove_file(&ubi->dev, &dev_bad_peb_count);
199out_reserved_for_bad:
200 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
201out_volumes_max_ec:
202 device_remove_file(&ubi->dev, &dev_max_ec);
203out_volumes_count:
204 device_remove_file(&ubi->dev, &dev_volumes_count);
205out_total_eraseblocks:
206 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
207out_avail_eraseblocks:
208 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
209out_eraseblock_size:
210 device_remove_file(&ubi->dev, &dev_eraseblock_size);
211out_unregister:
212 device_unregister(&ubi->dev);
213out:
214 ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
215 return err; 321 return err;
216} 322}
217 323
@@ -221,6 +327,7 @@ out:
221 */ 327 */
222static void ubi_sysfs_close(struct ubi_device *ubi) 328static void ubi_sysfs_close(struct ubi_device *ubi)
223{ 329{
330 device_remove_file(&ubi->dev, &dev_mtd_num);
224 device_remove_file(&ubi->dev, &dev_bgt_enabled); 331 device_remove_file(&ubi->dev, &dev_bgt_enabled);
225 device_remove_file(&ubi->dev, &dev_min_io_size); 332 device_remove_file(&ubi->dev, &dev_min_io_size);
226 device_remove_file(&ubi->dev, &dev_max_vol_count); 333 device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +351,7 @@ static void kill_volumes(struct ubi_device *ubi)
244 351
245 for (i = 0; i < ubi->vtbl_slots; i++) 352 for (i = 0; i < ubi->vtbl_slots; i++)
246 if (ubi->volumes[i]) 353 if (ubi->volumes[i])
247 ubi_free_volume(ubi, i); 354 ubi_free_volume(ubi, ubi->volumes[i]);
248} 355}
249 356
250/** 357/**
@@ -259,9 +366,6 @@ static int uif_init(struct ubi_device *ubi)
259 int i, err; 366 int i, err;
260 dev_t dev; 367 dev_t dev;
261 368
262 mutex_init(&ubi->vtbl_mutex);
263 spin_lock_init(&ubi->volumes_lock);
264
265 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 369 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
266 370
267 /* 371 /*
@@ -278,39 +382,40 @@ static int uif_init(struct ubi_device *ubi)
278 return err; 382 return err;
279 } 383 }
280 384
385 ubi_assert(MINOR(dev) == 0);
281 cdev_init(&ubi->cdev, &ubi_cdev_operations); 386 cdev_init(&ubi->cdev, &ubi_cdev_operations);
282 ubi->major = MAJOR(dev); 387 dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
283 dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
284 ubi->cdev.owner = THIS_MODULE; 388 ubi->cdev.owner = THIS_MODULE;
285 389
286 dev = MKDEV(ubi->major, 0);
287 err = cdev_add(&ubi->cdev, dev, 1); 390 err = cdev_add(&ubi->cdev, dev, 1);
288 if (err) { 391 if (err) {
289 ubi_err("cannot add character device %s", ubi->ubi_name); 392 ubi_err("cannot add character device");
290 goto out_unreg; 393 goto out_unreg;
291 } 394 }
292 395
293 err = ubi_sysfs_init(ubi); 396 err = ubi_sysfs_init(ubi);
294 if (err) 397 if (err)
295 goto out_cdev; 398 goto out_sysfs;
296 399
297 for (i = 0; i < ubi->vtbl_slots; i++) 400 for (i = 0; i < ubi->vtbl_slots; i++)
298 if (ubi->volumes[i]) { 401 if (ubi->volumes[i]) {
299 err = ubi_add_volume(ubi, i); 402 err = ubi_add_volume(ubi, ubi->volumes[i]);
300 if (err) 403 if (err) {
404 ubi_err("cannot add volume %d", i);
301 goto out_volumes; 405 goto out_volumes;
406 }
302 } 407 }
303 408
304 return 0; 409 return 0;
305 410
306out_volumes: 411out_volumes:
307 kill_volumes(ubi); 412 kill_volumes(ubi);
413out_sysfs:
308 ubi_sysfs_close(ubi); 414 ubi_sysfs_close(ubi);
309out_cdev:
310 cdev_del(&ubi->cdev); 415 cdev_del(&ubi->cdev);
311out_unreg: 416out_unreg:
312 unregister_chrdev_region(MKDEV(ubi->major, 0), 417 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
313 ubi->vtbl_slots + 1); 418 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
314 return err; 419 return err;
315} 420}
316 421
@@ -323,7 +428,7 @@ static void uif_close(struct ubi_device *ubi)
323 kill_volumes(ubi); 428 kill_volumes(ubi);
324 ubi_sysfs_close(ubi); 429 ubi_sysfs_close(ubi);
325 cdev_del(&ubi->cdev); 430 cdev_del(&ubi->cdev);
326 unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); 431 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
327} 432}
328 433
329/** 434/**
@@ -384,9 +489,9 @@ out_si:
384 * assumed: 489 * assumed:
385 * o EC header is always at offset zero - this cannot be changed; 490 * o EC header is always at offset zero - this cannot be changed;
386 * o VID header starts just after the EC header at the closest address 491 * o VID header starts just after the EC header at the closest address
387 * aligned to @io->@hdrs_min_io_size; 492 * aligned to @io->hdrs_min_io_size;
388 * o data starts just after the VID header at the closest address aligned to 493 * o data starts just after the VID header at the closest address aligned to
389 * @io->@min_io_size 494 * @io->min_io_size
390 * 495 *
391 * This function returns zero in case of success and a negative error code in 496 * This function returns zero in case of success and a negative error code in
392 * case of failure. 497 * case of failure.
@@ -407,6 +512,9 @@ static int io_init(struct ubi_device *ubi)
407 return -EINVAL; 512 return -EINVAL;
408 } 513 }
409 514
515 if (ubi->vid_hdr_offset < 0)
516 return -EINVAL;
517
410 /* 518 /*
411 * Note, in this implementation we support MTD devices with 0x7FFFFFFF 519 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
412 * physical eraseblocks maximum. 520 * physical eraseblocks maximum.
@@ -424,7 +532,8 @@ static int io_init(struct ubi_device *ubi)
424 532
425 /* Make sure minimal I/O unit is power of 2 */ 533 /* Make sure minimal I/O unit is power of 2 */
426 if (!is_power_of_2(ubi->min_io_size)) { 534 if (!is_power_of_2(ubi->min_io_size)) {
427 ubi_err("bad min. I/O unit"); 535 ubi_err("min. I/O unit (%d) is not power of 2",
536 ubi->min_io_size);
428 return -EINVAL; 537 return -EINVAL;
429 } 538 }
430 539
@@ -453,10 +562,8 @@ static int io_init(struct ubi_device *ubi)
453 } 562 }
454 563
455 /* Similar for the data offset */ 564 /* Similar for the data offset */
456 if (ubi->leb_start == 0) { 565 ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
457 ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; 566 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
458 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
459 }
460 567
461 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 568 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
462 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 569 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
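A worked example of the simplified leb_start computation above: with the vid_hdr_offs value from the module-parameter example at the end of this patch (1984) and a 2048-byte min_io_size, leb_start = ALIGN(1984 + 64, 2048) = 2048 (both UBI on-flash headers are 64 bytes), so LEB data starts at the second NAND page.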
@@ -514,76 +621,147 @@ static int io_init(struct ubi_device *ubi)
514} 621}
515 622
516/** 623/**
517 * attach_mtd_dev - attach an MTD device. 624 * autoresize - re-size the volume which has the "auto-resize" flag set.
518 * @mtd_dev: MTD device name or number string 625 * @ubi: UBI device description object
519 * @vid_hdr_offset: VID header offset 626 * @vol_id: ID of the volume to re-size
520 * @data_offset: data offset
521 * 627 *
522 * This function attaches an MTD device to UBI. It first treats @mtd_dev as the 628 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
523 * MTD device name, and tries to open it by this name. If it is unable to open, 629 * the volume table to the largest possible size. See comments in ubi-header.h
524 * it tries to convert @mtd_dev to an integer and open the MTD device by its 630 * for more description of the flag. Returns zero in case of success and a
525 * number. Returns zero in case of success and a negative error code in case of 631 * negative error code in case of failure.
526 * failure.
527 */ 632 */
528static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, 633static int autoresize(struct ubi_device *ubi, int vol_id)
529 int data_offset)
530{ 634{
531 struct ubi_device *ubi; 635 struct ubi_volume_desc desc;
532 struct mtd_info *mtd; 636 struct ubi_volume *vol = ubi->volumes[vol_id];
533 int i, err; 637 int err, old_reserved_pebs = vol->reserved_pebs;
534 638
535 mtd = get_mtd_device_nm(mtd_dev); 639 /*
536 if (IS_ERR(mtd)) { 640 * Clear the auto-resize flag in the volume in-memory copy of the
 537 int mtd_num; 641 * volume table, and 'ubi_resize_volume()' will propagate this change
538 char *endp; 642 * to the flash.
643 */
644 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
539 645
540 if (PTR_ERR(mtd) != -ENODEV) 646 if (ubi->avail_pebs == 0) {
541 return PTR_ERR(mtd); 647 struct ubi_vtbl_record vtbl_rec;
542 648
543 /* 649 /*
 544 * Probably this is not MTD device name but MTD device number - 650 * No available PEBs to re-size the volume, clear the flag on
545 * check this out. 651 * flash and exit.
546 */ 652 */
547 mtd_num = simple_strtoul(mtd_dev, &endp, 0); 653 memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
548 if (*endp != '\0' || mtd_dev == endp) { 654 sizeof(struct ubi_vtbl_record));
549 ubi_err("incorrect MTD device: \"%s\"", mtd_dev); 655 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
550 return -ENODEV; 656 if (err)
 551 } 657 ubi_err("cannot clear auto-resize flag for volume %d",
552 658 vol_id);
553 mtd = get_mtd_device(NULL, mtd_num); 659 } else {
554 if (IS_ERR(mtd)) 660 desc.vol = vol;
555 return PTR_ERR(mtd); 661 err = ubi_resize_volume(&desc,
662 old_reserved_pebs + ubi->avail_pebs);
663 if (err)
664 ubi_err("cannot auto-resize volume %d", vol_id);
556 } 665 }
557 666
558 /* Check if we already have the same MTD device attached */ 667 if (err)
559 for (i = 0; i < ubi_devices_cnt; i++) 668 return err;
560 if (ubi_devices[i]->mtd->index == mtd->index) { 669
561 ubi_err("mtd%d is already attached to ubi%d", 670 ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
671 vol->name, old_reserved_pebs, vol->reserved_pebs);
672 return 0;
673}
674
675/**
676 * ubi_attach_mtd_dev - attach an MTD device.
 677 * @mtd: MTD device description object
678 * @ubi_num: number to assign to the new UBI device
679 * @vid_hdr_offset: VID header offset
680 *
 681 * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
 682 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 683 * which case this function finds a vacant device number and assigns it
684 * automatically. Returns the new UBI device number in case of success and a
685 * negative error code in case of failure.
686 *
 687 * Note, invocations of this function have to be serialized by the
688 * @ubi_devices_mutex.
689 */
690int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
691{
692 struct ubi_device *ubi;
693 int i, err;
694
695 /*
696 * Check if we already have the same MTD device attached.
697 *
698 * Note, this function assumes that UBI devices creations and deletions
699 * are serialized, so it does not take the &ubi_devices_lock.
700 */
701 for (i = 0; i < UBI_MAX_DEVICES; i++) {
702 ubi = ubi_devices[i];
703 if (ubi && mtd->index == ubi->mtd->index) {
704 dbg_err("mtd%d is already attached to ubi%d",
562 mtd->index, i); 705 mtd->index, i);
563 err = -EINVAL; 706 return -EEXIST;
564 goto out_mtd;
565 } 707 }
708 }
566 709
567 ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), 710 /*
568 GFP_KERNEL); 711 * Make sure this MTD device is not emulated on top of an UBI volume
569 if (!ubi) { 712 * already. Well, generally this recursion works fine, but there are
570 err = -ENOMEM; 713 * different problems like the UBI module takes a reference to itself
571 goto out_mtd; 714 * by attaching (and thus, opening) the emulated MTD device. This
715 * results in inability to unload the module. And in general it makes
716 * no sense to attach emulated MTD devices, so we prohibit this.
717 */
718 if (mtd->type == MTD_UBIVOLUME) {
719 ubi_err("refuse attaching mtd%d - it is already emulated on "
720 "top of UBI", mtd->index);
721 return -EINVAL;
572 } 722 }
573 723
574 ubi->ubi_num = ubi_devices_cnt; 724 if (ubi_num == UBI_DEV_NUM_AUTO) {
575 ubi->mtd = mtd; 725 /* Search for an empty slot in the @ubi_devices array */
726 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
727 if (!ubi_devices[ubi_num])
728 break;
729 if (ubi_num == UBI_MAX_DEVICES) {
730 dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
731 return -ENFILE;
732 }
733 } else {
734 if (ubi_num >= UBI_MAX_DEVICES)
735 return -EINVAL;
736
737 /* Make sure ubi_num is not busy */
738 if (ubi_devices[ubi_num]) {
739 dbg_err("ubi%d already exists", ubi_num);
740 return -EEXIST;
741 }
742 }
576 743
577 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", 744 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
578 ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); 745 if (!ubi)
746 return -ENOMEM;
579 747
748 ubi->mtd = mtd;
749 ubi->ubi_num = ubi_num;
580 ubi->vid_hdr_offset = vid_hdr_offset; 750 ubi->vid_hdr_offset = vid_hdr_offset;
581 ubi->leb_start = data_offset; 751 ubi->autoresize_vol_id = -1;
752
753 mutex_init(&ubi->buf_mutex);
754 mutex_init(&ubi->ckvol_mutex);
755 mutex_init(&ubi->volumes_mutex);
756 spin_lock_init(&ubi->volumes_lock);
757
758 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
759 mtd->index, ubi_num, vid_hdr_offset);
760
582 err = io_init(ubi); 761 err = io_init(ubi);
583 if (err) 762 if (err)
584 goto out_free; 763 goto out_free;
585 764
586 mutex_init(&ubi->buf_mutex);
587 ubi->peb_buf1 = vmalloc(ubi->peb_size); 765 ubi->peb_buf1 = vmalloc(ubi->peb_size);
588 if (!ubi->peb_buf1) 766 if (!ubi->peb_buf1)
589 goto out_free; 767 goto out_free;
@@ -605,12 +783,26 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
605 goto out_free; 783 goto out_free;
606 } 784 }
607 785
786 if (ubi->autoresize_vol_id != -1) {
787 err = autoresize(ubi, ubi->autoresize_vol_id);
788 if (err)
789 goto out_detach;
790 }
791
608 err = uif_init(ubi); 792 err = uif_init(ubi);
609 if (err) 793 if (err)
610 goto out_detach; 794 goto out_detach;
611 795
612 ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); 796 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
613 ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); 797 if (IS_ERR(ubi->bgt_thread)) {
798 err = PTR_ERR(ubi->bgt_thread);
799 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
800 err);
801 goto out_uif;
802 }
803
804 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
805 ubi_msg("MTD device name: \"%s\"", mtd->name);
614 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 806 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
615 ubi_msg("physical eraseblock size: %d bytes (%d KiB)", 807 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
616 ubi->peb_size, ubi->peb_size >> 10); 808 ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +830,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
638 wake_up_process(ubi->bgt_thread); 830 wake_up_process(ubi->bgt_thread);
639 } 831 }
640 832
641 ubi_devices_cnt += 1; 833 ubi_devices[ubi_num] = ubi;
642 return 0; 834 return ubi_num;
643 835
836out_uif:
837 uif_close(ubi);
644out_detach: 838out_detach:
645 ubi_eba_close(ubi); 839 ubi_eba_close(ubi);
646 ubi_wl_close(ubi); 840 ubi_wl_close(ubi);
@@ -652,21 +846,58 @@ out_free:
652 vfree(ubi->dbg_peb_buf); 846 vfree(ubi->dbg_peb_buf);
653#endif 847#endif
654 kfree(ubi); 848 kfree(ubi);
655out_mtd:
656 put_mtd_device(mtd);
657 ubi_devices[ubi_devices_cnt] = NULL;
658 return err; 849 return err;
659} 850}
660 851
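A sketch of the new attach flow from a caller's point of view, mirroring what ubi_init() below does; the MTD device number and zero VID header offset are illustrative:

	struct mtd_info *mtd = get_mtd_device(NULL, 0);	/* mtd0 */
	int ubi_num;

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	mutex_lock(&ubi_devices_mutex);
	ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0);
	mutex_unlock(&ubi_devices_mutex);
	if (ubi_num < 0)
		put_mtd_device(mtd);	/* attach failed - drop our reference */
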
661/** 852/**
662 * detach_mtd_dev - detach an MTD device. 853 * ubi_detach_mtd_dev - detach an MTD device.
663 * @ubi: UBI device description object 854 * @ubi_num: UBI device number to detach from
855 * @anyway: detach MTD even if device reference count is not zero
856 *
 857 * This function destroys UBI device number @ubi_num and detaches the
858 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
859 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
860 * exist.
861 *
 862 * Note, invocations of this function have to be serialized by the
863 * @ubi_devices_mutex.
664 */ 864 */
665static void detach_mtd_dev(struct ubi_device *ubi) 865int ubi_detach_mtd_dev(int ubi_num, int anyway)
666{ 866{
667 int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; 867 struct ubi_device *ubi;
868
869 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
870 return -EINVAL;
871
872 spin_lock(&ubi_devices_lock);
873 ubi = ubi_devices[ubi_num];
874 if (!ubi) {
875 spin_unlock(&ubi_devices_lock);
876 return -EINVAL;
877 }
878
879 if (ubi->ref_count) {
880 if (!anyway) {
881 spin_unlock(&ubi_devices_lock);
882 return -EBUSY;
883 }
884 /* This may only happen if there is a bug */
885 ubi_err("%s reference count %d, destroy anyway",
886 ubi->ubi_name, ubi->ref_count);
887 }
888 ubi_devices[ubi_num] = NULL;
889 spin_unlock(&ubi_devices_lock);
668 890
891 ubi_assert(ubi_num == ubi->ubi_num);
669 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 892 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
893
894 /*
895 * Before freeing anything, we have to stop the background thread to
896 * prevent it from doing anything on this device while we are freeing.
897 */
898 if (ubi->bgt_thread)
899 kthread_stop(ubi->bgt_thread);
900
670 uif_close(ubi); 901 uif_close(ubi);
671 ubi_eba_close(ubi); 902 ubi_eba_close(ubi);
672 ubi_wl_close(ubi); 903 ubi_wl_close(ubi);
@@ -677,11 +908,37 @@ static void detach_mtd_dev(struct ubi_device *ubi)
677#ifdef CONFIG_MTD_UBI_DEBUG 908#ifdef CONFIG_MTD_UBI_DEBUG
678 vfree(ubi->dbg_peb_buf); 909 vfree(ubi->dbg_peb_buf);
679#endif 910#endif
680 kfree(ubi_devices[ubi_num]); 911 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
681 ubi_devices[ubi_num] = NULL; 912 kfree(ubi);
682 ubi_devices_cnt -= 1; 913 return 0;
683 ubi_assert(ubi_devices_cnt >= 0); 914}
684 ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); 915
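The matching detach sequence, as ubi_exit() below performs it; pass 0 for @anyway to fail with -EBUSY instead of forcing removal while the device is still referenced:

	mutex_lock(&ubi_devices_mutex);
	err = ubi_detach_mtd_dev(ubi_num, 0);
	mutex_unlock(&ubi_devices_mutex);
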
916/**
 917 * open_mtd_device - open an MTD device by its name or number.
918 * @mtd_dev: name or number of the device
919 *
 920 * This function tries to open an MTD device described by the @mtd_dev string,
 921 * which is first treated as an ASCII number; if that fails, it is
 922 * treated as an MTD device name. Returns the MTD device description object in case of
923 * success and a negative error code in case of failure.
924 */
925static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
926{
927 struct mtd_info *mtd;
928 int mtd_num;
929 char *endp;
930
931 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
932 if (*endp != '\0' || mtd_dev == endp) {
933 /*
934 * This does not look like an ASCII integer, probably this is
 935 * an MTD device name.
936 */
937 mtd = get_mtd_device_nm(mtd_dev);
938 } else
939 mtd = get_mtd_device(NULL, mtd_num);
940
941 return mtd;
685} 942}
686 943
687static int __init ubi_init(void) 944static int __init ubi_init(void)
@@ -693,47 +950,96 @@ static int __init ubi_init(void)
693 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 950 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
694 951
695 if (mtd_devs > UBI_MAX_DEVICES) { 952 if (mtd_devs > UBI_MAX_DEVICES) {
696 printk("UBI error: too many MTD devices, maximum is %d\n", 953 printk(KERN_ERR "UBI error: too many MTD devices, "
697 UBI_MAX_DEVICES); 954 "maximum is %d\n", UBI_MAX_DEVICES);
698 return -EINVAL; 955 return -EINVAL;
699 } 956 }
700 957
958 /* Create base sysfs directory and sysfs files */
701 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 959 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
702 if (IS_ERR(ubi_class)) 960 if (IS_ERR(ubi_class)) {
703 return PTR_ERR(ubi_class); 961 err = PTR_ERR(ubi_class);
962 printk(KERN_ERR "UBI error: cannot create UBI class\n");
963 goto out;
964 }
704 965
705 err = class_create_file(ubi_class, &ubi_version); 966 err = class_create_file(ubi_class, &ubi_version);
706 if (err) 967 if (err) {
968 printk(KERN_ERR "UBI error: cannot create sysfs file\n");
707 goto out_class; 969 goto out_class;
970 }
971
972 err = misc_register(&ubi_ctrl_cdev);
973 if (err) {
974 printk(KERN_ERR "UBI error: cannot register device\n");
975 goto out_version;
976 }
977
978 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
979 sizeof(struct ubi_wl_entry),
980 0, 0, NULL);
981 if (!ubi_wl_entry_slab)
982 goto out_dev_unreg;
708 983
709 /* Attach MTD devices */ 984 /* Attach MTD devices */
710 for (i = 0; i < mtd_devs; i++) { 985 for (i = 0; i < mtd_devs; i++) {
711 struct mtd_dev_param *p = &mtd_dev_param[i]; 986 struct mtd_dev_param *p = &mtd_dev_param[i];
987 struct mtd_info *mtd;
712 988
713 cond_resched(); 989 cond_resched();
714 err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); 990
715 if (err) 991 mtd = open_mtd_device(p->name);
992 if (IS_ERR(mtd)) {
993 err = PTR_ERR(mtd);
994 goto out_detach;
995 }
996
997 mutex_lock(&ubi_devices_mutex);
998 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
999 p->vid_hdr_offs);
1000 mutex_unlock(&ubi_devices_mutex);
1001 if (err < 0) {
1002 put_mtd_device(mtd);
1003 printk(KERN_ERR "UBI error: cannot attach %s\n",
1004 p->name);
716 goto out_detach; 1005 goto out_detach;
1006 }
717 } 1007 }
718 1008
719 return 0; 1009 return 0;
720 1010
721out_detach: 1011out_detach:
722 for (k = 0; k < i; k++) 1012 for (k = 0; k < i; k++)
723 detach_mtd_dev(ubi_devices[k]); 1013 if (ubi_devices[k]) {
1014 mutex_lock(&ubi_devices_mutex);
1015 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1016 mutex_unlock(&ubi_devices_mutex);
1017 }
1018 kmem_cache_destroy(ubi_wl_entry_slab);
1019out_dev_unreg:
1020 misc_deregister(&ubi_ctrl_cdev);
1021out_version:
724 class_remove_file(ubi_class, &ubi_version); 1022 class_remove_file(ubi_class, &ubi_version);
725out_class: 1023out_class:
726 class_destroy(ubi_class); 1024 class_destroy(ubi_class);
1025out:
1026 printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
727 return err; 1027 return err;
728} 1028}
729module_init(ubi_init); 1029module_init(ubi_init);
730 1030
731static void __exit ubi_exit(void) 1031static void __exit ubi_exit(void)
732{ 1032{
733 int i, n = ubi_devices_cnt; 1033 int i;
734 1034
735 for (i = 0; i < n; i++) 1035 for (i = 0; i < UBI_MAX_DEVICES; i++)
736 detach_mtd_dev(ubi_devices[i]); 1036 if (ubi_devices[i]) {
1037 mutex_lock(&ubi_devices_mutex);
1038 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1039 mutex_unlock(&ubi_devices_mutex);
1040 }
1041 kmem_cache_destroy(ubi_wl_entry_slab);
1042 misc_deregister(&ubi_ctrl_cdev);
737 class_remove_file(ubi_class, &ubi_version); 1043 class_remove_file(ubi_class, &ubi_version);
738 class_destroy(ubi_class); 1044 class_destroy(ubi_class);
739} 1045}
@@ -754,7 +1060,8 @@ static int __init bytes_str_to_int(const char *str)
754 1060
755 result = simple_strtoul(str, &endp, 0); 1061 result = simple_strtoul(str, &endp, 0);
756 if (str == endp || result < 0) { 1062 if (str == endp || result < 0) {
757 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1063 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1064 str);
758 return -EINVAL; 1065 return -EINVAL;
759 } 1066 }
760 1067
@@ -764,15 +1071,14 @@ static int __init bytes_str_to_int(const char *str)
764 case 'M': 1071 case 'M':
765 result *= 1024; 1072 result *= 1024;
766 case 'K': 1073 case 'K':
767 case 'k':
768 result *= 1024; 1074 result *= 1024;
769 if (endp[1] == 'i' && (endp[2] == '\0' || 1075 if (endp[1] == 'i' && endp[2] == 'B')
770 endp[2] == 'B' || endp[2] == 'b'))
771 endp += 2; 1076 endp += 2;
772 case '\0': 1077 case '\0':
773 break; 1078 break;
774 default: 1079 default:
775 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1080 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1081 str);
776 return -EINVAL; 1082 return -EINVAL;
777 } 1083 }
778 1084
@@ -793,23 +1099,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
793 struct mtd_dev_param *p; 1099 struct mtd_dev_param *p;
794 char buf[MTD_PARAM_LEN_MAX]; 1100 char buf[MTD_PARAM_LEN_MAX];
795 char *pbuf = &buf[0]; 1101 char *pbuf = &buf[0];
796 char *tokens[3] = {NULL, NULL, NULL}; 1102 char *tokens[2] = {NULL, NULL};
1103
1104 if (!val)
1105 return -EINVAL;
797 1106
798 if (mtd_devs == UBI_MAX_DEVICES) { 1107 if (mtd_devs == UBI_MAX_DEVICES) {
799 printk("UBI error: too many parameters, max. is %d\n", 1108 printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
800 UBI_MAX_DEVICES); 1109 UBI_MAX_DEVICES);
801 return -EINVAL; 1110 return -EINVAL;
802 } 1111 }
803 1112
804 len = strnlen(val, MTD_PARAM_LEN_MAX); 1113 len = strnlen(val, MTD_PARAM_LEN_MAX);
805 if (len == MTD_PARAM_LEN_MAX) { 1114 if (len == MTD_PARAM_LEN_MAX) {
806 printk("UBI error: parameter \"%s\" is too long, max. is %d\n", 1115 printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
807 val, MTD_PARAM_LEN_MAX); 1116 "max. is %d\n", val, MTD_PARAM_LEN_MAX);
808 return -EINVAL; 1117 return -EINVAL;
809 } 1118 }
810 1119
811 if (len == 0) { 1120 if (len == 0) {
812 printk("UBI warning: empty 'mtd=' parameter - ignored\n"); 1121 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
1122 "ignored\n");
813 return 0; 1123 return 0;
814 } 1124 }
815 1125
@@ -819,11 +1129,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
819 if (buf[len - 1] == '\n') 1129 if (buf[len - 1] == '\n')
820 buf[len - 1] = '\0'; 1130 buf[len - 1] = '\0';
821 1131
822 for (i = 0; i < 3; i++) 1132 for (i = 0; i < 2; i++)
823 tokens[i] = strsep(&pbuf, ","); 1133 tokens[i] = strsep(&pbuf, ",");
824 1134
825 if (pbuf) { 1135 if (pbuf) {
826 printk("UBI error: too many arguments at \"%s\"\n", val); 1136 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
1137 val);
827 return -EINVAL; 1138 return -EINVAL;
828 } 1139 }
829 1140
@@ -832,13 +1143,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
832 1143
833 if (tokens[1]) 1144 if (tokens[1])
834 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1145 p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
835 if (tokens[2])
836 p->data_offs = bytes_str_to_int(tokens[2]);
837 1146
838 if (p->vid_hdr_offs < 0) 1147 if (p->vid_hdr_offs < 0)
839 return p->vid_hdr_offs; 1148 return p->vid_hdr_offs;
840 if (p->data_offs < 0)
841 return p->data_offs;
842 1149
843 mtd_devs += 1; 1150 mtd_devs += 1;
844 return 0; 1151 return 0;
@@ -846,16 +1153,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
846 1153
847module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1154module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
848MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1155MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
849 "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " 1156 "mtd=<name|num>[,<vid_hdr_offs>].\n"
850 "Multiple \"mtd\" parameters may be specified.\n" 1157 "Multiple \"mtd\" parameters may be specified.\n"
851 "MTD devices may be specified by their number or name. " 1158 "MTD devices may be specified by their number or name.\n"
852 "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " 1159 "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
853 "specify UBI VID header position and data starting " 1160 "header position and data starting position to be used "
854 "position to be used by UBI.\n" 1161 "by UBI.\n"
855 "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" 1162 "Example: mtd=content,1984 mtd=4 - attach MTD device"
856 "with name content using VID header offset 1984 and data " 1163 "with name \"content\" using VID header offset 1984, and "
857 "start 2048, and MTD device number 4 using default " 1164 "MTD device number 4 with default VID header offset.");
858 "offsets");
859 1165
860MODULE_VERSION(__stringify(UBI_VERSION)); 1166MODULE_VERSION(__stringify(UBI_VERSION));
861MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1167MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c52..9d6aae5449b6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
28 * 28 *
29 * Major and minor numbers are assigned dynamically to both UBI and volume 29 * Major and minor numbers are assigned dynamically to both UBI and volume
30 * character devices. 30 * character devices.
31 *
 32 * Well, there is a third kind of character device - the UBI control
 33 * character device, which allows one to manipulate UBI devices - create and
 34 * delete them. In other words, it is used for attaching and detaching MTD
35 * devices.
31 */ 36 */
32 37
33#include <linux/module.h> 38#include <linux/module.h>
@@ -39,34 +44,6 @@
39#include <asm/div64.h> 44#include <asm/div64.h>
40#include "ubi.h" 45#include "ubi.h"
41 46
42/*
43 * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
44 * logical eraseblock erase is a debug-only feature).
45 */
46#define UBI_CDEV_IOC_MAX_SEQ 2
47#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
48#define VOL_CDEV_IOC_MAX_SEQ 1
49#else
50#define VOL_CDEV_IOC_MAX_SEQ 2
51#endif
52
53/**
54 * major_to_device - get UBI device object by character device major number.
55 * @major: major number
56 *
57 * This function returns a pointer to the UBI device object.
58 */
59static struct ubi_device *major_to_device(int major)
60{
61 int i;
62
63 for (i = 0; i < ubi_devices_cnt; i++)
64 if (ubi_devices[i] && ubi_devices[i]->major == major)
65 return ubi_devices[i];
66 BUG();
67 return NULL;
68}
69
70/** 47/**
71 * get_exclusive - get exclusive access to an UBI volume. 48 * get_exclusive - get exclusive access to an UBI volume.
72 * @desc: volume descriptor 49 * @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
124static int vol_cdev_open(struct inode *inode, struct file *file) 101static int vol_cdev_open(struct inode *inode, struct file *file)
125{ 102{
126 struct ubi_volume_desc *desc; 103 struct ubi_volume_desc *desc;
127 const struct ubi_device *ubi = major_to_device(imajor(inode)); 104 int vol_id = iminor(inode) - 1, mode, ubi_num;
128 int vol_id = iminor(inode) - 1; 105
129 int mode; 106 ubi_num = ubi_major2num(imajor(inode));
107 if (ubi_num < 0)
108 return ubi_num;
130 109
131 if (file->f_mode & FMODE_WRITE) 110 if (file->f_mode & FMODE_WRITE)
132 mode = UBI_READWRITE; 111 mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
135 114
136 dbg_msg("open volume %d, mode %d", vol_id, mode); 115 dbg_msg("open volume %d, mode %d", vol_id, mode);
137 116
138 desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); 117 desc = ubi_open_volume(ubi_num, vol_id, mode);
139 if (IS_ERR(desc)) 118 if (IS_ERR(desc))
140 return PTR_ERR(desc); 119 return PTR_ERR(desc);
141 120
@@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
153 if (vol->updating) { 132 if (vol->updating) {
154 ubi_warn("update of volume %d not finished, volume is damaged", 133 ubi_warn("update of volume %d not finished, volume is damaged",
155 vol->vol_id); 134 vol->vol_id);
135 ubi_assert(!vol->changing_leb);
156 vol->updating = 0; 136 vol->updating = 0;
157 vfree(vol->upd_buf); 137 vfree(vol->upd_buf);
138 } else if (vol->changing_leb) {
139 dbg_msg("only %lld of %lld bytes received for atomic LEB change"
140 " for volume %d:%d, cancel", vol->upd_received,
141 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
142 vol->changing_leb = 0;
143 vfree(vol->upd_buf);
158 } 144 }
159 145
160 ubi_close_volume(desc); 146 ubi_close_volume(desc);
@@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
205 struct ubi_volume_desc *desc = file->private_data; 191 struct ubi_volume_desc *desc = file->private_data;
206 struct ubi_volume *vol = desc->vol; 192 struct ubi_volume *vol = desc->vol;
207 struct ubi_device *ubi = vol->ubi; 193 struct ubi_device *ubi = vol->ubi;
208 int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; 194 int err, lnum, off, len, tbuf_size;
209 size_t count_save = count; 195 size_t count_save = count;
210 void *tbuf; 196 void *tbuf;
211 uint64_t tmp; 197 uint64_t tmp;
212 198
213 dbg_msg("read %zd bytes from offset %lld of volume %d", 199 dbg_msg("read %zd bytes from offset %lld of volume %d",
214 count, *offp, vol_id); 200 count, *offp, vol->vol_id);
215 201
216 if (vol->updating) { 202 if (vol->updating) {
217 dbg_err("updating"); 203 dbg_err("updating");
@@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
225 return 0; 211 return 0;
226 212
227 if (vol->corrupted) 213 if (vol->corrupted)
228 dbg_msg("read from corrupted volume %d", vol_id); 214 dbg_msg("read from corrupted volume %d", vol->vol_id);
229 215
230 if (*offp + count > vol->used_bytes) 216 if (*offp + count > vol->used_bytes)
231 count_save = count = vol->used_bytes - *offp; 217 count_save = count = vol->used_bytes - *offp;
@@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
249 if (off + len >= vol->usable_leb_size) 235 if (off + len >= vol->usable_leb_size)
250 len = vol->usable_leb_size - off; 236 len = vol->usable_leb_size - off;
251 237
252 err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); 238 err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
253 if (err) 239 if (err)
254 break; 240 break;
255 241
@@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
289 struct ubi_volume_desc *desc = file->private_data; 275 struct ubi_volume_desc *desc = file->private_data;
290 struct ubi_volume *vol = desc->vol; 276 struct ubi_volume *vol = desc->vol;
291 struct ubi_device *ubi = vol->ubi; 277 struct ubi_device *ubi = vol->ubi;
292 int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; 278 int lnum, off, len, tbuf_size, err = 0;
293 size_t count_save = count; 279 size_t count_save = count;
294 char *tbuf; 280 char *tbuf;
295 uint64_t tmp; 281 uint64_t tmp;
296 282
297 dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 283 dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
298 count, *offp, desc->vol->vol_id); 284 count, *offp, vol->vol_id);
299 285
300 if (vol->vol_type == UBI_STATIC_VOLUME) 286 if (vol->vol_type == UBI_STATIC_VOLUME)
301 return -EROFS; 287 return -EROFS;
@@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
339 break; 325 break;
340 } 326 }
341 327
342 err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, 328 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
343 UBI_UNKNOWN); 329 UBI_UNKNOWN);
344 if (err) 330 if (err)
345 break; 331 break;
@@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
372 struct ubi_volume *vol = desc->vol; 358 struct ubi_volume *vol = desc->vol;
373 struct ubi_device *ubi = vol->ubi; 359 struct ubi_device *ubi = vol->ubi;
374 360
375 if (!vol->updating) 361 if (!vol->updating && !vol->changing_leb)
376 return vol_cdev_direct_write(file, buf, count, offp); 362 return vol_cdev_direct_write(file, buf, count, offp);
377 363
378 err = ubi_more_update_data(ubi, vol->vol_id, buf, count); 364 if (vol->updating)
365 err = ubi_more_update_data(ubi, vol, buf, count);
366 else
367 err = ubi_more_leb_change_data(ubi, vol, buf, count);
368
379 if (err < 0) { 369 if (err < 0) {
380 ubi_err("cannot write %zd bytes of update data", count); 370 ubi_err("cannot accept more %zd bytes of data, error %d",
371 count, err);
381 return err; 372 return err;
382 } 373 }
383 374
384 if (err) { 375 if (err) {
385 /* 376 /*
386 * Update is finished, @err contains number of actually written 377 * The operation is finished, @err contains number of actually
387 * bytes now. 378 * written bytes.
388 */ 379 */
389 count = err; 380 count = err;
390 381
382 if (vol->changing_leb) {
383 revoke_exclusive(desc, UBI_READWRITE);
384 return count;
385 }
386
391 err = ubi_check_volume(ubi, vol->vol_id); 387 err = ubi_check_volume(ubi, vol->vol_id);
392 if (err < 0) 388 if (err < 0)
393 return err; 389 return err;
@@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
402 revoke_exclusive(desc, UBI_READWRITE); 398 revoke_exclusive(desc, UBI_READWRITE);
403 } 399 }
404 400
405 *offp += count;
406 return count; 401 return count;
407} 402}
408 403
@@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
447 if (err < 0) 442 if (err < 0)
448 break; 443 break;
449 444
450 err = ubi_start_update(ubi, vol->vol_id, bytes); 445 err = ubi_start_update(ubi, vol, bytes);
451 if (bytes == 0) 446 if (bytes == 0)
452 revoke_exclusive(desc, UBI_READWRITE); 447 revoke_exclusive(desc, UBI_READWRITE);
448 break;
449 }
450
451 /* Atomic logical eraseblock change command */
452 case UBI_IOCEBCH:
453 {
454 struct ubi_leb_change_req req;
455
456 err = copy_from_user(&req, argp,
457 sizeof(struct ubi_leb_change_req));
458 if (err) {
459 err = -EFAULT;
460 break;
461 }
462
463 if (desc->mode == UBI_READONLY ||
464 vol->vol_type == UBI_STATIC_VOLUME) {
465 err = -EROFS;
466 break;
467 }
468
469 /* Validate the request */
470 err = -EINVAL;
471 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
 472 req.bytes < 0 || req.bytes > vol->usable_leb_size)
473 break;
474 if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
475 req.dtype != UBI_UNKNOWN)
476 break;
477
478 err = get_exclusive(desc);
479 if (err < 0)
480 break;
453 481
454 file->f_pos = 0; 482 err = ubi_start_leb_change(ubi, vol, &req);
483 if (req.bytes == 0)
484 revoke_exclusive(desc, UBI_READWRITE);
455 break; 485 break;
456 } 486 }
457 487
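A hedged userspace sketch of driving the new atomic LEB change command; struct ubi_leb_change_req, UBI_IOCEBCH and the dtype constants are assumed to come from <mtd/ubi-user.h> as introduced by this series:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Atomically replace LEB @lnum of an open volume with @bytes of @buf. */
static int change_leb(int vol_fd, int lnum, const void *buf, int bytes)
{
	struct ubi_leb_change_req req;

	memset(&req, 0, sizeof(req));
	req.lnum  = lnum;
	req.bytes = bytes;
	req.dtype = UBI_UNKNOWN;

	/* Takes the volume exclusively until @bytes bytes are written */
	if (ioctl(vol_fd, UBI_IOCEBCH, &req))
		return -1;

	/* The following write() carries the new LEB contents */
	if (write(vol_fd, buf, bytes) != (ssize_t)bytes)
		return -1;

	return 0;
}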
@@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
467 break; 497 break;
468 } 498 }
469 499
470 if (desc->mode == UBI_READONLY) { 500 if (desc->mode == UBI_READONLY ||
501 vol->vol_type == UBI_STATIC_VOLUME) {
471 err = -EROFS; 502 err = -EROFS;
472 break; 503 break;
473 } 504 }
@@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
477 break; 508 break;
478 } 509 }
479 510
480 if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
481 err = -EROFS;
482 break;
483 }
484
485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 511 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
486 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); 512 err = ubi_eba_unmap_leb(ubi, vol, lnum);
487 if (err) 513 if (err)
488 break; 514 break;
489 515
@@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
580 if (!capable(CAP_SYS_RESOURCE)) 606 if (!capable(CAP_SYS_RESOURCE))
581 return -EPERM; 607 return -EPERM;
582 608
583 ubi = major_to_device(imajor(inode)); 609 ubi = ubi_get_by_major(imajor(inode));
584 if (IS_ERR(ubi)) 610 if (!ubi)
585 return PTR_ERR(ubi); 611 return -ENODEV;
586 612
587 switch (cmd) { 613 switch (cmd) {
588 /* Create volume command */ 614 /* Create volume command */
@@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
591 struct ubi_mkvol_req req; 617 struct ubi_mkvol_req req;
592 618
593 dbg_msg("create volume"); 619 dbg_msg("create volume");
594 err = copy_from_user(&req, argp, 620 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
595 sizeof(struct ubi_mkvol_req));
596 if (err) { 621 if (err) {
597 err = -EFAULT; 622 err = -EFAULT;
598 break; 623 break;
@@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
604 629
605 req.name[req.name_len] = '\0'; 630 req.name[req.name_len] = '\0';
606 631
632 mutex_lock(&ubi->volumes_mutex);
607 err = ubi_create_volume(ubi, &req); 633 err = ubi_create_volume(ubi, &req);
634 mutex_unlock(&ubi->volumes_mutex);
608 if (err) 635 if (err)
609 break; 636 break;
610 637
@@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
633 break; 660 break;
634 } 661 }
635 662
663 mutex_lock(&ubi->volumes_mutex);
636 err = ubi_remove_volume(desc); 664 err = ubi_remove_volume(desc);
637 if (err) 665 mutex_unlock(&ubi->volumes_mutex);
638 ubi_close_volume(desc);
639 666
667 /*
668 * The volume is deleted (unless an error occurred), and the
669 * 'struct ubi_volume' object will be freed when
 670 * 'ubi_close_volume()' calls 'put_device()'.
671 */
672 ubi_close_volume(desc);
640 break; 673 break;
641 } 674 }
642 675
@@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
648 struct ubi_rsvol_req req; 681 struct ubi_rsvol_req req;
649 682
650 dbg_msg("re-size volume"); 683 dbg_msg("re-size volume");
651 err = copy_from_user(&req, argp, 684 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
652 sizeof(struct ubi_rsvol_req));
653 if (err) { 685 if (err) {
654 err = -EFAULT; 686 err = -EFAULT;
655 break; 687 break;
@@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
669 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 701 pebs = !!do_div(tmp, desc->vol->usable_leb_size);
670 pebs += tmp; 702 pebs += tmp;
671 703
704 mutex_lock(&ubi->volumes_mutex);
672 err = ubi_resize_volume(desc, pebs); 705 err = ubi_resize_volume(desc, pebs);
706 mutex_unlock(&ubi->volumes_mutex);
673 ubi_close_volume(desc); 707 ubi_close_volume(desc);
674 break; 708 break;
675 } 709 }
@@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
679 break; 713 break;
680 } 714 }
681 715
716 ubi_put_device(ubi);
682 return err; 717 return err;
683} 718}
684 719
720static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
721 unsigned int cmd, unsigned long arg)
722{
723 int err = 0;
724 void __user *argp = (void __user *)arg;
725
726 if (!capable(CAP_SYS_RESOURCE))
727 return -EPERM;
728
729 switch (cmd) {
730 /* Attach an MTD device command */
731 case UBI_IOCATT:
732 {
733 struct ubi_attach_req req;
734 struct mtd_info *mtd;
735
736 dbg_msg("attach MTD device");
737 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
738 if (err) {
739 err = -EFAULT;
740 break;
741 }
742
743 if (req.mtd_num < 0 ||
744 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
745 err = -EINVAL;
746 break;
747 }
748
749 mtd = get_mtd_device(NULL, req.mtd_num);
750 if (IS_ERR(mtd)) {
751 err = PTR_ERR(mtd);
752 break;
753 }
754
755 /*
756 * Note, further request verification is done by
757 * 'ubi_attach_mtd_dev()'.
758 */
759 mutex_lock(&ubi_devices_mutex);
760 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
761 mutex_unlock(&ubi_devices_mutex);
762 if (err < 0)
763 put_mtd_device(mtd);
764 else
765 /* @err contains UBI device number */
766 err = put_user(err, (__user int32_t *)argp);
767
768 break;
769 }
770
771 /* Detach an MTD device command */
772 case UBI_IOCDET:
773 {
774 int ubi_num;
775
776 dbg_msg("dettach MTD device");
777 err = get_user(ubi_num, (__user int32_t *)argp);
778 if (err) {
779 err = -EFAULT;
780 break;
781 }
782
783 mutex_lock(&ubi_devices_mutex);
784 err = ubi_detach_mtd_dev(ubi_num, 0);
785 mutex_unlock(&ubi_devices_mutex);
786 break;
787 }
788
789 default:
790 err = -ENOTTY;
791 break;
792 }
793
794 return err;
795}
796
797/* UBI control character device operations */
798struct file_operations ubi_ctrl_cdev_operations = {
799 .ioctl = ctrl_cdev_ioctl,
800 .owner = THIS_MODULE,
801};
802
685/* UBI character device operations */ 803/* UBI character device operations */
686struct file_operations ubi_cdev_operations = { 804struct file_operations ubi_cdev_operations = {
687 .owner = THIS_MODULE, 805 .owner = THIS_MODULE,
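Before the debug.h changes, a hedged sketch of how userspace would use the new control device; struct ubi_attach_req, UBI_IOCATT and UBI_DEV_NUM_AUTO are assumed from <mtd/ubi-user.h>, and /dev/ubi_ctrl is the node backed by ubi_ctrl_cdev_operations above:

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Attach MTD device @mtd_num; returns the assigned UBI device number. */
static int attach_mtd(int mtd_num)
{
	struct ubi_attach_req req;
	int fd, err;

	fd = open("/dev/ubi_ctrl", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.mtd_num = mtd_num;
	req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick a free number */

	err = ioctl(fd, UBI_IOCATT, &req);
	close(fd);
	if (err)
		return -1;

	/* ctrl_cdev_ioctl() wrote the device number back via put_user() */
	return req.ubi_num;
}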
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618b..51c40b17f1ec 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
39 39
40#ifdef CONFIG_MTD_UBI_DEBUG_MSG 40#ifdef CONFIG_MTD_UBI_DEBUG_MSG
41/* Generic debugging message */ 41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \ 42#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) 43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
44 current->pid, __FUNCTION__, ##__VA_ARGS__)
44 45
45#define ubi_dbg_dump_stack() dump_stack() 46#define ubi_dbg_dump_stack() dump_stack()
46 47
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
76 77
77#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 78#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
78/* Messages from the eraseblock association unit */ 79/* Messages from the eraseblock association unit */
79#define dbg_eba(fmt, ...) \ 80#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
80 printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
81 ##__VA_ARGS__)
82#else 81#else
83#define dbg_eba(fmt, ...) ({}) 82#define dbg_eba(fmt, ...) ({})
84#endif 83#endif
85 84
86#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 85#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
87/* Messages from the wear-leveling unit */ 86/* Messages from the wear-leveling unit */
88#define dbg_wl(fmt, ...) \ 87#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
89 printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
90 ##__VA_ARGS__)
91#else 88#else
92#define dbg_wl(fmt, ...) ({}) 89#define dbg_wl(fmt, ...) ({})
93#endif 90#endif
94 91
95#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 92#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
96/* Messages from the input/output unit */ 93/* Messages from the input/output unit */
97#define dbg_io(fmt, ...) \ 94#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
98 printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
99 ##__VA_ARGS__)
100#else 95#else
101#define dbg_io(fmt, ...) ({}) 96#define dbg_io(fmt, ...) ({})
102#endif 97#endif
103 98
104#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
105/* Initialization and build messages */ 100/* Initialization and build messages */
106#define dbg_bld(fmt, ...) \ 101#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
107 printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
108 ##__VA_ARGS__)
109#else 102#else
110#define dbg_bld(fmt, ...) ({}) 103#define dbg_bld(fmt, ...) ({})
111#endif 104#endif
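All four per-unit macros now funnel through dbg_msg(), so every debug line carries the pid of the printing task. Illustratively (not literal kernel output), a call such as dbg_eba("erase LEB %d:%d", vol_id, lnum) expands along the lines of:

printk(KERN_DEBUG "UBI DBG (pid %d): %s: erase LEB %d:%d\n",
       current->pid, __FUNCTION__, vol_id, lnum);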
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..7ce91ca742b1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
31 * logical eraseblock it is locked for reading or writing. The per-logical 31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree 32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
34 * lock tree elements are &struct ltree_entry objects. They are indexed by 34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
35 * (@vol_id, @lnum) pairs. 35 * (@vol_id, @lnum) pairs.
36 * 36 *
37 * EBA also maintains the global sequence counter which is incremented each 37 * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
50#define EBA_RESERVED_PEBS 1 50#define EBA_RESERVED_PEBS 1
51 51
52/** 52/**
53 * struct ltree_entry - an entry in the lock tree.
54 * @rb: links RB-tree nodes
55 * @vol_id: volume ID of the locked logical eraseblock
56 * @lnum: locked logical eraseblock number
57 * @users: how many tasks are using this logical eraseblock or wait for it
58 * @mutex: read/write mutex to implement read/write access serialization to
59 * the (@vol_id, @lnum) logical eraseblock
60 *
61 * When a logical eraseblock is being locked - corresponding &struct ltree_entry
62 * object is inserted to the lock tree (@ubi->ltree).
63 */
64struct ltree_entry {
65 struct rb_node rb;
66 int vol_id;
67 int lnum;
68 int users;
69 struct rw_semaphore mutex;
70};
71
72/* Slab cache for lock-tree entries */
73static struct kmem_cache *ltree_slab;
74
75/**
76 * next_sqnum - get next sequence number. 53 * next_sqnum - get next sequence number.
77 * @ubi: UBI device description object 54 * @ubi: UBI device description object
78 * 55 *
@@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
101 */ 78 */
102static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 79static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
103{ 80{
104 if (vol_id == UBI_LAYOUT_VOL_ID) 81 if (vol_id == UBI_LAYOUT_VOLUME_ID)
105 return UBI_LAYOUT_VOLUME_COMPAT; 82 return UBI_LAYOUT_VOLUME_COMPAT;
106 return 0; 83 return 0;
107} 84}
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
112 * @vol_id: volume ID 89 * @vol_id: volume ID
113 * @lnum: logical eraseblock number 90 * @lnum: logical eraseblock number
114 * 91 *
115 * This function returns a pointer to the corresponding &struct ltree_entry 92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
116 * object if the logical eraseblock is locked and %NULL if it is not. 93 * object if the logical eraseblock is locked and %NULL if it is not.
117 * @ubi->ltree_lock has to be locked. 94 * @ubi->ltree_lock has to be locked.
118 */ 95 */
119static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
120 int lnum) 97 int lnum)
121{ 98{
122 struct rb_node *p; 99 struct rb_node *p;
123 100
124 p = ubi->ltree.rb_node; 101 p = ubi->ltree.rb_node;
125 while (p) { 102 while (p) {
126 struct ltree_entry *le; 103 struct ubi_ltree_entry *le;
127 104
128 le = rb_entry(p, struct ltree_entry, rb); 105 le = rb_entry(p, struct ubi_ltree_entry, rb);
129 106
130 if (vol_id < le->vol_id) 107 if (vol_id < le->vol_id)
131 p = p->rb_left; 108 p = p->rb_left;
@@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
155 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 132 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
156 * failed. 133 * failed.
157 */ 134 */
158static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, 135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
159 int lnum) 136 int vol_id, int lnum)
160{ 137{
161 struct ltree_entry *le, *le1, *le_free; 138 struct ubi_ltree_entry *le, *le1, *le_free;
162 139
163 le = kmem_cache_alloc(ltree_slab, GFP_NOFS); 140 le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
164 if (!le) 141 if (!le)
165 return ERR_PTR(-ENOMEM); 142 return ERR_PTR(-ENOMEM);
166 143
144 le->users = 0;
145 init_rwsem(&le->mutex);
167 le->vol_id = vol_id; 146 le->vol_id = vol_id;
168 le->lnum = lnum; 147 le->lnum = lnum;
169 148
@@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
189 p = &ubi->ltree.rb_node; 168 p = &ubi->ltree.rb_node;
190 while (*p) { 169 while (*p) {
191 parent = *p; 170 parent = *p;
192 le1 = rb_entry(parent, struct ltree_entry, rb); 171 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
193 172
194 if (vol_id < le1->vol_id) 173 if (vol_id < le1->vol_id)
195 p = &(*p)->rb_left; 174 p = &(*p)->rb_left;
@@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
211 spin_unlock(&ubi->ltree_lock); 190 spin_unlock(&ubi->ltree_lock);
212 191
213 if (le_free) 192 if (le_free)
214 kmem_cache_free(ltree_slab, le_free); 193 kfree(le_free);
215 194
216 return le; 195 return le;
217} 196}
@@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
227 */ 206 */
228static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 207static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
229{ 208{
230 struct ltree_entry *le; 209 struct ubi_ltree_entry *le;
231 210
232 le = ltree_add_entry(ubi, vol_id, lnum); 211 le = ltree_add_entry(ubi, vol_id, lnum);
233 if (IS_ERR(le)) 212 if (IS_ERR(le))
@@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
245static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 224static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
246{ 225{
247 int free = 0; 226 int free = 0;
248 struct ltree_entry *le; 227 struct ubi_ltree_entry *le;
249 228
250 spin_lock(&ubi->ltree_lock); 229 spin_lock(&ubi->ltree_lock);
251 le = ltree_lookup(ubi, vol_id, lnum); 230 le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
259 238
260 up_read(&le->mutex); 239 up_read(&le->mutex);
261 if (free) 240 if (free)
262 kmem_cache_free(ltree_slab, le); 241 kfree(le);
263} 242}
264 243
265/** 244/**
@@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
273 */ 252 */
274static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 253static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
275{ 254{
276 struct ltree_entry *le; 255 struct ubi_ltree_entry *le;
277 256
278 le = ltree_add_entry(ubi, vol_id, lnum); 257 le = ltree_add_entry(ubi, vol_id, lnum);
279 if (IS_ERR(le)) 258 if (IS_ERR(le))
@@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
283} 262}
284 263
285/** 264/**
 265 * leb_write_trylock - try to lock a logical eraseblock for writing.
266 * @ubi: UBI device description object
267 * @vol_id: volume ID
268 * @lnum: logical eraseblock number
269 *
270 * This function locks a logical eraseblock for writing if there is no
271 * contention and does nothing if there is contention. Returns %0 in case of
272 * success, %1 in case of contention, and and a negative error code in case of
273 * failure.
274 */
275static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
276{
277 int free;
278 struct ubi_ltree_entry *le;
279
280 le = ltree_add_entry(ubi, vol_id, lnum);
281 if (IS_ERR(le))
282 return PTR_ERR(le);
283 if (down_write_trylock(&le->mutex))
284 return 0;
285
286 /* Contention, cancel */
287 spin_lock(&ubi->ltree_lock);
288 le->users -= 1;
289 ubi_assert(le->users >= 0);
290 if (le->users == 0) {
291 rb_erase(&le->rb, &ubi->ltree);
292 free = 1;
293 } else
294 free = 0;
295 spin_unlock(&ubi->ltree_lock);
296 if (free)
297 kfree(le);
298
299 return 1;
300}
301
302/**
286 * leb_write_unlock - unlock logical eraseblock. 303 * leb_write_unlock - unlock logical eraseblock.
287 * @ubi: UBI device description object 304 * @ubi: UBI device description object
288 * @vol_id: volume ID 305 * @vol_id: volume ID
@@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
291static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 308static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
292{ 309{
293 int free; 310 int free;
294 struct ltree_entry *le; 311 struct ubi_ltree_entry *le;
295 312
296 spin_lock(&ubi->ltree_lock); 313 spin_lock(&ubi->ltree_lock);
297 le = ltree_lookup(ubi, vol_id, lnum); 314 le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
306 323
307 up_write(&le->mutex); 324 up_write(&le->mutex);
308 if (free) 325 if (free)
309 kmem_cache_free(ltree_slab, le); 326 kfree(le);
310} 327}
311 328
312/** 329/**
313 * ubi_eba_unmap_leb - un-map logical eraseblock. 330 * ubi_eba_unmap_leb - un-map logical eraseblock.
314 * @ubi: UBI device description object 331 * @ubi: UBI device description object
315 * @vol_id: volume ID 332 * @vol: volume description object
316 * @lnum: logical eraseblock number 333 * @lnum: logical eraseblock number
317 * 334 *
318 * This function un-maps logical eraseblock @lnum and schedules corresponding 335 * This function un-maps logical eraseblock @lnum and schedules corresponding
319 * physical eraseblock for erasure. Returns zero in case of success and a 336 * physical eraseblock for erasure. Returns zero in case of success and a
320 * negative error code in case of failure. 337 * negative error code in case of failure.
321 */ 338 */
322int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) 339int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
340 int lnum)
323{ 341{
324 int idx = vol_id2idx(ubi, vol_id), err, pnum; 342 int err, pnum, vol_id = vol->vol_id;
325 struct ubi_volume *vol = ubi->volumes[idx];
326 343
327 if (ubi->ro_mode) 344 if (ubi->ro_mode)
328 return -EROFS; 345 return -EROFS;
@@ -349,7 +366,7 @@ out_unlock:
349/** 366/**
350 * ubi_eba_read_leb - read data. 367 * ubi_eba_read_leb - read data.
351 * @ubi: UBI device description object 368 * @ubi: UBI device description object
352 * @vol_id: volume ID 369 * @vol: volume description object
353 * @lnum: logical eraseblock number 370 * @lnum: logical eraseblock number
354 * @buf: buffer to store the read data 371 * @buf: buffer to store the read data
355 * @offset: offset from where to read 372 * @offset: offset from where to read
@@ -365,12 +382,11 @@ out_unlock:
365 * returned for any volume type if an ECC error was detected by the MTD device 382 * returned for any volume type if an ECC error was detected by the MTD device
366 * driver. Other negative error codes may be returned in case of other errors. 383
367 */ 384 */
368int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 385int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
369 int offset, int len, int check) 386 void *buf, int offset, int len, int check)
370{ 387{
371 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 388 int err, pnum, scrub = 0, vol_id = vol->vol_id;
372 struct ubi_vid_hdr *vid_hdr; 389 struct ubi_vid_hdr *vid_hdr;
373 struct ubi_volume *vol = ubi->volumes[idx];
374 uint32_t uninitialized_var(crc); 390 uint32_t uninitialized_var(crc);
375 391
376 err = leb_read_lock(ubi, vol_id, lnum); 392 err = leb_read_lock(ubi, vol_id, lnum);
@@ -578,7 +594,7 @@ write_error:
578/** 594/**
579 * ubi_eba_write_leb - write data to dynamic volume. 595 * ubi_eba_write_leb - write data to dynamic volume.
580 * @ubi: UBI device description object 596 * @ubi: UBI device description object
581 * @vol_id: volume ID 597 * @vol: volume description object
582 * @lnum: logical eraseblock number 598 * @lnum: logical eraseblock number
583 * @buf: the data to write 599 * @buf: the data to write
584 * @offset: offset within the logical eraseblock where to write 600 * @offset: offset within the logical eraseblock where to write
@@ -586,15 +602,14 @@ write_error:
586 * @dtype: data type 602 * @dtype: data type
587 * 603 *
588 * This function writes data to logical eraseblock @lnum of a dynamic volume 604 * This function writes data to logical eraseblock @lnum of a dynamic volume
589 * @vol_id. Returns zero in case of success and a negative error code in case 605 * @vol. Returns zero in case of success and a negative error code in case
590 * of failure. In case of error, it is possible that something was still 606 * of failure. In case of error, it is possible that something was still
591 * written to the flash media, but it may be garbage. 607
592 */ 608 */
593int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 609int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
594 const void *buf, int offset, int len, int dtype) 610 const void *buf, int offset, int len, int dtype)
595{ 611{
596 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; 612 int err, pnum, tries = 0, vol_id = vol->vol_id;
597 struct ubi_volume *vol = ubi->volumes[idx];
598 struct ubi_vid_hdr *vid_hdr; 613 struct ubi_vid_hdr *vid_hdr;
599 614
600 if (ubi->ro_mode) 615 if (ubi->ro_mode)
@@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
613 if (err) { 628 if (err) {
614 ubi_warn("failed to write data to PEB %d", pnum); 629 ubi_warn("failed to write data to PEB %d", pnum);
615 if (err == -EIO && ubi->bad_allowed) 630 if (err == -EIO && ubi->bad_allowed)
616 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); 631 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
632 offset, len);
617 if (err) 633 if (err)
618 ubi_ro_mode(ubi); 634 ubi_ro_mode(ubi);
619 } 635 }
@@ -656,11 +672,14 @@ retry:
656 goto write_error; 672 goto write_error;
657 } 673 }
658 674
659 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 675 if (len) {
660 if (err) { 676 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
661 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " 677 if (err) {
662 "PEB %d", len, offset, vol_id, lnum, pnum); 678 ubi_warn("failed to write %d bytes at offset %d of "
663 goto write_error; 679 "LEB %d:%d, PEB %d", len, offset, vol_id,
680 lnum, pnum);
681 goto write_error;
682 }
664 } 683 }
665 684
666 vol->eba_tbl[lnum] = pnum; 685 vol->eba_tbl[lnum] = pnum;
@@ -698,7 +717,7 @@ write_error:
698/** 717/**
699 * ubi_eba_write_leb_st - write data to static volume. 718 * ubi_eba_write_leb_st - write data to static volume.
700 * @ubi: UBI device description object 719 * @ubi: UBI device description object
701 * @vol_id: volume ID 720 * @vol: volume description object
702 * @lnum: logical eraseblock number 721 * @lnum: logical eraseblock number
703 * @buf: data to write 722 * @buf: data to write
704 * @len: how many bytes to write 723 * @len: how many bytes to write
@@ -706,7 +725,7 @@ write_error:
706 * @used_ebs: how many logical eraseblocks will this volume contain 725 * @used_ebs: how many logical eraseblocks will this volume contain
707 * 726 *
708 * This function writes data to logical eraseblock @lnum of static volume 727 * This function writes data to logical eraseblock @lnum of static volume
709 * @vol_id. The @used_ebs argument should contain total number of logical 728 * @vol. The @used_ebs argument should contain total number of logical
710 * eraseblock in this static volume. 729 * eraseblock in this static volume.
711 * 730 *
712 * When writing to the last logical eraseblock, the @len argument doesn't have 731 * When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,12 +737,11 @@ write_error:
718 * volumes. This function returns zero in case of success and a negative error 737 * volumes. This function returns zero in case of success and a negative error
719 * code in case of failure. 738 * code in case of failure.
720 */ 739 */
721int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 740int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
722 const void *buf, int len, int dtype, int used_ebs) 741 int lnum, const void *buf, int len, int dtype,
742 int used_ebs)
723{ 743{
724 int err, pnum, tries = 0, data_size = len; 744 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
725 int idx = vol_id2idx(ubi, vol_id);
726 struct ubi_volume *vol = ubi->volumes[idx];
727 struct ubi_vid_hdr *vid_hdr; 745 struct ubi_vid_hdr *vid_hdr;
728 uint32_t crc; 746 uint32_t crc;
729 747
@@ -819,7 +837,7 @@ write_error:
819/* 837/*
820 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 838 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
821 * @ubi: UBI device description object 839 * @ubi: UBI device description object
822 * @vol_id: volume ID 840 * @vol: volume description object
823 * @lnum: logical eraseblock number 841 * @lnum: logical eraseblock number
824 * @buf: data to write 842 * @buf: data to write
825 * @len: how many bytes to write 843 * @len: how many bytes to write
@@ -834,17 +852,27 @@ write_error:
834 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 852 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
835 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 853 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
836 */ 854 */
837int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 855int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
838 const void *buf, int len, int dtype) 856 int lnum, const void *buf, int len, int dtype)
839{ 857{
840 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); 858 int err, pnum, tries = 0, vol_id = vol->vol_id;
841 struct ubi_volume *vol = ubi->volumes[idx];
842 struct ubi_vid_hdr *vid_hdr; 859 struct ubi_vid_hdr *vid_hdr;
843 uint32_t crc; 860 uint32_t crc;
844 861
845 if (ubi->ro_mode) 862 if (ubi->ro_mode)
846 return -EROFS; 863 return -EROFS;
847 864
865 if (len == 0) {
866 /*
867 * Special case when data length is zero. In this case the LEB
868 * has to be unmapped and mapped somewhere else.
869 */
870 err = ubi_eba_unmap_leb(ubi, vol, lnum);
871 if (err)
872 return err;
873 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
874 }
875
848 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 876 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
849 if (!vid_hdr) 877 if (!vid_hdr)
850 return -ENOMEM; 878 return -ENOMEM;
@@ -928,20 +956,6 @@ write_error:
928} 956}
929 957
930/** 958/**
931 * ltree_entry_ctor - lock tree entries slab cache constructor.
932 * @obj: the lock-tree entry to construct
933 * @cache: the lock tree entry slab cache
934 * @flags: constructor flags
935 */
936static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
937{
938 struct ltree_entry *le = obj;
939
940 le->users = 0;
941 init_rwsem(&le->mutex);
942}
943
944/**
945 * ubi_eba_copy_leb - copy logical eraseblock. 959 * ubi_eba_copy_leb - copy logical eraseblock.
946 * @ubi: UBI device description object 960 * @ubi: UBI device description object
947 * @from: physical eraseblock number from where to copy 961 * @from: physical eraseblock number from where to copy
@@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
950 * 964 *
951 * This function copies logical eraseblock from physical eraseblock @from to 965 * This function copies logical eraseblock from physical eraseblock @from to
952 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 966 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
953 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation 967 * function. Returns:
954 * was canceled because bit-flips were detected at the target PEB, and a 968 * o %0 in case of success;
955 * negative error code in case of failure. 969 * o %1 if the operation was canceled and should be tried later (e.g.,
970 * because a bit-flip was detected at the target PEB);
971 * o %2 if the volume is being deleted and this LEB should not be moved.
956 */ 972 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 973int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 974 struct ubi_vid_hdr *vid_hdr)
959{ 975{
960 int err, vol_id, lnum, data_size, aldata_size, pnum, idx; 976 int err, vol_id, lnum, data_size, aldata_size, idx;
961 struct ubi_volume *vol; 977 struct ubi_volume *vol;
962 uint32_t crc; 978 uint32_t crc;
963 979
@@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
973 data_size = aldata_size = 989 data_size = aldata_size =
974 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 990 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
975 991
976 /*
977 * We do not want anybody to write to this logical eraseblock while we
978 * are moving it, so we lock it.
979 */
980 err = leb_write_lock(ubi, vol_id, lnum);
981 if (err)
982 return err;
983
984 mutex_lock(&ubi->buf_mutex);
985
986 /*
987 * But the logical eraseblock might have been put by this time.
988 * Cancel if it is true.
989 */
990 idx = vol_id2idx(ubi, vol_id); 992 idx = vol_id2idx(ubi, vol_id);
991 993 spin_lock(&ubi->volumes_lock);
992 /* 994 /*
993 * We may race with volume deletion/re-size, so we have to hold 995 * Note, we may race with volume deletion, which means that the volume
994 * @ubi->volumes_lock. 996 * this logical eraseblock belongs to might be being deleted. Since the
997 * volume deletion unmaps all the volume's logical eraseblocks, it will
 998 * block in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
995 */ 999 */
996 spin_lock(&ubi->volumes_lock);
997 vol = ubi->volumes[idx]; 1000 vol = ubi->volumes[idx];
998 if (!vol) { 1001 if (!vol) {
999 dbg_eba("volume %d was removed meanwhile", vol_id); 1002 /* No need to do further work, cancel */
1003 dbg_eba("volume %d is being removed, cancel", vol_id);
1000 spin_unlock(&ubi->volumes_lock); 1004 spin_unlock(&ubi->volumes_lock);
1001 goto out_unlock; 1005 return 2;
1002 } 1006 }
1007 spin_unlock(&ubi->volumes_lock);
1003 1008
1004 pnum = vol->eba_tbl[lnum]; 1009 /*
1005 if (pnum != from) { 1010 * We do not want anybody to write to this logical eraseblock while we
1006 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1011 * are moving it, so lock it.
1007 "PEB %d, cancel", vol_id, lnum, from, pnum); 1012 *
1008 spin_unlock(&ubi->volumes_lock); 1013 * Note, we are using non-waiting locking here, because we cannot sleep
1009 goto out_unlock; 1014 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1015 * unmapping the LEB which is mapped to the PEB we are going to move
 1016 * (@from). This task locks the LEB and goes to sleep in the
 1017 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
 1018 * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
1019 * LEB is already locked, we just do not move it and return %1.
1020 */
1021 err = leb_write_trylock(ubi, vol_id, lnum);
1022 if (err) {
1023 dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
1024 return err;
1010 } 1025 }
1011 spin_unlock(&ubi->volumes_lock);
1012 1026
1013 /* OK, now the LEB is locked and we can safely start moving it */ 1027 /*
1028 * The LEB might have been put meanwhile, and the task which put it is
1029 * probably waiting on @ubi->move_mutex. No need to continue the work,
1030 * cancel it.
1031 */
1032 if (vol->eba_tbl[lnum] != from) {
1033 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1034 "PEB %d, cancel", vol_id, lnum, from,
1035 vol->eba_tbl[lnum]);
1036 err = 1;
1037 goto out_unlock_leb;
1038 }
1014 1039
1040 /*
 1041 * OK, now the LEB is locked and we can safely start moving it. Since
 1042 * this function utilizes the @ubi->peb_buf1 buffer, which is shared
 1043 * with some other functions, lock the buffer by taking the
1044 * @ubi->buf_mutex.
1045 */
1046 mutex_lock(&ubi->buf_mutex);
1015 dbg_eba("read %d bytes of data", aldata_size); 1047 dbg_eba("read %d bytes of data", aldata_size);
1016 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1048 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1017 if (err && err != UBI_IO_BITFLIPS) { 1049 if (err && err != UBI_IO_BITFLIPS) {
1018 ubi_warn("error %d while reading data from PEB %d", 1050 ubi_warn("error %d while reading data from PEB %d",
1019 err, from); 1051 err, from);
1020 goto out_unlock; 1052 goto out_unlock_buf;
1021 } 1053 }
1022 1054
1023 /* 1055 /*
@@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1053 1085
1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1086 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1055 if (err) 1087 if (err)
1056 goto out_unlock; 1088 goto out_unlock_buf;
1057 1089
1058 cond_resched(); 1090 cond_resched();
1059 1091
@@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1062 if (err) { 1094 if (err) {
1063 if (err != UBI_IO_BITFLIPS) 1095 if (err != UBI_IO_BITFLIPS)
1064 ubi_warn("cannot read VID header back from PEB %d", to); 1096 ubi_warn("cannot read VID header back from PEB %d", to);
1065 goto out_unlock; 1097 else
1098 err = 1;
1099 goto out_unlock_buf;
1066 } 1100 }
1067 1101
1068 if (data_size > 0) { 1102 if (data_size > 0) {
1069 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1103 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1070 if (err) 1104 if (err)
1071 goto out_unlock; 1105 goto out_unlock_buf;
1072 1106
1073 cond_resched(); 1107 cond_resched();
1074 1108
@@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1082 if (err != UBI_IO_BITFLIPS) 1116 if (err != UBI_IO_BITFLIPS)
1083 ubi_warn("cannot read data back from PEB %d", 1117 ubi_warn("cannot read data back from PEB %d",
1084 to); 1118 to);
1085 goto out_unlock; 1119 else
1120 err = 1;
1121 goto out_unlock_buf;
1086 } 1122 }
1087 1123
1088 cond_resched(); 1124 cond_resched();
@@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1090 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1126 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1091 ubi_warn("read data back from PEB %d - it is different", 1127 ubi_warn("read data back from PEB %d - it is different",
1092 to); 1128 to);
1093 goto out_unlock; 1129 goto out_unlock_buf;
1094 } 1130 }
1095 } 1131 }
1096 1132
1097 ubi_assert(vol->eba_tbl[lnum] == from); 1133 ubi_assert(vol->eba_tbl[lnum] == from);
1098 vol->eba_tbl[lnum] = to; 1134 vol->eba_tbl[lnum] = to;
1099 1135
1100out_unlock: 1136out_unlock_buf:
1101 mutex_unlock(&ubi->buf_mutex); 1137 mutex_unlock(&ubi->buf_mutex);
1138out_unlock_leb:
1102 leb_write_unlock(ubi, vol_id, lnum); 1139 leb_write_unlock(ubi, vol_id, lnum);
1103 return err; 1140 return err;
1104} 1141}
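A hedged sketch of how a caller, such as the wear-leveling worker, is expected to act on the new tri-state return of ubi_eba_copy_leb(); retry_move_later() and drop_move() are hypothetical stand-ins for the caller's own queueing logic:

/* Interpret ubi_eba_copy_leb()'s return: 0, 1, 2, or a negative error */
static int handle_move(struct ubi_device *ubi, int from, int to,
		       struct ubi_vid_hdr *vid_hdr)
{
	int err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);

	if (err < 0)
		return err;			/* real I/O or memory failure */
	if (err == 1)
		return retry_move_later(ubi, from, to); /* contention, retry */
	if (err == 2)
		return drop_move(ubi, from);	/* volume is being deleted */
	return 0;				/* LEB moved successfully */
}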
@@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1125 mutex_init(&ubi->alc_mutex); 1162 mutex_init(&ubi->alc_mutex);
1126 ubi->ltree = RB_ROOT; 1163 ubi->ltree = RB_ROOT;
1127 1164
1128 if (ubi_devices_cnt == 0) {
1129 ltree_slab = kmem_cache_create("ubi_ltree_slab",
1130 sizeof(struct ltree_entry), 0,
1131 0, &ltree_entry_ctor);
1132 if (!ltree_slab)
1133 return -ENOMEM;
1134 }
1135
1136 ubi->global_sqnum = si->max_sqnum + 1; 1165 ubi->global_sqnum = si->max_sqnum + 1;
1137 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1166 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1138 1167
@@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1168 } 1197 }
1169 } 1198 }
1170 1199
1200 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1201 ubi_err("no enough physical eraseblocks (%d, need %d)",
1202 ubi->avail_pebs, EBA_RESERVED_PEBS);
1203 err = -ENOSPC;
1204 goto out_free;
1205 }
1206 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1207 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1208
1171 if (ubi->bad_allowed) { 1209 if (ubi->bad_allowed) {
1172 ubi_calculate_reserved(ubi); 1210 ubi_calculate_reserved(ubi);
1173 1211
@@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1184 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1222 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1185 } 1223 }
1186 1224
1187 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1188 ubi_err("no enough physical eraseblocks (%d, need %d)",
1189 ubi->avail_pebs, EBA_RESERVED_PEBS);
1190 err = -ENOSPC;
1191 goto out_free;
1192 }
1193 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1194 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1195
1196 dbg_eba("EBA unit is initialized"); 1225 dbg_eba("EBA unit is initialized");
1197 return 0; 1226 return 0;
1198 1227
@@ -1202,8 +1231,6 @@ out_free:
1202 continue; 1231 continue;
1203 kfree(ubi->volumes[i]->eba_tbl); 1232 kfree(ubi->volumes[i]->eba_tbl);
1204 } 1233 }
1205 if (ubi_devices_cnt == 0)
1206 kmem_cache_destroy(ltree_slab);
1207 return err; 1234 return err;
1208} 1235}
1209 1236
@@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
1222 continue; 1249 continue;
1223 kfree(ubi->volumes[i]->eba_tbl); 1250 kfree(ubi->volumes[i]->eba_tbl);
1224 } 1251 }
1225 if (ubi_devices_cnt == 1)
1226 kmem_cache_destroy(ltree_slab);
1227} 1252}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e14..d397219238d3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
129 if (to_read > total_read) 129 if (to_read > total_read)
130 to_read = total_read; 130 to_read = total_read;
131 131
132 err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, 132 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
133 to_read, 0);
134 if (err) 133 if (err)
135 break; 134 break;
136 135
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
187 if (to_write > total_written) 186 if (to_write > total_written)
188 to_write = total_written; 187 to_write = total_written;
189 188
190 err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, 189 err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
191 to_write, UBI_UNKNOWN); 190 UBI_UNKNOWN);
192 if (err) 191 if (err)
193 break; 192 break;
194 193
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
237 return -EROFS; 236 return -EROFS;
238 237
239 for (i = 0; i < count; i++) { 238 for (i = 0; i < count; i++) {
240 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); 239 err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
241 if (err) 240 if (err)
242 goto out_err; 241 goto out_err;
243 } 242 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b5..db3efdef2433 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
174 "read %zd bytes", err, len, pnum, offset, read); 174 "read %zd bytes", err, len, pnum, offset, read);
175 ubi_dbg_dump_stack(); 175 ubi_dbg_dump_stack();
176
177 /*
178 * The driver should never return -EBADMSG if it failed to read
179 * all the requested data. But some buggy drivers might do
180 * this, so we change it to -EIO.
181 */
182 if (read != len && err == -EBADMSG) {
183 ubi_assert(0);
184 err = -EIO;
185 }
176 } else { 186 } else {
177 ubi_assert(len == read); 187 ubi_assert(len == read);
178 188
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f41549..a70d58823f8d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
30 * @ubi_num: UBI device number 30 * @ubi_num: UBI device number
31 * @di: the information is stored here 31 * @di: the information is stored here
32 * 32 *
33 * This function returns %0 in case of success and a %-ENODEV if there is no 33 * This function returns %0 in case of success, %-EINVAL if the UBI device
34 * such UBI device. 34 * number is invalid, and %-ENODEV if there is no such UBI device.
35 */ 35 */
36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
37{ 37{
38 const struct ubi_device *ubi; 38 struct ubi_device *ubi;
39
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
41 return -EINVAL;
39 42
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || 43 ubi = ubi_get_device(ubi_num);
41 !ubi_devices[ubi_num]) 44 if (!ubi)
42 return -ENODEV; 45 return -ENODEV;
43 46
44 ubi = ubi_devices[ubi_num];
45 di->ubi_num = ubi->ubi_num; 47 di->ubi_num = ubi->ubi_num;
46 di->leb_size = ubi->leb_size; 48 di->leb_size = ubi->leb_size;
47 di->min_io_size = ubi->min_io_size; 49 di->min_io_size = ubi->min_io_size;
48 di->ro_mode = ubi->ro_mode; 50 di->ro_mode = ubi->ro_mode;
49 di->cdev = MKDEV(ubi->major, 0); 51 di->cdev = ubi->cdev.dev;
52
53 ubi_put_device(ubi);
50 return 0; 54 return 0;
51} 55}
52EXPORT_SYMBOL_GPL(ubi_get_device_info); 56EXPORT_SYMBOL_GPL(ubi_get_device_info);
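A hedged in-kernel sketch of the updated ubi_get_device_info() contract; the probing context is hypothetical:

#include <linux/kernel.h>
#include <linux/mtd/ubi.h>

static int report_ubi0(void)
{
	struct ubi_device_info di;
	int err;

	/* Takes a device reference internally and drops it before returning */
	err = ubi_get_device_info(0, &di);
	if (err)
		return err;	/* -EINVAL or -ENODEV per the new contract */

	printk(KERN_INFO "ubi%d: LEB size %d, min. I/O unit %d\n",
	       di.ubi_num, di.leb_size, di.min_io_size);
	return 0;
}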
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
73 vi->usable_leb_size = vol->usable_leb_size; 77 vi->usable_leb_size = vol->usable_leb_size;
74 vi->name_len = vol->name_len; 78 vi->name_len = vol->name_len;
75 vi->name = vol->name; 79 vi->name = vol->name;
76 vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); 80 vi->cdev = vol->cdev.dev;
77} 81}
78EXPORT_SYMBOL_GPL(ubi_get_volume_info); 82EXPORT_SYMBOL_GPL(ubi_get_volume_info);
79 83
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
104 108
105 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 109 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
106 110
107 err = -ENODEV; 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
108 if (ubi_num < 0) 112 return ERR_PTR(-EINVAL);
109 return ERR_PTR(err);
110
111 ubi = ubi_devices[ubi_num];
112
113 if (!try_module_get(THIS_MODULE))
114 return ERR_PTR(err);
115
116 if (ubi_num >= UBI_MAX_DEVICES || !ubi)
117 goto out_put;
118 113
119 err = -EINVAL;
120 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
121 goto out_put;
122 if (mode != UBI_READONLY && mode != UBI_READWRITE && 114 if (mode != UBI_READONLY && mode != UBI_READWRITE &&
123 mode != UBI_EXCLUSIVE) 115 mode != UBI_EXCLUSIVE)
124 goto out_put; 116 return ERR_PTR(-EINVAL);
117
118 /*
119 * First of all, we have to get the UBI device to prevent its removal.
120 */
121 ubi = ubi_get_device(ubi_num);
122 if (!ubi)
123 return ERR_PTR(-ENODEV);
124
125 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
126 err = -EINVAL;
127 goto out_put_ubi;
128 }
125 129
126 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 130 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
127 if (!desc) { 131 if (!desc) {
128 err = -ENOMEM; 132 err = -ENOMEM;
129 goto out_put; 133 goto out_put_ubi;
130 } 134 }
131 135
136 err = -ENODEV;
137 if (!try_module_get(THIS_MODULE))
138 goto out_free;
139
132 spin_lock(&ubi->volumes_lock); 140 spin_lock(&ubi->volumes_lock);
133 vol = ubi->volumes[vol_id]; 141 vol = ubi->volumes[vol_id];
134 if (!vol) { 142 if (!vol)
135 err = -ENODEV;
136 goto out_unlock; 143 goto out_unlock;
137 }
138 144
139 err = -EBUSY; 145 err = -EBUSY;
140 switch (mode) { 146 switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
156 vol->exclusive = 1; 162 vol->exclusive = 1;
157 break; 163 break;
158 } 164 }
165 get_device(&vol->dev);
166 vol->ref_count += 1;
159 spin_unlock(&ubi->volumes_lock); 167 spin_unlock(&ubi->volumes_lock);
160 168
161 desc->vol = vol; 169 desc->vol = vol;
162 desc->mode = mode; 170 desc->mode = mode;
163 171
164 /* 172 mutex_lock(&ubi->ckvol_mutex);
165 * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
166 * although it is not the purpose it was introduced for.
167 */
168 mutex_lock(&ubi->vtbl_mutex);
169 if (!vol->checked) { 173 if (!vol->checked) {
170 /* This is the first open - check the volume */ 174 /* This is the first open - check the volume */
171 err = ubi_check_volume(ubi, vol_id); 175 err = ubi_check_volume(ubi, vol_id);
172 if (err < 0) { 176 if (err < 0) {
173 mutex_unlock(&ubi->vtbl_mutex); 177 mutex_unlock(&ubi->ckvol_mutex);
174 ubi_close_volume(desc); 178 ubi_close_volume(desc);
175 return ERR_PTR(err); 179 return ERR_PTR(err);
176 } 180 }
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
181 } 185 }
182 vol->checked = 1; 186 vol->checked = 1;
183 } 187 }
184 mutex_unlock(&ubi->vtbl_mutex); 188 mutex_unlock(&ubi->ckvol_mutex);
189
185 return desc; 190 return desc;
186 191
187out_unlock: 192out_unlock:
188 spin_unlock(&ubi->volumes_lock); 193 spin_unlock(&ubi->volumes_lock);
189 kfree(desc);
190out_put:
191 module_put(THIS_MODULE); 194 module_put(THIS_MODULE);
195out_free:
196 kfree(desc);
197out_put_ubi:
198 ubi_put_device(ubi);
192 return ERR_PTR(err); 199 return ERR_PTR(err);
193} 200}
194EXPORT_SYMBOL_GPL(ubi_open_volume); 201EXPORT_SYMBOL_GPL(ubi_open_volume);
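A hedged sketch of a complete open/read/close cycle under the new reference-counting scheme; device 0 and volume 0 are arbitrary example numbers:

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/mtd/ubi.h>

static int read_first_leb(void)
{
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;
	char *buf;
	int err;

	desc = ubi_open_volume(0, 0, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ubi_get_volume_info(desc, &vi);
	buf = vmalloc(vi.usable_leb_size);
	if (!buf) {
		ubi_close_volume(desc);
		return -ENOMEM;
	}

	/* Read LEB 0 in full; the last argument disables the CRC check */
	err = ubi_leb_read(desc, 0, buf, 0, vi.usable_leb_size, 0);

	vfree(buf);
	/* Drops the volume and device references taken by ubi_open_volume() */
	ubi_close_volume(desc);
	return err;
}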
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
205 int mode) 212 int mode)
206{ 213{
207 int i, vol_id = -1, len; 214 int i, vol_id = -1, len;
208 struct ubi_volume_desc *ret;
209 struct ubi_device *ubi; 215 struct ubi_device *ubi;
216 struct ubi_volume_desc *ret;
210 217
211 dbg_msg("open volume %s, mode %d", name, mode); 218 dbg_msg("open volume %s, mode %d", name, mode);
212 219
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
217 if (len > UBI_VOL_NAME_MAX) 224 if (len > UBI_VOL_NAME_MAX)
218 return ERR_PTR(-EINVAL); 225 return ERR_PTR(-EINVAL);
219 226
220 ret = ERR_PTR(-ENODEV); 227 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
221 if (!try_module_get(THIS_MODULE)) 228 return ERR_PTR(-EINVAL);
222 return ret;
223
224 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
225 goto out_put;
226 229
227 ubi = ubi_devices[ubi_num]; 230 ubi = ubi_get_device(ubi_num);
231 if (!ubi)
232 return ERR_PTR(-ENODEV);
228 233
229 spin_lock(&ubi->volumes_lock); 234 spin_lock(&ubi->volumes_lock);
230 /* Walk all volumes of this UBI device */ 235 /* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
238 } 243 }
239 spin_unlock(&ubi->volumes_lock); 244 spin_unlock(&ubi->volumes_lock);
240 245
241 if (vol_id < 0) 246 if (vol_id >= 0)
242 goto out_put; 247 ret = ubi_open_volume(ubi_num, vol_id, mode);
248 else
249 ret = ERR_PTR(-ENODEV);
243 250
244 ret = ubi_open_volume(ubi_num, vol_id, mode); 251 /*
245 252 * We should put the UBI device even in case of success, because
246out_put: 253 * 'ubi_open_volume()' took a reference as well.
247 module_put(THIS_MODULE); 254 */
255 ubi_put_device(ubi);
248 return ret; 256 return ret;
249} 257}
250EXPORT_SYMBOL_GPL(ubi_open_volume_nm); 258EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
256void ubi_close_volume(struct ubi_volume_desc *desc) 264void ubi_close_volume(struct ubi_volume_desc *desc)
257{ 265{
258 struct ubi_volume *vol = desc->vol; 266 struct ubi_volume *vol = desc->vol;
267 struct ubi_device *ubi = vol->ubi;
259 268
260 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 269 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
261 270
262 spin_lock(&vol->ubi->volumes_lock); 271 spin_lock(&ubi->volumes_lock);
263 switch (desc->mode) { 272 switch (desc->mode) {
264 case UBI_READONLY: 273 case UBI_READONLY:
265 vol->readers -= 1; 274 vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
270 case UBI_EXCLUSIVE: 279 case UBI_EXCLUSIVE:
271 vol->exclusive = 0; 280 vol->exclusive = 0;
272 } 281 }
273 spin_unlock(&vol->ubi->volumes_lock); 282 vol->ref_count -= 1;
283 spin_unlock(&ubi->volumes_lock);
274 284
275 kfree(desc); 285 kfree(desc);
286 put_device(&vol->dev);
287 ubi_put_device(ubi);
276 module_put(THIS_MODULE); 288 module_put(THIS_MODULE);
277} 289}
278EXPORT_SYMBOL_GPL(ubi_close_volume); 290EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
332 if (len == 0) 344 if (len == 0)
333 return 0; 345 return 0;
334 346
335 err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); 347 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
336 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 348 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
337 ubi_warn("mark volume %d as corrupted", vol_id); 349 ubi_warn("mark volume %d as corrupted", vol_id);
338 vol->corrupted = 1; 350 vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
399 if (len == 0) 411 if (len == 0)
400 return 0; 412 return 0;
401 413
402 return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); 414 return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
403} 415}
404EXPORT_SYMBOL_GPL(ubi_leb_write); 416EXPORT_SYMBOL_GPL(ubi_leb_write);
405 417
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
448 if (len == 0) 460 if (len == 0)
449 return 0; 461 return 0;
450 462
451 return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); 463 return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
452} 464}
453EXPORT_SYMBOL_GPL(ubi_leb_change); 465EXPORT_SYMBOL_GPL(ubi_leb_change);
454 466
@@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
468{ 480{
469 struct ubi_volume *vol = desc->vol; 481 struct ubi_volume *vol = desc->vol;
470 struct ubi_device *ubi = vol->ubi; 482 struct ubi_device *ubi = vol->ubi;
471 int err, vol_id = vol->vol_id; 483 int err;
472 484
473 dbg_msg("erase LEB %d:%d", vol_id, lnum); 485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
474 486
475 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
476 return -EROFS; 488 return -EROFS;
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
481 if (vol->upd_marker) 493 if (vol->upd_marker)
482 return -EBADF; 494 return -EBADF;
483 495
484 err = ubi_eba_unmap_leb(ubi, vol_id, lnum); 496 err = ubi_eba_unmap_leb(ubi, vol, lnum);
485 if (err) 497 if (err)
486 return err; 498 return err;
487 499
@@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
529{ 541{
530 struct ubi_volume *vol = desc->vol; 542 struct ubi_volume *vol = desc->vol;
531 struct ubi_device *ubi = vol->ubi; 543 struct ubi_device *ubi = vol->ubi;
532 int vol_id = vol->vol_id;
533 544
534 dbg_msg("unmap LEB %d:%d", vol_id, lnum); 545 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
535 546
536 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
537 return -EROFS; 548 return -EROFS;
@@ -542,11 +553,55 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
542 if (vol->upd_marker) 553 if (vol->upd_marker)
543 return -EBADF; 554 return -EBADF;
544 555
545 return ubi_eba_unmap_leb(ubi, vol_id, lnum); 556 return ubi_eba_unmap_leb(ubi, vol, lnum);
546} 557}
547EXPORT_SYMBOL_GPL(ubi_leb_unmap); 558EXPORT_SYMBOL_GPL(ubi_leb_unmap);
548 559
549/** 560/**
 561 * ubi_leb_map - map logical eraseblock to a physical eraseblock.
562 * @desc: volume descriptor
563 * @lnum: logical eraseblock number
564 * @dtype: expected data type
565 *
566 * This function maps an un-mapped logical eraseblock @lnum to a physical
 567 * eraseblock. This means that after a successful invocation of this
568 * function the logical eraseblock @lnum will be empty (contain only %0xFF
569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
570 * happens.
571 *
572 * This function returns zero in case of success, %-EBADF if the volume is
573 * damaged because of an interrupted update, %-EBADMSG if the logical
574 * eraseblock is already mapped, and other negative error codes in case of
575 * other failures.
576 */
577int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
578{
579 struct ubi_volume *vol = desc->vol;
580 struct ubi_device *ubi = vol->ubi;
581
582 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
583
584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
585 return -EROFS;
586
587 if (lnum < 0 || lnum >= vol->reserved_pebs)
588 return -EINVAL;
589
590 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
591 dtype != UBI_UNKNOWN)
592 return -EINVAL;
593
594 if (vol->upd_marker)
595 return -EBADF;
596
597 if (vol->eba_tbl[lnum] >= 0)
598 return -EBADMSG;
599
600 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
601}
602EXPORT_SYMBOL_GPL(ubi_leb_map);
603
604/**
550 * ubi_is_mapped - check if logical eraseblock is mapped. 605 * ubi_is_mapped - check if logical eraseblock is mapped.
551 * @desc: volume descriptor 606 * @desc: volume descriptor
552 * @lnum: logical eraseblock number 607 * @lnum: logical eraseblock number
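A hedged sketch pairing the new ubi_leb_map() with ubi_is_mapped(); the descriptor is assumed to be open read-write on a dynamic volume:

#include <linux/mtd/ubi.h>

/* Make sure LEB @lnum is mapped, so it survives an unclean reboot empty. */
static int ensure_mapped(struct ubi_volume_desc *desc, int lnum)
{
	int err = ubi_is_mapped(desc, lnum);

	if (err < 0)
		return err;
	if (err == 1)
		return 0;	/* already mapped, nothing to do */

	/* Maps @lnum to an empty physical eraseblock */
	return ubi_leb_map(desc, lnum, UBI_UNKNOWN);
}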
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2cf..93e052812012 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
79 else 79 else
80 size = vol->usable_leb_size; 80 size = vol->usable_leb_size;
81 81
82 err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
83 if (err) { 83 if (err) {
84 if (err == -EBADMSG) 84 if (err == -EBADMSG)
85 err = 1; 85 err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..05aa3e7daba1 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
286 * FIXME: but this is anyway obsolete and will be removed at 286 * FIXME: but this is anyway obsolete and will be removed at
287 * some point. 287 * some point.
288 */ 288 */
289
290 dbg_bld("using old crappy leb_ver stuff"); 289 dbg_bld("using old crappy leb_ver stuff");
291 290
291 if (v1 == v2) {
292 ubi_err("PEB %d and PEB %d have the same version %lld",
293 seb->pnum, pnum, v1);
294 return -EINVAL;
295 }
296
292 abs = v1 - v2; 297 abs = v1 - v2;
293 if (abs < 0) 298 if (abs < 0)
294 abs = -abs; 299 abs = -abs;
@@ -390,7 +395,6 @@ out_free_buf:
390 vfree(buf); 395 vfree(buf);
391out_free_vidh: 396out_free_vidh:
392 ubi_free_vid_hdr(ubi, vh); 397 ubi_free_vid_hdr(ubi, vh);
393 ubi_assert(err < 0);
394 return err; 398 return err;
395} 399}
396 400
@@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
769 */ 773 */
770static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 774static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
771{ 775{
772 long long ec; 776 long long uninitialized_var(ec);
773 int err, bitflips = 0, vol_id, ec_corr = 0; 777 int err, bitflips = 0, vol_id, ec_corr = 0;
774 778
775 dbg_bld("scan PEB %d", pnum); 779 dbg_bld("scan PEB %d", pnum);
@@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
854 } 858 }
855 859
856 vol_id = be32_to_cpu(vidh->vol_id); 860 vol_id = be32_to_cpu(vidh->vol_id);
857 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { 861 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
858 int lnum = be32_to_cpu(vidh->lnum); 862 int lnum = be32_to_cpu(vidh->lnum);
859 863
860 /* Unsupported internal volume */ 864 /* Unsupported internal volume */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a633030..457710615261 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
94 UBI_IO_BITFLIPS 94 UBI_IO_BITFLIPS
95}; 95};
96 96
97extern int ubi_devices_cnt; 97/**
98extern struct ubi_device *ubi_devices[]; 98 * struct ubi_wl_entry - wear-leveling entry.
99 * @rb: link in the corresponding RB-tree
100 * @ec: erase counter
101 * @pnum: physical eraseblock number
102 *
103 * This data structure is used in the WL unit. Each physical eraseblock has a
104 * corresponding &struct ubi_wl_entry object which may be kept in different
105 * RB-trees. See WL unit for details.
106 */
107struct ubi_wl_entry {
108 struct rb_node rb;
109 int ec;
110 int pnum;
111};
112
113/**
114 * struct ubi_ltree_entry - an entry in the lock tree.
115 * @rb: links RB-tree nodes
116 * @vol_id: volume ID of the locked logical eraseblock
117 * @lnum: locked logical eraseblock number
118 * @users: how many tasks are using this logical eraseblock or wait for it
119 * @mutex: read/write mutex to implement read/write access serialization to
120 * the (@vol_id, @lnum) logical eraseblock
121 *
122 * This data structure is used in the EBA unit to implement per-LEB locking.
123 * When a logical eraseblock is being locked, a corresponding
124 * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
125 * See EBA unit for details.
126 */
127struct ubi_ltree_entry {
128 struct rb_node rb;
129 int vol_id;
130 int lnum;
131 int users;
132 struct rw_semaphore mutex;
133};
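To make the per-LEB locking concrete, here is a sketch of the sort of lookup the EBA unit can perform on @ubi->ltree, keyed by (@vol_id, @lnum); it is an illustration of the idea, not a quote from eba.c:

	static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi,
						    int vol_id, int lnum)
	{
		struct rb_node *p = ubi->ltree.rb_node;

		while (p) {
			struct ubi_ltree_entry *le;

			le = rb_entry(p, struct ubi_ltree_entry, rb);
			if (vol_id < le->vol_id)
				p = p->rb_left;
			else if (vol_id > le->vol_id)
				p = p->rb_right;
			else if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;	/* (@vol_id, @lnum) is locked */
		}
		return NULL;
	}

Callers would hold the spinlock protecting the tree (or an equivalent) while walking it.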
99 134
100struct ubi_volume_desc; 135struct ubi_volume_desc;
101 136
@@ -105,11 +140,10 @@ struct ubi_volume_desc;
105 * @cdev: character device object to create character device 140 * @cdev: character device object to create character device
106 * @ubi: reference to the UBI device description object 141 * @ubi: reference to the UBI device description object
107 * @vol_id: volume ID 142 * @vol_id: volume ID
143 * @ref_count: volume reference count
108 * @readers: number of users holding this volume in read-only mode 144 * @readers: number of users holding this volume in read-only mode
109 * @writers: number of users holding this volume in read-write mode 145 * @writers: number of users holding this volume in read-write mode
110 * @exclusive: whether somebody holds this volume in exclusive mode 146 * @exclusive: whether somebody holds this volume in exclusive mode
111 * @removed: if the volume was removed
112 * @checked: if this static volume was checked
113 * 147 *
114 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 148 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
115 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 149 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -117,21 +151,30 @@ struct ubi_volume_desc;
117 * @used_ebs: how many logical eraseblocks in this volume contain data 151 * @used_ebs: how many logical eraseblocks in this volume contain data
118 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock 152 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
119 * @used_bytes: how many bytes of data this volume contains 153 * @used_bytes: how many bytes of data this volume contains
120 * @upd_marker: non-zero if the update marker is set for this volume
121 * @corrupted: non-zero if the volume is corrupted (static volumes only)
122 * @alignment: volume alignment 154 * @alignment: volume alignment
123 * @data_pad: how many bytes are not used at the end of physical eraseblocks to 155 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
124 * satisfy the requested alignment 156 * satisfy the requested alignment
125 * @name_len: volume name length 157 * @name_len: volume name length
126 * @name: volume name 158 * @name: volume name
127 * 159 *
128 * @updating: whether the volume is being updated
129 * @upd_ebs: how many eraseblocks are expected to be updated 160 * @upd_ebs: how many eraseblocks are expected to be updated
130 * @upd_bytes: how many bytes are expected to be received 161 * @ch_lnum: LEB number which is being changed by the atomic LEB change
131 * @upd_received: how many update bytes were already received 162 * operation
132 * @upd_buf: update buffer which is used to collect update data 163 * @ch_dtype: data persistency type which is being changed by the atomic LEB
164 * change operation
165 * @upd_bytes: how many bytes are expected to be received for volume update or
166 * atomic LEB change
167 * @upd_received: how many bytes were already received for volume update or
168 * atomic LEB change
169 * @upd_buf: update buffer which is used to collect update data or data for
170 * atomic LEB change
133 * 171 *
134 * @eba_tbl: EBA table of this volume (LEB->PEB mapping) 172 * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
173 * @checked: %1 if this static volume was checked
174 * @corrupted: %1 if the volume is corrupted (static volumes only)
175 * @upd_marker: %1 if the update marker is set for this volume
176 * @updating: %1 if the volume is being updated
177 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
135 * 178 *
136 * @gluebi_desc: gluebi UBI volume descriptor 179 * @gluebi_desc: gluebi UBI volume descriptor
137 * @gluebi_refcount: reference count of the gluebi MTD device 180 * @gluebi_refcount: reference count of the gluebi MTD device
@@ -150,11 +193,10 @@ struct ubi_volume {
150 struct cdev cdev; 193 struct cdev cdev;
151 struct ubi_device *ubi; 194 struct ubi_device *ubi;
152 int vol_id; 195 int vol_id;
196 int ref_count;
153 int readers; 197 int readers;
154 int writers; 198 int writers;
155 int exclusive; 199 int exclusive;
156 int removed;
157 int checked;
158 200
159 int reserved_pebs; 201 int reserved_pebs;
160 int vol_type; 202 int vol_type;
@@ -162,23 +204,31 @@ struct ubi_volume {
162 int used_ebs; 204 int used_ebs;
163 int last_eb_bytes; 205 int last_eb_bytes;
164 long long used_bytes; 206 long long used_bytes;
165 int upd_marker;
166 int corrupted;
167 int alignment; 207 int alignment;
168 int data_pad; 208 int data_pad;
169 int name_len; 209 int name_len;
170 char name[UBI_VOL_NAME_MAX+1]; 210 char name[UBI_VOL_NAME_MAX+1];
171 211
172 int updating;
173 int upd_ebs; 212 int upd_ebs;
213 int ch_lnum;
214 int ch_dtype;
174 long long upd_bytes; 215 long long upd_bytes;
175 long long upd_received; 216 long long upd_received;
176 void *upd_buf; 217 void *upd_buf;
177 218
178 int *eba_tbl; 219 int *eba_tbl;
220 int checked:1;
221 int corrupted:1;
222 int upd_marker:1;
223 int updating:1;
224 int changing_leb:1;
179 225
180#ifdef CONFIG_MTD_UBI_GLUEBI 226#ifdef CONFIG_MTD_UBI_GLUEBI
181 /* Gluebi-related stuff may be compiled out */ 227 /*
228 * Gluebi-related stuff may be compiled out.
229 * TODO: this should not be built into UBI but should be a separate
230 * ubimtd driver which works on top of UBI and emulates MTD devices.
231 */
182 struct ubi_volume_desc *gluebi_desc; 232 struct ubi_volume_desc *gluebi_desc;
183 int gluebi_refcount; 233 int gluebi_refcount;
184 struct mtd_info gluebi_mtd; 234 struct mtd_info gluebi_mtd;
@@ -200,28 +250,31 @@ struct ubi_wl_entry;
200 250
201/** 251/**
202 * struct ubi_device - UBI device description structure 252 * struct ubi_device - UBI device description structure
203 * @dev: class device object to use the Linux device model 253 * @dev: UBI device object to use the Linux device model
204 * @cdev: character device object to create character device 254 * @cdev: character device object to create character device
205 * @ubi_num: UBI device number 255 * @ubi_num: UBI device number
206 * @ubi_name: UBI device name 256 * @ubi_name: UBI device name
207 * @major: character device major number
208 * @vol_count: number of volumes in this UBI device 257 * @vol_count: number of volumes in this UBI device
209 * @volumes: volumes of this UBI device 258 * @volumes: volumes of this UBI device
210 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs, 259 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
211 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, 260 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
212 * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and 261 * @vol->readers, @vol->writers, @vol->exclusive,
213 * @vol->eba_tbl. 262 * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
263 * @ref_count: count of references on the UBI device
214 * 264 *
215 * @rsvd_pebs: count of reserved physical eraseblocks 265 * @rsvd_pebs: count of reserved physical eraseblocks
216 * @avail_pebs: count of available physical eraseblocks 266 * @avail_pebs: count of available physical eraseblocks
217 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB 267 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
218 * handling 268 * handling
219 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 269 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
220 * 270 *
271 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
272 * of UBI initialization
221 * @vtbl_slots: how many slots are available in the volume table 273 * @vtbl_slots: how many slots are available in the volume table
222 * @vtbl_size: size of the volume table in bytes 274 * @vtbl_size: size of the volume table in bytes
223 * @vtbl: in-RAM volume table copy 275 * @vtbl: in-RAM volume table copy
224 * @vtbl_mutex: protects on-flash volume table 276 * @volumes_mutex: protects on-flash volume table and serializes volume
277 * changes, like creation, deletion, update, resize
225 * 278 *
226 * @max_ec: current highest erase counter value 279 * @max_ec: current highest erase counter value
227 * @mean_ec: current mean erase counter value 280 * @mean_ec: current mean erase counter value
@@ -238,15 +291,15 @@ struct ubi_wl_entry;
238 * @prot.pnum: protection tree indexed by physical eraseblock numbers 291 * @prot.pnum: protection tree indexed by physical eraseblock numbers
239 * @prot.aec: protection tree indexed by absolute erase counter value 292 * @prot.aec: protection tree indexed by absolute erase counter value
240 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 293 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
241 * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works 294 * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works
242 * fields 295 * fields
296 * @move_mutex: serializes eraseblock moves
243 * @wl_scheduled: non-zero if the wear-leveling was scheduled 297 * @wl_scheduled: non-zero if the wear-leveling was scheduled
244 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 298 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
245 * physical eraseblock 299 * physical eraseblock
246 * @abs_ec: absolute erase counter 300 * @abs_ec: absolute erase counter
247 * @move_from: physical eraseblock from where the data is being moved 301 * @move_from: physical eraseblock from where the data is being moved
248 * @move_to: physical eraseblock where the data is being moved to 302 * @move_to: physical eraseblock where the data is being moved to
249 * @move_from_put: if the "from" PEB was put
250 * @move_to_put: if the "to" PEB was put 303 * @move_to_put: if the "to" PEB was put
251 * @works: list of pending works 304 * @works: list of pending works
252 * @works_count: count of pending works 305 * @works_count: count of pending works
@@ -273,13 +326,13 @@ struct ubi_wl_entry;
273 * @hdrs_min_io_size 326 * @hdrs_min_io_size
274 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 327 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
275 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 328 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
276 * not 329 * not
277 * @mtd: MTD device descriptor 330 * @mtd: MTD device descriptor
278 * 331 *
279 * @peb_buf1: a buffer of PEB size used for different purposes 332 * @peb_buf1: a buffer of PEB size used for different purposes
280 * @peb_buf2: another buffer of PEB size used for different purposes 333 * @peb_buf2: another buffer of PEB size used for different purposes
281 * @buf_mutex: protects @peb_buf1 and @peb_buf2 334 * @buf_mutex: protects @peb_buf1 and @peb_buf2
282 * @dbg_peb_buf: buffer of PEB size used for debugging 335 * @dbg_peb_buf: buffer of PEB size used for debugging
283 * @dbg_buf_mutex: protects @dbg_peb_buf 336 * @dbg_buf_mutex: protects @dbg_peb_buf
284 */ 337 */
285struct ubi_device { 338struct ubi_device {
@@ -287,22 +340,24 @@ struct ubi_device {
287 struct device dev; 340 struct device dev;
288 int ubi_num; 341 int ubi_num;
289 char ubi_name[sizeof(UBI_NAME_STR)+5]; 342 char ubi_name[sizeof(UBI_NAME_STR)+5];
290 int major;
291 int vol_count; 343 int vol_count;
292 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 344 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
293 spinlock_t volumes_lock; 345 spinlock_t volumes_lock;
346 int ref_count;
294 347
295 int rsvd_pebs; 348 int rsvd_pebs;
296 int avail_pebs; 349 int avail_pebs;
297 int beb_rsvd_pebs; 350 int beb_rsvd_pebs;
298 int beb_rsvd_level; 351 int beb_rsvd_level;
299 352
353 int autoresize_vol_id;
300 int vtbl_slots; 354 int vtbl_slots;
301 int vtbl_size; 355 int vtbl_size;
302 struct ubi_vtbl_record *vtbl; 356 struct ubi_vtbl_record *vtbl;
303 struct mutex vtbl_mutex; 357 struct mutex volumes_mutex;
304 358
305 int max_ec; 359 int max_ec;
360 /* TODO: mean_ec is not updated run-time, fix */
306 int mean_ec; 361 int mean_ec;
307 362
308 /* EBA unit's stuff */ 363 /* EBA unit's stuff */
@@ -320,12 +375,13 @@ struct ubi_device {
320 struct rb_root aec; 375 struct rb_root aec;
321 } prot; 376 } prot;
322 spinlock_t wl_lock; 377 spinlock_t wl_lock;
378 struct mutex move_mutex;
379 struct rw_semaphore work_sem;
323 int wl_scheduled; 380 int wl_scheduled;
324 struct ubi_wl_entry **lookuptbl; 381 struct ubi_wl_entry **lookuptbl;
325 unsigned long long abs_ec; 382 unsigned long long abs_ec;
326 struct ubi_wl_entry *move_from; 383 struct ubi_wl_entry *move_from;
327 struct ubi_wl_entry *move_to; 384 struct ubi_wl_entry *move_to;
328 int move_from_put;
329 int move_to_put; 385 int move_to_put;
330 struct list_head works; 386 struct list_head works;
331 int works_count; 387 int works_count;
@@ -355,15 +411,19 @@ struct ubi_device {
355 void *peb_buf1; 411 void *peb_buf1;
356 void *peb_buf2; 412 void *peb_buf2;
357 struct mutex buf_mutex; 413 struct mutex buf_mutex;
414 struct mutex ckvol_mutex;
358#ifdef CONFIG_MTD_UBI_DEBUG 415#ifdef CONFIG_MTD_UBI_DEBUG
359 void *dbg_peb_buf; 416 void *dbg_peb_buf;
360 struct mutex dbg_buf_mutex; 417 struct mutex dbg_buf_mutex;
361#endif 418#endif
362}; 419};
363 420
421extern struct kmem_cache *ubi_wl_entry_slab;
422extern struct file_operations ubi_ctrl_cdev_operations;
364extern struct file_operations ubi_cdev_operations; 423extern struct file_operations ubi_cdev_operations;
365extern struct file_operations ubi_vol_cdev_operations; 424extern struct file_operations ubi_vol_cdev_operations;
366extern struct class *ubi_class; 425extern struct class *ubi_class;
426extern struct mutex ubi_devices_mutex;
367 427
368/* vtbl.c */ 428/* vtbl.c */
369int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 429int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
374int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 434int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
375int ubi_remove_volume(struct ubi_volume_desc *desc); 435int ubi_remove_volume(struct ubi_volume_desc *desc);
376int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 436int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
377int ubi_add_volume(struct ubi_device *ubi, int vol_id); 437int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
378void ubi_free_volume(struct ubi_device *ubi, int vol_id); 438void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
379 439
380/* upd.c */ 440/* upd.c */
381int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); 441int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
382int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 442 long long bytes);
443int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
383 const void __user *buf, int count); 444 const void __user *buf, int count);
445int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
446 const struct ubi_leb_change_req *req);
447int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
448 const void __user *buf, int count);
384 449
385/* misc.c */ 450/* misc.c */
386int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); 451int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
@@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
399#endif 464#endif
400 465
401/* eba.c */ 466/* eba.c */
402int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); 467int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
403int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 468 int lnum);
404 int offset, int len, int check); 469int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
405int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 470 void *buf, int offset, int len, int check);
471int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
406 const void *buf, int offset, int len, int dtype); 472 const void *buf, int offset, int len, int dtype);
407int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 473int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
408 const void *buf, int len, int dtype, 474 int lnum, const void *buf, int len, int dtype,
409 int used_ebs); 475 int used_ebs);
410int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 476int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
411 const void *buf, int len, int dtype); 477 int lnum, const void *buf, int len, int dtype);
412int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 478int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
413 struct ubi_vid_hdr *vid_hdr); 479 struct ubi_vid_hdr *vid_hdr);
414int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 480int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
421int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 487int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
422int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 488int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
423void ubi_wl_close(struct ubi_device *ubi); 489void ubi_wl_close(struct ubi_device *ubi);
490int ubi_thread(void *u);
424 491
425/* io.c */ 492/* io.c */
426int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, 493int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
439int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 506int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
440 struct ubi_vid_hdr *vid_hdr); 507 struct ubi_vid_hdr *vid_hdr);
441 508
509/* build.c */
510int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
511int ubi_detach_mtd_dev(int ubi_num, int anyway);
512struct ubi_device *ubi_get_device(int ubi_num);
513void ubi_put_device(struct ubi_device *ubi);
514struct ubi_device *ubi_get_by_major(int major);
515int ubi_major2num(int major);
516
442/* 517/*
443 * ubi_rb_for_each_entry - walk an RB-tree. 518 * ubi_rb_for_each_entry - walk an RB-tree.
444 * @rb: a pointer to type 'struct rb_node' to use as a loop counter 519 * @rb: a pointer to type 'struct rb_node' to use as a loop counter
@@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
523 */ 598 */
524static inline void ubi_ro_mode(struct ubi_device *ubi) 599static inline void ubi_ro_mode(struct ubi_device *ubi)
525{ 600{
526 ubi->ro_mode = 1; 601 if (!ubi->ro_mode) {
527 ubi_warn("switch to read-only mode"); 602 ubi->ro_mode = 1;
603 ubi_warn("switch to read-only mode");
604 }
528} 605}
529 606
530/** 607/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a8328..ddaa1a56cc69 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -22,7 +22,8 @@
22 */ 22 */
23 23
24/* 24/*
25 * This file contains implementation of the volume update functionality. 25 * This file contains the implementation of the volume update and atomic LEB change
26 * functionality.
26 * 27 *
27 * The update operation is based on the per-volume update marker which is 28 * The update operation is based on the per-volume update marker which is
28 * stored in the volume table. The update marker is set before the update 29 * stored in the volume table. The update marker is set before the update
@@ -45,29 +46,31 @@
45/** 46/**
46 * set_update_marker - set update marker. 47 * set_update_marker - set update marker.
47 * @ubi: UBI device description object 48 * @ubi: UBI device description object
48 * @vol_id: volume ID 49 * @vol: volume description object
49 * 50 *
50 * This function sets the update marker flag for volume @vol_id. Returns zero 51 * This function sets the update marker flag for volume @vol. Returns zero
51 * in case of success and a negative error code in case of failure. 52 * in case of success and a negative error code in case of failure.
52 */ 53 */
53static int set_update_marker(struct ubi_device *ubi, int vol_id) 54static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
54{ 55{
55 int err; 56 int err;
56 struct ubi_vtbl_record vtbl_rec; 57 struct ubi_vtbl_record vtbl_rec;
57 struct ubi_volume *vol = ubi->volumes[vol_id];
58 58
59 dbg_msg("set update marker for volume %d", vol_id); 59 dbg_msg("set update marker for volume %d", vol->vol_id);
60 60
61 if (vol->upd_marker) { 61 if (vol->upd_marker) {
62 ubi_assert(ubi->vtbl[vol_id].upd_marker); 62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
63 dbg_msg("already set"); 63 dbg_msg("already set");
64 return 0; 64 return 0;
65 } 65 }
66 66
67 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 67 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
68 sizeof(struct ubi_vtbl_record));
68 vtbl_rec.upd_marker = 1; 69 vtbl_rec.upd_marker = 1;
69 70
70 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 71 mutex_lock(&ubi->volumes_mutex);
72 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
73 mutex_unlock(&ubi->volumes_mutex);
71 vol->upd_marker = 1; 74 vol->upd_marker = 1;
72 return err; 75 return err;
73} 76}
@@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
75/** 78/**
76 * clear_update_marker - clear update marker. 79 * clear_update_marker - clear update marker.
77 * @ubi: UBI device description object 80 * @ubi: UBI device description object
78 * @vol_id: volume ID 81 * @vol: volume description object
79 * @bytes: new data size in bytes 82 * @bytes: new data size in bytes
80 * 83 *
81 * This function clears the update marker for volume @vol_id, sets new volume 84 * This function clears the update marker for volume @vol, sets new volume
82 * data size and clears the "corrupted" flag (static volumes only). Returns 85 * data size and clears the "corrupted" flag (static volumes only). Returns
83 * zero in case of success and a negative error code in case of failure. 86 * zero in case of success and a negative error code in case of failure.
84 */ 87 */
85static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) 88static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
89 long long bytes)
86{ 90{
87 int err; 91 int err;
88 uint64_t tmp; 92 uint64_t tmp;
89 struct ubi_vtbl_record vtbl_rec; 93 struct ubi_vtbl_record vtbl_rec;
90 struct ubi_volume *vol = ubi->volumes[vol_id];
91 94
92 dbg_msg("clear update marker for volume %d", vol_id); 95 dbg_msg("clear update marker for volume %d", vol->vol_id);
93 96
94 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
98 sizeof(struct ubi_vtbl_record));
95 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 99 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
96 vtbl_rec.upd_marker = 0; 100 vtbl_rec.upd_marker = 0;
97 101
@@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
106 vol->last_eb_bytes = vol->usable_leb_size; 110 vol->last_eb_bytes = vol->usable_leb_size;
107 } 111 }
108 112
109 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 113 mutex_lock(&ubi->volumes_mutex);
114 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
115 mutex_unlock(&ubi->volumes_mutex);
110 vol->upd_marker = 0; 116 vol->upd_marker = 0;
111 return err; 117 return err;
112} 118}
@@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
114/** 120/**
115 * ubi_start_update - start volume update. 121 * ubi_start_update - start volume update.
116 * @ubi: UBI device description object 122 * @ubi: UBI device description object
117 * @vol_id: volume ID 123 * @vol: volume description object
118 * @bytes: update bytes 124 * @bytes: update bytes
119 * 125 *
120 * This function starts volume update operation. If @bytes is zero, the volume 126 * This function starts volume update operation. If @bytes is zero, the volume
121 * is just wiped out. Returns zero in case of success and a negative error code 127 * is just wiped out. Returns zero in case of success and a negative error code
122 * in case of failure. 128 * in case of failure.
123 */ 129 */
124int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) 130int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
131 long long bytes)
125{ 132{
126 int i, err; 133 int i, err;
127 uint64_t tmp; 134 uint64_t tmp;
128 struct ubi_volume *vol = ubi->volumes[vol_id];
129 135
130 dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); 136 dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
137 ubi_assert(!vol->updating && !vol->changing_leb);
131 vol->updating = 1; 138 vol->updating = 1;
132 139
133 err = set_update_marker(ubi, vol_id); 140 err = set_update_marker(ubi, vol);
134 if (err) 141 if (err)
135 return err; 142 return err;
136 143
137 /* Before updating - wipe out the volume */ 144 /* Before updating - wipe out the volume */
138 for (i = 0; i < vol->reserved_pebs; i++) { 145 for (i = 0; i < vol->reserved_pebs; i++) {
139 err = ubi_eba_unmap_leb(ubi, vol_id, i); 146 err = ubi_eba_unmap_leb(ubi, vol, i);
140 if (err) 147 if (err)
141 return err; 148 return err;
142 } 149 }
143 150
144 if (bytes == 0) { 151 if (bytes == 0) {
145 err = clear_update_marker(ubi, vol_id, 0); 152 err = clear_update_marker(ubi, vol, 0);
146 if (err) 153 if (err)
147 return err; 154 return err;
148 err = ubi_wl_flush(ubi); 155 err = ubi_wl_flush(ubi);
@@ -163,9 +170,42 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
163} 170}
164 171
165/** 172/**
173 * ubi_start_leb_change - start atomic LEB change.
174 * @ubi: UBI device description object
175 * @vol: volume description object
176 * @req: operation request
177 *
178 * This function starts atomic LEB change operation. Returns zero in case of
179 * success and a negative error code in case of failure.
180 */
181int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
182 const struct ubi_leb_change_req *req)
183{
184 ubi_assert(!vol->updating && !vol->changing_leb);
185
186 dbg_msg("start changing LEB %d:%d, %u bytes",
187 vol->vol_id, req->lnum, req->bytes);
188 if (req->bytes == 0)
189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
190 req->dtype);
191
192 vol->upd_bytes = req->bytes;
193 vol->upd_received = 0;
194 vol->changing_leb = 1;
195 vol->ch_lnum = req->lnum;
196 vol->ch_dtype = req->dtype;
197
198 vol->upd_buf = vmalloc(req->bytes);
199 if (!vol->upd_buf)
200 return -ENOMEM;
201
202 return 0;
203}
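Together with ubi_more_leb_change_data() further down, the intended calling sequence is two-phase; in sketch form (the request fields mirror struct ubi_leb_change_req, while `ubuf` and `count` stand in for a caller-supplied user-space buffer and its length):

	struct ubi_leb_change_req req = {
		.lnum  = 3,			/* example LEB number */
		.bytes = count,
		.dtype = UBI_UNKNOWN,
	};
	int err;

	err = ubi_start_leb_change(ubi, vol, &req);	/* allocates upd_buf */
	if (err)
		return err;

	err = ubi_more_leb_change_data(ubi, vol, ubuf, count);
	if (err < 0)
		return err;
	/* A non-negative return on the final chunk means the LEB was
	 * changed atomically. */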
204
205/**
166 * write_leb - write update data. 206 * write_leb - write update data.
167 * @ubi: UBI device description object 207 * @ubi: UBI device description object
168 * @vol_id: volume ID 208 * @vol: volume description object
169 * @lnum: logical eraseblock number 209 * @lnum: logical eraseblock number
170 * @buf: data to write 210 * @buf: data to write
171 * @len: data size 211 * @len: data size
@@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
191 * This function returns zero in case of success and a negative error code in 231 * This function returns zero in case of success and a negative error code in
192 * case of failure. 232 * case of failure.
193 */ 233 */
194static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 234static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
195 int len, int used_ebs) 235 void *buf, int len, int used_ebs)
196{ 236{
197 int err, l; 237 int err;
198 struct ubi_volume *vol = ubi->volumes[vol_id];
199 238
200 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
201 	l = ALIGN(len, ubi->min_io_size); 240 	memset(buf + len, 0xFF, ALIGN(len, ubi->min_io_size) - len);
202 	memset(buf + len, 0xFF, l - len); 241 	len = ALIGN(len, ubi->min_io_size);
203 242
204 l = ubi_calc_data_len(ubi, buf, l); 243 len = ubi_calc_data_len(ubi, buf, len);
205 if (l == 0) { 244 if (len == 0) {
206 dbg_msg("all %d bytes contain 0xFF - skip", len); 245 dbg_msg("all %d bytes contain 0xFF - skip", len);
207 return 0; 246 return 0;
208 } 247 }
209 if (len != l)
210 dbg_msg("skip last %d bytes (0xFF)", len - l);
211 248
212 err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, 249 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
213 UBI_UNKNOWN);
214 } else { 250 } else {
215 /* 251 /*
216 * When writing static volume, and this is the last logical 252 * When writing static volume, and this is the last logical
@@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
222 * contain zeros, not random trash. 258 * contain zeros, not random trash.
223 */ 259 */
224 memset(buf + len, 0, vol->usable_leb_size - len); 260 memset(buf + len, 0, vol->usable_leb_size - len);
225 err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, 261 err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
226 UBI_UNKNOWN, used_ebs); 262 UBI_UNKNOWN, used_ebs);
227 } 263 }
228 264
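One ordering detail worth calling out: on the new side of the dynamic-volume branch above, the 0xFF pad length has to be computed from the data length before it is rounded up to @min_io_size, otherwise the pad degenerates to zero bytes. The same arithmetic in isolation, assuming a min_io_size of 512 for the example:

	int data_len = 700;				  /* bytes received */
	int aligned  = ALIGN(data_len, 512);		  /* 1024           */

	memset(buf + data_len, 0xFF, aligned - data_len); /* pad 324 bytes  */
	data_len = ubi_calc_data_len(ubi, buf, aligned);  /* trim 0xFF tail */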
@@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
236 * @count: how many bytes to write 272 * @count: how many bytes to write
237 * 273 *
238 * This function writes more data to the volume which is being updated. It may 274 * This function writes more data to the volume which is being updated. It may
239 * be called arbitrary number of times until all of the update data arrive. 275 * be called an arbitrary number of times until all the update data arrives. This
240 * This function returns %0 in case of success, number of bytes written during 276 * function returns %0 in case of success, number of bytes written during the
241 * the last call if the whole volume update was successfully finished, and a 277 * last call if the whole volume update has been successfully finished, and a
242 * negative error code in case of failure. 278 * negative error code in case of failure.
243 */ 279 */
244int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 280int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
245 const void __user *buf, int count) 281 const void __user *buf, int count)
246{ 282{
247 uint64_t tmp; 283 uint64_t tmp;
248 struct ubi_volume *vol = ubi->volumes[vol_id];
249 int lnum, offs, err = 0, len, to_write = count; 284 int lnum, offs, err = 0, len, to_write = count;
250 285
251 dbg_msg("write %d of %lld bytes, %lld already passed", 286 dbg_msg("write %d of %lld bytes, %lld already passed",
@@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
290 * is the last chunk, it's time to flush the buffer. 325 * is the last chunk, it's time to flush the buffer.
291 */ 326 */
292 ubi_assert(flush_len <= vol->usable_leb_size); 327 ubi_assert(flush_len <= vol->usable_leb_size);
293 err = write_leb(ubi, vol_id, lnum, vol->upd_buf, 328 err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
294 flush_len, vol->upd_ebs); 329 vol->upd_ebs);
295 if (err) 330 if (err)
296 return err; 331 return err;
297 } 332 }
@@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
318 353
319 if (len == vol->usable_leb_size || 354 if (len == vol->usable_leb_size ||
320 vol->upd_received + len == vol->upd_bytes) { 355 vol->upd_received + len == vol->upd_bytes) {
321 err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, 356 err = write_leb(ubi, vol, lnum, vol->upd_buf,
322 vol->upd_ebs); 357 len, vol->upd_ebs);
323 if (err) 358 if (err)
324 break; 359 break;
325 } 360 }
@@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
333 ubi_assert(vol->upd_received <= vol->upd_bytes); 368 ubi_assert(vol->upd_received <= vol->upd_bytes);
334 if (vol->upd_received == vol->upd_bytes) { 369 if (vol->upd_received == vol->upd_bytes) {
335 /* The update is finished, clear the update marker */ 370 /* The update is finished, clear the update marker */
336 err = clear_update_marker(ubi, vol_id, vol->upd_bytes); 371 err = clear_update_marker(ubi, vol, vol->upd_bytes);
337 if (err) 372 if (err)
338 return err; 373 return err;
339 err = ubi_wl_flush(ubi); 374 err = ubi_wl_flush(ubi);
340 if (err == 0) { 375 if (err == 0) {
376 vol->updating = 0;
341 err = to_write; 377 err = to_write;
342 vfree(vol->upd_buf); 378 vfree(vol->upd_buf);
343 vol->updating = 0;
344 } 379 }
345 } 380 }
346 381
347 return err; 382 return err;
348} 383}
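For context, the expected caller is the volume character-device write handler; a simplified sketch of that loop body (error handling trimmed, `ubuf` and `count` assumed from the caller):

	err = ubi_more_update_data(ubi, vol, ubuf, count);
	if (err < 0)
		return err;
	if (err) {
		/* Non-zero return: the whole update just finished, so the
		 * volume contents can be verified before reporting success. */
		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;
	}
	return count;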
384
385/**
386 * ubi_more_leb_change_data - accept more data for atomic LEB change.
387 * @vol: volume description object
388 * @buf: write data (user-space memory buffer)
389 * @count: how many bytes to write
390 *
391 * This function accepts more data for the volume which is under the
392 * "atomic LEB change" operation. It may be called an arbitrary number of times
393 * until all data arrives. This function returns %0 in case of success, number
394 * of bytes written during the last call if the whole "atomic LEB change"
395 * operation has been successfully finished, and a negative error code in case
396 * of failure.
397 */
398int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
399 const void __user *buf, int count)
400{
401 int err;
402
403 dbg_msg("write %d of %lld bytes, %lld already passed",
404 count, vol->upd_bytes, vol->upd_received);
405
406 if (ubi->ro_mode)
407 return -EROFS;
408
409 if (vol->upd_received + count > vol->upd_bytes)
410 count = vol->upd_bytes - vol->upd_received;
411
412 err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
413 if (err)
414 return -EFAULT;
415
416 vol->upd_received += count;
417
418 if (vol->upd_received == vol->upd_bytes) {
419 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
420
421 memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
422 len = ubi_calc_data_len(ubi, vol->upd_buf, len);
423 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
424 vol->upd_buf, len, UBI_UNKNOWN);
425 if (err)
426 return err;
427 }
428
429 ubi_assert(vol->upd_received <= vol->upd_bytes);
430 if (vol->upd_received == vol->upd_bytes) {
431 vol->changing_leb = 0;
432 err = count;
433 vfree(vol->upd_buf);
434 }
435
436 return err;
437}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2b..a3ca2257e601 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
63 * B. process 2 removes volume Y; 63 * B. process 2 removes volume Y;
64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
65 * 65 *
66 * What we want to do in a situation like that is to return error when the file 66 * In this situation, this function will return %-ENODEV because it will find
67 * is read. This is done by means of the 'removed' flag and the 'vol_lock' of 67 * out that the volume was removed from the @ubi->volumes array.
68 * the UBI volume description object.
69 */ 68 */
70static ssize_t vol_attribute_show(struct device *dev, 69static ssize_t vol_attribute_show(struct device *dev,
71 struct device_attribute *attr, char *buf) 70 struct device_attribute *attr, char *buf)
72{ 71{
73 int ret; 72 int ret;
74 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 73 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
74 struct ubi_device *ubi;
75 75
76 spin_lock(&vol->ubi->volumes_lock); 76 ubi = ubi_get_device(vol->ubi->ubi_num);
77 if (vol->removed) { 77 if (!ubi)
78 spin_unlock(&vol->ubi->volumes_lock); 78 return -ENODEV;
79
80 spin_lock(&ubi->volumes_lock);
81 if (!ubi->volumes[vol->vol_id]) {
82 spin_unlock(&ubi->volumes_lock);
83 ubi_put_device(ubi);
79 return -ENODEV; 84 return -ENODEV;
80 } 85 }
86 /* Take a reference to prevent volume removal */
87 vol->ref_count += 1;
88 spin_unlock(&ubi->volumes_lock);
89
81 if (attr == &attr_vol_reserved_ebs) 90 if (attr == &attr_vol_reserved_ebs)
82 ret = sprintf(buf, "%d\n", vol->reserved_pebs); 91 ret = sprintf(buf, "%d\n", vol->reserved_pebs);
83 else if (attr == &attr_vol_type) { 92 else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
94 ret = sprintf(buf, "%d\n", vol->corrupted); 103 ret = sprintf(buf, "%d\n", vol->corrupted);
95 else if (attr == &attr_vol_alignment) 104 else if (attr == &attr_vol_alignment)
96 ret = sprintf(buf, "%d\n", vol->alignment); 105 ret = sprintf(buf, "%d\n", vol->alignment);
97 else if (attr == &attr_vol_usable_eb_size) { 106 else if (attr == &attr_vol_usable_eb_size)
98 ret = sprintf(buf, "%d\n", vol->usable_leb_size); 107 ret = sprintf(buf, "%d\n", vol->usable_leb_size);
99 } else if (attr == &attr_vol_data_bytes) 108 else if (attr == &attr_vol_data_bytes)
100 ret = sprintf(buf, "%lld\n", vol->used_bytes); 109 ret = sprintf(buf, "%lld\n", vol->used_bytes);
101 else if (attr == &attr_vol_upd_marker) 110 else if (attr == &attr_vol_upd_marker)
102 ret = sprintf(buf, "%d\n", vol->upd_marker); 111 ret = sprintf(buf, "%d\n", vol->upd_marker);
103 else 112 else
104 BUG(); 113 /* This must be a bug */
105 spin_unlock(&vol->ubi->volumes_lock); 114 ret = -EINVAL;
115
116 /* We've done the operation, drop volume and UBI device references */
117 spin_lock(&ubi->volumes_lock);
118 vol->ref_count -= 1;
119 ubi_assert(vol->ref_count >= 0);
120 spin_unlock(&ubi->volumes_lock);
121 ubi_put_device(ubi);
106 return ret; 122 return ret;
107} 123}
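The reference taken above is what gives the -EBUSY check in ubi_remove_volume() (later in this patch) its teeth; the removal side, condensed to a sketch:

	spin_lock(&ubi->volumes_lock);
	if (vol->ref_count > 1) {
		/* e.g. a vol_attribute_show() reader is still in flight */
		spin_unlock(&ubi->volumes_lock);
		return -EBUSY;
	}
	ubi->volumes[vol_id] = NULL;	/* new readers now see -ENODEV */
	spin_unlock(&ubi->volumes_lock);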
108 124
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
110static void vol_release(struct device *dev) 126static void vol_release(struct device *dev)
111{ 127{
112 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
113 ubi_assert(vol->removed); 129
114 kfree(vol); 130 kfree(vol);
115} 131}
116 132
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
152 if (err) 168 if (err)
153 return err; 169 return err;
154 err = device_create_file(&vol->dev, &attr_vol_upd_marker); 170 err = device_create_file(&vol->dev, &attr_vol_upd_marker);
155 if (err) 171 return err;
156 return err;
157 return 0;
158} 172}
159 173
160/** 174/**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
180 * @req: volume creation request 194 * @req: volume creation request
181 * 195 *
182 * This function creates the volume described by @req. If @req->vol_id is 196 * This function creates the volume described by @req. If @req->vol_id is
183 * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume 197 * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
184 * and saves it in @req->vol_id. Returns zero in case of success and a negative 198 * and saves it in @req->vol_id. Returns zero in case of success and a negative
185 * error code in case of failure. 199 * error code in case of failure. Note, the caller has to have the
200 * @ubi->volumes_mutex locked.
186 */ 201 */
187int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 202int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
188{ 203{
189 int i, err, vol_id = req->vol_id; 204 int i, err, vol_id = req->vol_id, dont_free = 0;
190 struct ubi_volume *vol; 205 struct ubi_volume *vol;
191 struct ubi_vtbl_record vtbl_rec; 206 struct ubi_vtbl_record vtbl_rec;
192 uint64_t bytes; 207 uint64_t bytes;
208 dev_t dev;
193 209
194 if (ubi->ro_mode) 210 if (ubi->ro_mode)
195 return -EROFS; 211 return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
199 return -ENOMEM; 215 return -ENOMEM;
200 216
201 spin_lock(&ubi->volumes_lock); 217 spin_lock(&ubi->volumes_lock);
202
203 if (vol_id == UBI_VOL_NUM_AUTO) { 218 if (vol_id == UBI_VOL_NUM_AUTO) {
204 /* Find unused volume ID */ 219 /* Find unused volume ID */
205 dbg_msg("search for vacant volume ID"); 220 dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
252 } 267 }
253 ubi->avail_pebs -= vol->reserved_pebs; 268 ubi->avail_pebs -= vol->reserved_pebs;
254 ubi->rsvd_pebs += vol->reserved_pebs; 269 ubi->rsvd_pebs += vol->reserved_pebs;
270 spin_unlock(&ubi->volumes_lock);
255 271
256 vol->vol_id = vol_id; 272 vol->vol_id = vol_id;
257 vol->alignment = req->alignment; 273 vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
259 vol->vol_type = req->vol_type; 275 vol->vol_type = req->vol_type;
260 vol->name_len = req->name_len; 276 vol->name_len = req->name_len;
261 memcpy(vol->name, req->name, vol->name_len + 1); 277 memcpy(vol->name, req->name, vol->name_len + 1);
262 vol->exclusive = 1;
263 vol->ubi = ubi; 278 vol->ubi = ubi;
264 ubi->volumes[vol_id] = vol;
265 spin_unlock(&ubi->volumes_lock);
266 279
267 /* 280 /*
268 * Finish all pending erases because there may be some LEBs belonging 281 * Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
299 /* Register character device for the volume */ 312 /* Register character device for the volume */
300 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 313 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
301 vol->cdev.owner = THIS_MODULE; 314 vol->cdev.owner = THIS_MODULE;
302 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); 315 dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
316 err = cdev_add(&vol->cdev, dev, 1);
303 if (err) { 317 if (err) {
304 ubi_err("cannot add character device for volume %d", vol_id); 318 ubi_err("cannot add character device");
305 goto out_mapping; 319 goto out_mapping;
306 } 320 }
307 321
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
311 325
312 vol->dev.release = vol_release; 326 vol->dev.release = vol_release;
313 vol->dev.parent = &ubi->dev; 327 vol->dev.parent = &ubi->dev;
314 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 328 vol->dev.devt = dev;
315 vol->dev.class = ubi_class; 329 vol->dev.class = ubi_class;
330
316 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 331 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
317 err = device_register(&vol->dev); 332 err = device_register(&vol->dev);
318 if (err) 333 if (err) {
334 ubi_err("cannot register device");
319 goto out_gluebi; 335 goto out_gluebi;
336 }
320 337
321 err = volume_sysfs_init(ubi, vol); 338 err = volume_sysfs_init(ubi, vol);
322 if (err) 339 if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
339 goto out_sysfs; 356 goto out_sysfs;
340 357
341 spin_lock(&ubi->volumes_lock); 358 spin_lock(&ubi->volumes_lock);
359 ubi->volumes[vol_id] = vol;
342 ubi->vol_count += 1; 360 ubi->vol_count += 1;
343 vol->exclusive = 0;
344 spin_unlock(&ubi->volumes_lock); 361 spin_unlock(&ubi->volumes_lock);
345 362
346 paranoid_check_volumes(ubi); 363 paranoid_check_volumes(ubi);
347 return 0; 364 return 0;
348 365
366out_sysfs:
367 /*
367 * We have registered our device, we should not free the volume
369 * description object in this function in case of an error - it is
370 * freed by the release function.
371 *
372 * Get device reference to prevent the release function from being
373 * called just after sysfs has been closed.
374 */
375 dont_free = 1;
376 get_device(&vol->dev);
377 volume_sysfs_close(vol);
349out_gluebi: 378out_gluebi:
350 err = ubi_destroy_gluebi(vol); 379 ubi_destroy_gluebi(vol);
351out_cdev: 380out_cdev:
352 cdev_del(&vol->cdev); 381 cdev_del(&vol->cdev);
353out_mapping: 382out_mapping:
@@ -356,26 +385,13 @@ out_acc:
356 spin_lock(&ubi->volumes_lock); 385 spin_lock(&ubi->volumes_lock);
357 ubi->rsvd_pebs -= vol->reserved_pebs; 386 ubi->rsvd_pebs -= vol->reserved_pebs;
358 ubi->avail_pebs += vol->reserved_pebs; 387 ubi->avail_pebs += vol->reserved_pebs;
359 ubi->volumes[vol_id] = NULL;
360out_unlock: 388out_unlock:
361 spin_unlock(&ubi->volumes_lock); 389 spin_unlock(&ubi->volumes_lock);
362 kfree(vol); 390 if (dont_free)
363 return err; 391 put_device(&vol->dev);
364 392 else
365 /* 393 kfree(vol);
366 * We are registered, so @vol is destroyed in the release function and 394 ubi_err("cannot create volume %d, error %d", vol_id, err);
367 * we have to de-initialize differently.
368 */
369out_sysfs:
370 err = ubi_destroy_gluebi(vol);
371 cdev_del(&vol->cdev);
372 kfree(vol->eba_tbl);
373 spin_lock(&ubi->volumes_lock);
374 ubi->rsvd_pebs -= vol->reserved_pebs;
375 ubi->avail_pebs += vol->reserved_pebs;
376 ubi->volumes[vol_id] = NULL;
377 spin_unlock(&ubi->volumes_lock);
378 volume_sysfs_close(vol);
379 return err; 395 return err;
380} 396}
381 397
@@ -385,7 +401,8 @@ out_sysfs:
385 * 401 *
386 * This function removes volume described by @desc. The volume has to be opened 402 * This function removes volume described by @desc. The volume has to be opened
387 * in "exclusive" mode. Returns zero in case of success and a negative error 403 * in "exclusive" mode. Returns zero in case of success and a negative error
388 * code in case of failure. 404 * code in case of failure. The caller has to have the @ubi->volumes_mutex
405 * locked.
389 */ 406 */
390int ubi_remove_volume(struct ubi_volume_desc *desc) 407int ubi_remove_volume(struct ubi_volume_desc *desc)
391{ 408{
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
400 if (ubi->ro_mode) 417 if (ubi->ro_mode)
401 return -EROFS; 418 return -EROFS;
402 419
420 spin_lock(&ubi->volumes_lock);
421 if (vol->ref_count > 1) {
422 /*
423 * The volume is busy, probably someone is reading one of its
424 * sysfs files.
425 */
426 err = -EBUSY;
427 goto out_unlock;
428 }
429 ubi->volumes[vol_id] = NULL;
430 spin_unlock(&ubi->volumes_lock);
431
403 err = ubi_destroy_gluebi(vol); 432 err = ubi_destroy_gluebi(vol);
404 if (err) 433 if (err)
405 return err; 434 goto out_err;
406 435
407 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 436 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
408 if (err) 437 if (err)
409 return err; 438 goto out_err;
410 439
411 for (i = 0; i < vol->reserved_pebs; i++) { 440 for (i = 0; i < vol->reserved_pebs; i++) {
412 err = ubi_eba_unmap_leb(ubi, vol_id, i); 441 err = ubi_eba_unmap_leb(ubi, vol, i);
413 if (err) 442 if (err)
414 return err; 443 goto out_err;
415 } 444 }
416 445
417 spin_lock(&ubi->volumes_lock);
418 vol->removed = 1;
419 ubi->volumes[vol_id] = NULL;
420 spin_unlock(&ubi->volumes_lock);
421
422 kfree(vol->eba_tbl); 446 kfree(vol->eba_tbl);
423 vol->eba_tbl = NULL; 447 vol->eba_tbl = NULL;
424 cdev_del(&vol->cdev); 448 cdev_del(&vol->cdev);
425 volume_sysfs_close(vol); 449 volume_sysfs_close(vol);
426 kfree(desc);
427 450
428 spin_lock(&ubi->volumes_lock); 451 spin_lock(&ubi->volumes_lock);
429 ubi->rsvd_pebs -= reserved_pebs; 452 ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
441 spin_unlock(&ubi->volumes_lock); 464 spin_unlock(&ubi->volumes_lock);
442 465
443 paranoid_check_volumes(ubi); 466 paranoid_check_volumes(ubi);
444 module_put(THIS_MODULE);
445 return 0; 467 return 0;
468
469out_err:
470 ubi_err("cannot remove volume %d, error %d", vol_id, err);
471 spin_lock(&ubi->volumes_lock);
472 ubi->volumes[vol_id] = vol;
473out_unlock:
474 spin_unlock(&ubi->volumes_lock);
475 return err;
446} 476}
447 477
448/** 478/**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
450 * @desc: volume descriptor 480 * @desc: volume descriptor
451 * @reserved_pebs: new size in physical eraseblocks 481 * @reserved_pebs: new size in physical eraseblocks
452 * 482 *
453 * This function returns zero in case of success, and a negative error code in 483 * This function re-sizes the volume and returns zero in case of success, and a
454 * case of failure. 484 * negative error code in case of failure. The caller has to have the
485 * @ubi->volumes_mutex locked.
455 */ 486 */
456int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 487int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
457{ 488{
@@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
466 497
467 	dbg_msg("re-size volume %d from %d to %d PEBs", 498 	dbg_msg("re-size volume %d from %d to %d PEBs",
468 vol_id, vol->reserved_pebs, reserved_pebs); 499 vol_id, vol->reserved_pebs, reserved_pebs);
469 ubi_assert(desc->mode == UBI_EXCLUSIVE);
470 ubi_assert(vol == ubi->volumes[vol_id]);
471 500
472 if (vol->vol_type == UBI_STATIC_VOLUME && 501 if (vol->vol_type == UBI_STATIC_VOLUME &&
473 reserved_pebs < vol->used_ebs) { 502 reserved_pebs < vol->used_ebs) {
@@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 for (i = 0; i < reserved_pebs; i++) 516 for (i = 0; i < reserved_pebs; i++)
488 new_mapping[i] = UBI_LEB_UNMAPPED; 517 new_mapping[i] = UBI_LEB_UNMAPPED;
489 518
519 spin_lock(&ubi->volumes_lock);
520 if (vol->ref_count > 1) {
521 spin_unlock(&ubi->volumes_lock);
522 err = -EBUSY;
523 goto out_free;
524 }
525 spin_unlock(&ubi->volumes_lock);
526
490 /* Reserve physical eraseblocks */ 527 /* Reserve physical eraseblocks */
491 pebs = reserved_pebs - vol->reserved_pebs; 528 pebs = reserved_pebs - vol->reserved_pebs;
492 if (pebs > 0) { 529 if (pebs > 0) {
@@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
516 553
517 if (pebs < 0) { 554 if (pebs < 0) {
518 for (i = 0; i < -pebs; i++) { 555 for (i = 0; i < -pebs; i++) {
519 err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); 556 err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
520 if (err) 557 if (err)
521 goto out_acc; 558 goto out_acc;
522 } 559 }
@@ -565,27 +602,28 @@ out_free:
565/** 602/**
566 * ubi_add_volume - add volume. 603 * ubi_add_volume - add volume.
567 * @ubi: UBI device description object 604 * @ubi: UBI device description object
568 * @vol_id: volume ID 605 * @vol: volume description object
569 * 606 *
570 * This function adds an existin volume and initializes all its data 607 * This function adds an existing volume and initializes all its data
571 * structures. Returnes zero in case of success and a negative error code in 608 * structures. Returns zero in case of success and a negative error code in
572 * case of failure. 609 * case of failure.
573 */ 610 */
574int ubi_add_volume(struct ubi_device *ubi, int vol_id) 611int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
575{ 612{
576 int err; 613 int err, vol_id = vol->vol_id;
577 struct ubi_volume *vol = ubi->volumes[vol_id]; 614 dev_t dev;
578 615
579 dbg_msg("add volume %d", vol_id); 616 dbg_msg("add volume %d", vol_id);
580 ubi_dbg_dump_vol_info(vol); 617 ubi_dbg_dump_vol_info(vol);
581 ubi_assert(vol);
582 618
583 /* Register character device for the volume */ 619 /* Register character device for the volume */
584 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 620 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
585 vol->cdev.owner = THIS_MODULE; 621 vol->cdev.owner = THIS_MODULE;
586 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); 622 dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
623 err = cdev_add(&vol->cdev, dev, 1);
587 if (err) { 624 if (err) {
588 ubi_err("cannot add character device for volume %d", vol_id); 625 ubi_err("cannot add character device for volume %d, error %d",
626 vol_id, err);
589 return err; 627 return err;
590 } 628 }
591 629
@@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
595 633
596 vol->dev.release = vol_release; 634 vol->dev.release = vol_release;
597 vol->dev.parent = &ubi->dev; 635 vol->dev.parent = &ubi->dev;
598 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 636 vol->dev.devt = dev;
599 vol->dev.class = ubi_class; 637 vol->dev.class = ubi_class;
600 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 638 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
601 err = device_register(&vol->dev); 639 err = device_register(&vol->dev);
@@ -623,22 +661,19 @@ out_cdev:
623/** 661/**
624 * ubi_free_volume - free volume. 662 * ubi_free_volume - free volume.
625 * @ubi: UBI device description object 663 * @ubi: UBI device description object
626 * @vol_id: volume ID 664 * @vol: volume description object
627 * 665 *
628 * This function frees all resources for volume @vol_id but does not remove it. 666 * This function frees all resources for volume @vol but does not remove it.
629 * Used only when the UBI device is detached. 667 * Used only when the UBI device is detached.
630 */ 668 */
631void ubi_free_volume(struct ubi_device *ubi, int vol_id) 669void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
632{ 670{
633 int err; 671 int err;
634 struct ubi_volume *vol = ubi->volumes[vol_id];
635 672
636 dbg_msg("free volume %d", vol_id); 673 dbg_msg("free volume %d", vol->vol_id);
637 ubi_assert(vol);
638 674
639 vol->removed = 1; 675 ubi->volumes[vol->vol_id] = NULL;
640 err = ubi_destroy_gluebi(vol); 676 err = ubi_destroy_gluebi(vol);
641 ubi->volumes[vol_id] = NULL;
642 cdev_del(&vol->cdev); 677 cdev_del(&vol->cdev);
643 volume_sysfs_close(vol); 678 volume_sysfs_close(vol);
644} 679}
@@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
708 goto fail; 743 goto fail;
709 } 744 }
710 745
711 if (vol->upd_marker != 0 && vol->upd_marker != 1) {
712 ubi_err("bad upd_marker");
713 goto fail;
714 }
715
716 if (vol->upd_marker && vol->corrupted) { 746 if (vol->upd_marker && vol->corrupted) {
717 dbg_err("update marker and corrupted simultaneously"); 747 dbg_err("update marker and corrupted simultaneously");
718 goto fail; 748 goto fail;
@@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 
 	n = (long long)vol->used_ebs * vol->usable_leb_size;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
-		if (vol->corrupted != 0) {
+		if (vol->corrupted) {
 			ubi_err("corrupted dynamic volume");
 			goto fail;
 		}
@@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 			goto fail;
 		}
 	} else {
-		if (vol->corrupted != 0 && vol->corrupted != 1) {
-			ubi_err("bad corrupted");
-			goto fail;
-		}
 		if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
 			ubi_err("bad used_ebs");
 			goto fail;
@@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
 {
 	int i;
 
-	mutex_lock(&ubi->vtbl_mutex);
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		paranoid_check_volume(ubi, i);
-	mutex_unlock(&ubi->vtbl_mutex);
 }
 #endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7ec..56fc3fbce838 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 {
 	int i, err;
 	uint32_t crc;
+	struct ubi_volume *layout_vol;
 
 	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
 
 	if (!vtbl_rec)
 		vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 		vtbl_rec->crc = cpu_to_be32(crc);
 	}
 
-	mutex_lock(&ubi->vtbl_mutex);
 	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
 	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
-		err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
-		if (err) {
-			mutex_unlock(&ubi->vtbl_mutex);
+		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+		if (err)
 			return err;
-		}
-		err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
+
+		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
 					ubi->vtbl_size, UBI_LONGTERM);
-		if (err) {
-			mutex_unlock(&ubi->vtbl_mutex);
+		if (err)
 			return err;
-		}
 	}
 
 	paranoid_vtbl_check(ubi);
-	mutex_unlock(&ubi->vtbl_mutex);
-	return ubi_wl_flush(ubi);
+	return 0;
 }
 
 /**
- * vol_til_check - check if volume table is not corrupted and contains sensible
+ * vtbl_check - check if volume table is not corrupted and contains sensible
  * data.
- *
  * @ubi: UBI device description object
  * @vtbl: volume table
  *
@@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
 	 * this volume table copy was found during scanning. It has to be wiped
 	 * out.
 	 */
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
 	if (sv)
 		old_seb = ubi_scan_find_seb(sv, copy);
 
@@ -285,7 +281,7 @@ retry:
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
+	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
 	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
 	vid_hdr->data_size = vid_hdr->used_ebs =
 		vid_hdr->data_pad = cpu_to_be32(0);
@@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 		vol->name[vol->name_len] = '\0';
 		vol->vol_id = i;
 
+		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+			/* Auto re-size flag may be set only for one volume */
+			if (ubi->autoresize_vol_id != -1) {
+				ubi_err("more than one auto-resize volume (%d "
+					"and %d)", ubi->autoresize_vol_id, i);
+				return -EINVAL;
+			}
+
+			ubi->autoresize_vol_id = i;
+		}
+
 		ubi_assert(!ubi->volumes[i]);
 		ubi->volumes[i] = vol;
 		ubi->vol_count += 1;
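The new block above enforces that at most one volume table record carries the auto-resize flag. A standalone sketch of the same scan; the record layout and flag value here are simplified stand-ins, not the on-media format:

	#include <stdio.h>

	#define VTBL_AUTORESIZE_FLG 0x01	/* illustrative flag value */

	struct vtbl_record {
		unsigned int flags;
	};

	/* Return the single auto-resize volume id, -1 if none, -2 on duplicates. */
	static int find_autoresize(const struct vtbl_record *vtbl, int slots)
	{
		int i, autoresize_vol_id = -1;

		for (i = 0; i < slots; i++) {
			if (!(vtbl[i].flags & VTBL_AUTORESIZE_FLG))
				continue;
			if (autoresize_vol_id != -1) {
				fprintf(stderr, "more than one auto-resize volume "
					"(%d and %d)\n", autoresize_vol_id, i);
				return -2;
			}
			autoresize_vol_id = i;
		}
		return autoresize_vol_id;
	}

	int main(void)
	{
		struct vtbl_record vtbl[4] = { {0}, {VTBL_AUTORESIZE_FLG}, {0}, {0} };

		printf("auto-resize volume: %d\n", find_autoresize(vtbl, 4));
		return 0;
	}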
@@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 		vol->last_eb_bytes = sv->last_data_size;
 	}
 
+	/* And add the layout volume */
 	vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
 	if (!vol)
 		return -ENOMEM;
@@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 	vol->last_eb_bytes = vol->reserved_pebs;
 	vol->used_bytes =
 		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
-	vol->vol_id = UBI_LAYOUT_VOL_ID;
+	vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+	vol->ref_count = 1;
 
 	ubi_assert(!ubi->volumes[i]);
 	ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
@@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
 	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
 
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
 	if (!sv) {
 		/*
 		 * No logical eraseblocks belonging to the layout volume were
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a471a491f0ab 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
-/**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
  * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
 	int err;
 	struct ubi_work *wrk;
 
-	spin_lock(&ubi->wl_lock);
+	cond_resched();
 
+	/*
+	 * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so many of them may be doing works at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, and it takes the mutex in write mode.
+	 */
+	down_read(&ubi->work_sem);
+	spin_lock(&ubi->wl_lock);
 	if (list_empty(&ubi->works)) {
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 		return 0;
 	}
 
 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
 	list_del(&wrk->list);
+	ubi->works_count -= 1;
+	ubi_assert(ubi->works_count >= 0);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
 	err = wrk->func(ubi, wrk, 0);
 	if (err)
 		ubi_err("work failed with error code %d", err);
+	up_read(&ubi->work_sem);
 
-	spin_lock(&ubi->wl_lock);
-	ubi->works_count -= 1;
-	ubi_assert(ubi->works_count >= 0);
-	spin_unlock(&ubi->wl_lock);
 	return err;
 }
 
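After this change do_work() brackets each work item with ubi->work_sem held in read mode and pops the queue under the spinlock, so the item itself runs outside the queue lock and any number of workers may run in parallel. A userspace rendition of that shape, with a pthread rwlock standing in for the rw_semaphore and a mutex for the spinlock (all names invented for the sketch):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct work {
		struct work *next;
		int (*func)(int arg);
		int arg;
	};

	static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER; /* "rw_semaphore" */
	static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;    /* "spinlock" */
	static struct work *works;
	static int works_count;

	static int do_work(void)
	{
		struct work *wrk;
		int err;

		/* read mode: many workers may execute items at a time */
		pthread_rwlock_rdlock(&work_sem);
		pthread_mutex_lock(&wl_lock);
		if (!works) {
			pthread_mutex_unlock(&wl_lock);
			pthread_rwlock_unlock(&work_sem);
			return 0;
		}
		wrk = works;
		works = wrk->next;
		works_count -= 1;
		pthread_mutex_unlock(&wl_lock);

		err = wrk->func(wrk->arg);	/* the item runs outside the queue lock */
		free(wrk);
		pthread_rwlock_unlock(&work_sem);
		return err;
	}

	static int say(int arg)
	{
		printf("work %d done\n", arg);
		return 0;
	}

	int main(void)
	{
		struct work *w = malloc(sizeof(*w));

		w->next = NULL;
		w->func = say;
		w->arg = 1;
		works = w;
		works_count = 1;
		return do_work();
	}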
@@ -549,8 +539,12 @@ retry:
  * prot_tree_del - remove a physical eraseblock from the protection trees
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
  */
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
 {
 	struct rb_node *p;
 	struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
 
 		if (pnum == pe->e->pnum)
-			break;
+			goto found;
 
 		if (pnum < pe->e->pnum)
 			p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 			p = p->rb_right;
 	}
 
+	return -ENODEV;
+
+found:
 	ubi_assert(pe->e->pnum == pnum);
 	rb_erase(&pe->rb_aec, &ubi->prot.aec);
 	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
 	kfree(pe);
+	return 0;
 }
 
 /**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, put = 0;
+	int err, put = 0, scrubbing = 0, protect = 0;
+	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	if (!vid_hdr)
 		return -ENOMEM;
 
+	mutex_lock(&ubi->move_mutex);
 	spin_lock(&ubi->wl_lock);
+	ubi_assert(!ubi->move_from && !ubi->move_to);
+	ubi_assert(!ubi->move_to_put);
 
-	/*
-	 * Only one WL worker at a time is supported at this implementation, so
-	 * make sure a PEB is not being moved already.
-	 */
-	if (ubi->move_to || !ubi->free.rb_node ||
+	if (!ubi->free.rb_node ||
 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
-		 * Only one WL worker at a time is supported at this
-		 * implementation, so if a LEB is already being moved, cancel.
-		 *
-		 * No free physical eraseblocks? Well, we cancel wear-leveling
-		 * then. It will be triggered again when a free physical
-		 * eraseblock appears.
+		 * No free physical eraseblocks? Well, they must be waiting in
+		 * the queue to be erased. Cancel movement - it will be
+		 * triggered again when a free physical eraseblock appears.
 		 *
 		 * No used physical eraseblocks? They must be temporarily
 		 * protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
 		       !ubi->free.rb_node, !ubi->used.rb_node);
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return 0;
+		goto out_cancel;
 	}
 
 	if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
-			ubi->wl_scheduled = 0;
-			spin_unlock(&ubi->wl_lock);
-			ubi_free_vid_hdr(ubi, vid_hdr);
-			return 0;
+			goto out_cancel;
 		}
 		paranoid_check_in_wl_tree(e1, &ubi->used);
 		rb_erase(&e1->rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
+		/* Perform scrubbing */
+		scrubbing = 1;
 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
 		rb_erase(&e1->rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
 	paranoid_check_in_wl_tree(e2, &ubi->free);
 	rb_erase(&e2->rb, &ubi->free);
-	ubi_assert(!ubi->move_from && !ubi->move_to);
-	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * We so far do not know which logical eraseblock our physical
 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
 	 * header first.
+	 *
+	 * Note, we are protected from this PEB being unmapped and erased. The
+	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+	 * which is being moved was unmapped.
 	 */
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			 * likely have the VID header in place.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
-			err = 0;
-		} else {
-			ubi_err("error %d while reading VID header from PEB %d",
-				err, e1->pnum);
-			if (err > 0)
-				err = -EIO;
+			goto out_not_moved;
 		}
-		goto error;
+
+		ubi_err("error %d while reading VID header from PEB %d",
+			err, e1->pnum);
+		if (err > 0)
+			err = -EIO;
+		goto out_error;
 	}
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
-		if (err == UBI_IO_BITFLIPS)
-			err = 0;
-		goto error;
+
+		if (err < 0)
+			goto out_error;
+		if (err == 1)
+			goto out_not_moved;
+
+		/*
+		 * For some reason the LEB was not moved - it might be because
+		 * the volume is being deleted. We should prevent this PEB from
+		 * being selected for wear-leveling movement for some "time",
+		 * so put it to the protection tree.
+		 */
+
+		dbg_wl("cancelled moving PEB %d", e1->pnum);
+		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+		if (!pe) {
+			err = -ENOMEM;
+			goto out_error;
+		}
+
+		protect = 1;
 	}
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
+	if (protect)
+		prot_tree_add(ubi, e1, pe, protect);
 	if (!ubi->move_to_put)
 		wl_tree_add(e2, &ubi->used);
 	else
 		put = 1;
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
-	ubi->wl_scheduled = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
-			ubi_ro_mode(ubi);
-		}
+		if (err)
+			goto out_error;
 	}
 
-	err = schedule_erase(ubi, e1, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
-		ubi_ro_mode(ubi);
+	if (!protect) {
+		err = schedule_erase(ubi, e1, 0);
+		if (err)
+			goto out_error;
 	}
 
+
 	dbg_wl("done");
-	return err;
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
 
 	/*
-	 * Some error occurred. @e1 was not changed, so return it back. @e2
-	 * might be changed, schedule it for erasure.
+	 * For some reason the LEB was not moved, might be an error, might be
+	 * something else. @e1 was not changed, so return it back. @e2 might
+	 * be changed, schedule it for erasure.
 	 */
-error:
-	if (err)
-		dbg_wl("error %d occurred, cancel operation", err);
-	ubi_assert(err <= 0);
-
+out_not_moved:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
-	ubi->wl_scheduled = 0;
-	if (ubi->move_from_put)
-		put = 1;
+	if (scrubbing)
+		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (put) {
-		/*
-		 * Well, the target PEB was put meanwhile, schedule it for
-		 * erasure.
-		 */
-		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
-		err = schedule_erase(ubi, e1, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
-			ubi_ro_mode(ubi);
-		}
-	}
-
 	err = schedule_erase(ubi, e2, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
-		ubi_ro_mode(ubi);
-	}
+	if (err)
+		goto out_error;
+
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
+
+out_error:
+	ubi_err("error %d while moving PEB %d to PEB %d",
+		err, e1->pnum, e2->pnum);
 
-	yield();
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	spin_lock(&ubi->wl_lock);
+	ubi->move_from = ubi->move_to = NULL;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+
+	kmem_cache_free(ubi_wl_entry_slab, e1);
+	kmem_cache_free(ubi_wl_entry_slab, e2);
+	ubi_ro_mode(ubi);
+
+	mutex_unlock(&ubi->move_mutex);
 	return err;
+
+out_cancel:
+	ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+	mutex_unlock(&ubi->move_mutex);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return 0;
 }
 
 /**
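The reworked wear_leveling_worker() above funnels every exit through the out_not_moved, out_error and out_cancel labels instead of duplicating cleanup inline. A minimal sketch of that goto-unwind shape, with invented resources and error codes (0 = moved, 1 = soft "not moved", negative = hard error):

	#include <stdio.h>
	#include <stdlib.h>

	static int copy_block(char *buf, int from, int to)
	{
		(void)buf; (void)from; (void)to;	/* real copy elided */
		return 0;
	}

	static int move_block(int from, int to)
	{
		char *buf;
		int err;

		buf = malloc(4096);
		if (!buf)
			return -1;

		err = copy_block(buf, from, to);
		if (err < 0)
			goto out_error;		/* hard failure: report, free, propagate */
		if (err == 1)
			goto out_not_moved;	/* soft failure: undo bookkeeping, succeed */

		free(buf);
		return 0;

	out_not_moved:
		/* put the source block back where it came from, reschedule, etc. */
		free(buf);
		return 0;

	out_error:
		fprintf(stderr, "error %d while moving block %d to %d\n", err, from, to);
		free(buf);
		return err;
	}

	int main(void)
	{
		return move_block(1, 2);
	}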
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
 }
 
 /**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
  * This function is called to return physical eraseblock @pnum to the pool of
  * free physical eraseblocks. The @torture flag has to be set if an I/O error
  * occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
  */
 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 {
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 	ubi_assert(pnum >= 0);
 	ubi_assert(pnum < ubi->peb_count);
 
+retry:
 	spin_lock(&ubi->wl_lock);
-
 	e = ubi->lookuptbl[pnum];
 	if (e == ubi->move_from) {
 		/*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		 * be moved. It will be scheduled for erasure in the
 		 * wear-leveling worker.
 		 */
-		dbg_wl("PEB %d is being moved", pnum);
-		ubi_assert(!ubi->move_from_put);
-		ubi->move_from_put = 1;
+		dbg_wl("PEB %d is being moved, wait", pnum);
 		spin_unlock(&ubi->wl_lock);
-		return 0;
+
+		/* Wait for the WL worker by taking the @ubi->move_mutex */
+		mutex_lock(&ubi->move_mutex);
+		mutex_unlock(&ubi->move_mutex);
+		goto retry;
 	} else if (e == ubi->move_to) {
 		/*
 		 * User is putting the physical eraseblock which was selected
 		 * as the target the data is moved to. It may happen if the EBA
-		 * unit already re-mapped the LEB but the WL unit did has not
-		 * put the PEB to the "used" tree.
+		 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+		 * the WL unit has not put the PEB to the "used" tree yet, but
+		 * it is about to do this. So we just set a flag which will
+		 * tell the WL worker that the PEB is not needed anymore and
+		 * should be scheduled for erasure.
 		 */
 		dbg_wl("PEB %d is the target of data moving", pnum);
 		ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			paranoid_check_in_wl_tree(e, &ubi->scrub);
 			rb_erase(&e->rb, &ubi->scrub);
-		} else
-			prot_tree_del(ubi, e->pnum);
+		} else {
+			err = prot_tree_del(ubi, e->pnum);
+			if (err) {
+				ubi_err("PEB %d not found", pnum);
+				ubi_ro_mode(ubi);
+				spin_unlock(&ubi->wl_lock);
+				return err;
+			}
+		}
 	}
 	spin_unlock(&ubi->wl_lock);
 
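Instead of the old move_from_put flag, ubi_wl_put_peb() now drops its spinlock, waits for the wear-leveling worker by bouncing on ubi->move_mutex (which the worker holds for the whole move), and retries from scratch. Roughly, in userspace terms, with pthread mutexes standing in for both kernel primitives and the tree bookkeeping elided:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;    /* "spinlock" */
	static pthread_mutex_t move_mutex = PTHREAD_MUTEX_INITIALIZER; /* held across a whole move */
	static int moving_pnum = -1;	/* block currently being moved, if any */

	static void put_peb(int pnum)
	{
	retry:
		pthread_mutex_lock(&wl_lock);
		if (pnum == moving_pnum) {
			/* drop the short lock, wait for the mover, start over */
			pthread_mutex_unlock(&wl_lock);
			pthread_mutex_lock(&move_mutex);
			pthread_mutex_unlock(&move_mutex);
			goto retry;
		}
		/* ... remove pnum from its tree and schedule erasure ... */
		pthread_mutex_unlock(&wl_lock);
	}

	int main(void)
	{
		put_peb(7);
		printf("PEB 7 returned\n");
		return 0;
	}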
@@ -1227,8 +1255,17 @@ retry:
 	if (in_wl_tree(e, &ubi->used)) {
 		paranoid_check_in_wl_tree(e, &ubi->used);
 		rb_erase(&e->rb, &ubi->used);
-	} else
-		prot_tree_del(ubi, pnum);
+	} else {
+		int err;
+
+		err = prot_tree_del(ubi, e->pnum);
+		if (err) {
+			ubi_err("PEB %d not found", pnum);
+			ubi_ro_mode(ubi);
+			spin_unlock(&ubi->wl_lock);
+			return err;
+		}
+	}
 
 	wl_tree_add(e, &ubi->scrub);
 	spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-	int err, pending_count;
-
-	pending_count = ubi->works_count;
-
-	dbg_wl("flush (%d pending works)", pending_count);
+	int err;
 
 	/*
 	 * Erase while the pending works queue is not empty, but not more than
 	 * the number of currently pending works.
 	 */
-	while (pending_count-- > 0) {
+	dbg_wl("flush (%d pending works)", ubi->works_count);
+	while (ubi->works_count) {
+		err = do_work(ubi);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Make sure all the works which have been done in parallel are
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
+	up_write(&ubi->work_sem);
+
+	/*
+	 * And in case last was the WL worker and it cancelled the LEB
+	 * movement, flush again.
+	 */
+	while (ubi->works_count) {
+		dbg_wl("flush more (%d pending works)", ubi->works_count);
 		err = do_work(ubi);
 		if (err)
 			return err;
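The new ubi_wl_flush() drains the queue, then takes ubi->work_sem in write mode purely as a barrier so that works still running in read-side critical sections finish, then drains once more in case a cancelled move re-queued something. A self-contained userspace sketch of that drain/barrier/drain sequence (works_count and do_work() here are toy stand-ins for the driver's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
	static int works_count = 2;

	static int do_work(void)
	{
		/* stand-in for popping and running one queued work item */
		pthread_rwlock_rdlock(&work_sem);
		works_count -= 1;
		pthread_rwlock_unlock(&work_sem);
		return 0;
	}

	static int flush(void)
	{
		int err;

		while (works_count) {
			err = do_work();
			if (err)
				return err;
		}

		/* barrier: returns only after every in-flight reader has finished */
		pthread_rwlock_wrlock(&work_sem);
		pthread_rwlock_unlock(&work_sem);

		while (works_count) {	/* a cancelled move may have re-queued work */
			err = do_work();
			if (err)
				return err;
		}
		return 0;
	}

	int main(void)
	{
		int err = flush();

		printf("flushed, %d works left (err %d)\n", works_count, err);
		return err;
	}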
@@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
  * ubi_thread - UBI background thread.
  * @u: the UBI device description object pointer
  */
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
 {
 	int failures = 0;
 	struct ubi_device *ubi = u;
@@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
+	mutex_init(&ubi->move_mutex);
+	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = si->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
 
 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
-	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
-	if (IS_ERR(ubi->bgt_thread)) {
-		err = PTR_ERR(ubi->bgt_thread);
-		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
-			err);
-		return err;
-	}
-
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
-		goto out_free;
+		return err;
 
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 			e->ec = seb->ec;
 			ubi->lookuptbl[e->pnum] = e;
 			if (schedule_erase(ubi, e, 0)) {
-				kmem_cache_free(wl_entries_slab, e);
+				kmem_cache_free(ubi_wl_entry_slab, e);
 				goto out_free;
 			}
 		}
@@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
@@ -1510,8 +1548,6 @@ out_free:
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
 
@@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
  */
 void ubi_wl_close(struct ubi_device *ubi)
 {
-	dbg_wl("disable \"%s\"", ubi->bgt_name);
-	if (ubi->bgt_thread)
-		kthread_stop(ubi->bgt_thread);
-
 	dbg_wl("close the UBI wear-leveling unit");
 
 	cancel_pending(ubi);
@@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID