aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/Kconfig11
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c78
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c14
-rw-r--r--drivers/mtd/chips/cfi_probe.c12
-rw-r--r--drivers/mtd/chips/jedec_probe.c1376
-rw-r--r--drivers/mtd/cmdlinepart.c9
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/physmap_of.c88
-rw-r--r--drivers/mtd/maps/pnc2000.c93
-rw-r--r--drivers/mtd/maps/scb2_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c8
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdoops.c170
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/at91_nand.c12
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c20
-rw-r--r--drivers/mtd/nand/cafe_nand.c19
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/orion_nand.c171
-rw-r--r--drivers/mtd/nand/pasemi_nand.c243
-rw-r--r--drivers/mtd/nand/s3c2410.c48
-rw-r--r--drivers/mtd/ofpart.c74
-rw-r--r--drivers/mtd/onenand/onenand_base.c32
-rw-r--r--drivers/mtd/redboot.c25
-rw-r--r--drivers/mtd/ubi/build.c633
-rw-r--r--drivers/mtd/ubi/cdev.c164
-rw-r--r--drivers/mtd/ubi/debug.h21
-rw-r--r--drivers/mtd/ubi/eba.c321
-rw-r--r--drivers/mtd/ubi/gluebi.c9
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/kapi.c171
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/ubi.h118
-rw-r--r--drivers/mtd/ubi/upd.c11
-rw-r--r--drivers/mtd/ubi/vmt.c196
-rw-r--r--drivers/mtd/ubi/vtbl.c24
-rw-r--r--drivers/mtd/ubi/wl.c339
44 files changed, 2767 insertions, 1805 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8848e8ac705d..e8503341e3b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
150 for your particular device. It won't happen automatically. The 150 for your particular device. It won't happen automatically. The
151 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. 151 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
152 152
153config MTD_OF_PARTS
154 tristate "Flash partition map based on OF description"
155 depends on PPC_OF && MTD_PARTITIONS
156 help
157 This provides a partition parsing function which derives
158 the partition map from the children of the flash node,
159 as described in Documentation/powerpc/booting-without-of.txt.
160
153comment "User Modules And Translation Layers" 161comment "User Modules And Translation Layers"
154 162
155config MTD_CHAR 163config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
286 buffer in a flash partition where it can be read back at some 294 buffer in a flash partition where it can be read back at some
287 later point. 295 later point.
288 296
297 To use, add console=ttyMTDx to the kernel command line,
298 where x is the MTD device number to use.
299
289source "drivers/mtd/chips/Kconfig" 300source "drivers/mtd/chips/Kconfig"
290 301
291source "drivers/mtd/maps/Kconfig" 302source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa7..538e33d11d46 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
14obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
14 15
15# 'Users' - code which presents functionality to userspace. 16# 'Users' - code which presents functionality to userspace.
16obj-$(CONFIG_MTD_CHAR) += mtdchar.o 17obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1707f98c322c..47794d23a42e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -50,6 +50,7 @@
50#define I82802AC 0x00ac 50#define I82802AC 0x00ac
51#define MANUFACTURER_ST 0x0020 51#define MANUFACTURER_ST 0x0020
52#define M50LPW080 0x002F 52#define M50LPW080 0x002F
53#define AT49BV640D 0x02de
53 54
54static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 55static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
55static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 56static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
157} 158}
158#endif 159#endif
159 160
161/* Atmel chips don't use the same PRI format as Intel chips */
162static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
163{
164 struct map_info *map = mtd->priv;
165 struct cfi_private *cfi = map->fldrv_priv;
166 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167 struct cfi_pri_atmel atmel_pri;
168 uint32_t features = 0;
169
170 /* Reverse byteswapping */
171 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
172 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
173 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
174
175 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
176 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
177
178 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
179
180 if (atmel_pri.Features & 0x01) /* chip erase supported */
181 features |= (1<<0);
182 if (atmel_pri.Features & 0x02) /* erase suspend supported */
183 features |= (1<<1);
184 if (atmel_pri.Features & 0x04) /* program suspend supported */
185 features |= (1<<2);
186 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
187 features |= (1<<9);
188 if (atmel_pri.Features & 0x20) /* page mode read supported */
189 features |= (1<<7);
190 if (atmel_pri.Features & 0x40) /* queued erase supported */
191 features |= (1<<4);
192 if (atmel_pri.Features & 0x80) /* Protection bits supported */
193 features |= (1<<6);
194
195 extp->FeatureSupport = features;
196
197 /* burst write mode not supported */
198 cfi->cfiq->BufWriteTimeoutTyp = 0;
199 cfi->cfiq->BufWriteTimeoutMax = 0;
200}
201
160#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 202#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
161/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 203/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
162static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 204static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
227/* 269/*
228 * Some chips power-up with all sectors locked by default. 270 * Some chips power-up with all sectors locked by default.
229 */ 271 */
230static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param) 272static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
231{ 273{
232 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" ); 274 struct map_info *map = mtd->priv;
233 mtd->flags |= MTD_STUPID_LOCK; 275 struct cfi_private *cfi = map->fldrv_priv;
276 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
277
278 if (cfip->FeatureSupport&32) {
279 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
280 mtd->flags |= MTD_POWERUP_LOCK;
281 }
234} 282}
235 283
236static struct cfi_fixup cfi_fixup_table[] = { 284static struct cfi_fixup cfi_fixup_table[] = {
285 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
237#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 286#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
238 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 287 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
239#endif 288#endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
245#endif 294#endif
246 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 295 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
247 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 296 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
248 { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, }, 297 { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
249 { 0, 0, NULL, NULL } 298 { 0, 0, NULL, NULL }
250}; 299};
251 300
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
277 return NULL; 326 return NULL;
278 327
279 if (extp->MajorVersion != '1' || 328 if (extp->MajorVersion != '1' ||
280 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { 329 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
281 printk(KERN_ERR " Unknown Intel/Sharp Extended Query " 330 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
282 "version %c.%c.\n", extp->MajorVersion, 331 "version %c.%c.\n", extp->MajorVersion,
283 extp->MinorVersion); 332 extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
752static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 801static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
753{ 802{
754 int ret; 803 int ret;
804 DECLARE_WAITQUEUE(wait, current);
755 805
756 retry: 806 retry:
757 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING 807 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
808 spin_unlock(contender->mutex); 858 spin_unlock(contender->mutex);
809 } 859 }
810 860
861 /* Check if we already have suspended erase
862 * on this chip. Sleep. */
863 if (mode == FL_ERASING && shared->erasing
864 && shared->erasing->oldstate == FL_ERASING) {
865 spin_unlock(&shared->lock);
866 set_current_state(TASK_UNINTERRUPTIBLE);
867 add_wait_queue(&chip->wq, &wait);
868 spin_unlock(chip->mutex);
869 schedule();
870 remove_wait_queue(&chip->wq, &wait);
871 spin_lock(chip->mutex);
872 goto retry;
873 }
874
811 /* We now own it */ 875 /* We now own it */
812 shared->writing = chip; 876 shared->writing = chip;
813 if (mode == FL_ERASING) 877 if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2294 struct flchip *chip; 2358 struct flchip *chip;
2295 int ret = 0; 2359 int ret = 0;
2296 2360
2297 if ((mtd->flags & MTD_STUPID_LOCK) 2361 if ((mtd->flags & MTD_POWERUP_LOCK)
2298 && extp && (extp->FeatureSupport & (1 << 5))) 2362 && extp && (extp->FeatureSupport & (1 << 5)))
2299 cfi_intelext_save_locks(mtd); 2363 cfi_intelext_save_locks(mtd);
2300 2364
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2405 spin_unlock(chip->mutex); 2469 spin_unlock(chip->mutex);
2406 } 2470 }
2407 2471
2408 if ((mtd->flags & MTD_STUPID_LOCK) 2472 if ((mtd->flags & MTD_POWERUP_LOCK)
2409 && extp && (extp->FeatureSupport & (1 << 5))) 2473 && extp && (extp->FeatureSupport & (1 << 5)))
2410 cfi_intelext_restore_locks(mtd); 2474 cfi_intelext_restore_locks(mtd);
2411} 2475}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 389acc600f5e..d072e87ce4e2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
185 extp->TopBottom = 2; 185 extp->TopBottom = 2;
186 else 186 else
187 extp->TopBottom = 3; 187 extp->TopBottom = 3;
188
189 /* burst write mode not supported */
190 cfi->cfiq->BufWriteTimeoutTyp = 0;
191 cfi->cfiq->BufWriteTimeoutMax = 0;
188} 192}
189 193
190static void fixup_use_secsi(struct mtd_info *mtd, void *param) 194static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
213{ 217{
214 mtd->lock = cfi_atmel_lock; 218 mtd->lock = cfi_atmel_lock;
215 mtd->unlock = cfi_atmel_unlock; 219 mtd->unlock = cfi_atmel_unlock;
216 mtd->flags |= MTD_STUPID_LOCK; 220 mtd->flags |= MTD_POWERUP_LOCK;
217} 221}
218 222
219static struct cfi_fixup cfi_fixup_table[] = { 223static struct cfi_fixup cfi_fixup_table[] = {
224 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
220#ifdef AMD_BOOTLOC_BUG 225#ifdef AMD_BOOTLOC_BUG
221 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 226 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
222#endif 227#endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
229#if !FORCE_WORD_WRITE 234#if !FORCE_WORD_WRITE
230 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 235 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
231#endif 236#endif
232 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
233 { 0, 0, NULL, NULL } 237 { 0, 0, NULL, NULL }
234}; 238};
235static struct cfi_fixup jedec_fixup_table[] = { 239static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
338 /* Modify the unlock address if we are in compatibility mode */ 342 /* Modify the unlock address if we are in compatibility mode */
339 if ( /* x16 in x8 mode */ 343 if ( /* x16 in x8 mode */
340 ((cfi->device_type == CFI_DEVICETYPE_X8) && 344 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
341 (cfi->cfiq->InterfaceDesc == 2)) || 345 (cfi->cfiq->InterfaceDesc ==
346 CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
342 /* x32 in x16 mode */ 347 /* x32 in x16 mode */
343 ((cfi->device_type == CFI_DEVICETYPE_X16) && 348 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
344 (cfi->cfiq->InterfaceDesc == 4))) 349 (cfi->cfiq->InterfaceDesc ==
350 CFI_INTERFACE_X16_BY_X32_ASYNC)))
345 { 351 {
346 cfi->addr_unlock1 = 0xaaa; 352 cfi->addr_unlock1 = 0xaaa;
347 cfi->addr_unlock2 = 0x555; 353 cfi->addr_unlock2 = 0x555;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 60e11a0ada97..f651b6ef1c5d 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
370 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); 370 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
371 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); 371 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
372 switch(cfip->InterfaceDesc) { 372 switch(cfip->InterfaceDesc) {
373 case 0: 373 case CFI_INTERFACE_X8_ASYNC:
374 printk(" - x8-only asynchronous interface\n"); 374 printk(" - x8-only asynchronous interface\n");
375 break; 375 break;
376 376
377 case 1: 377 case CFI_INTERFACE_X16_ASYNC:
378 printk(" - x16-only asynchronous interface\n"); 378 printk(" - x16-only asynchronous interface\n");
379 break; 379 break;
380 380
381 case 2: 381 case CFI_INTERFACE_X8_BY_X16_ASYNC:
382 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); 382 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
383 break; 383 break;
384 384
385 case 3: 385 case CFI_INTERFACE_X32_ASYNC:
386 printk(" - x32-only asynchronous interface\n"); 386 printk(" - x32-only asynchronous interface\n");
387 break; 387 break;
388 388
389 case 4: 389 case CFI_INTERFACE_X16_BY_X32_ASYNC:
390 printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); 390 printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
391 break; 391 break;
392 392
393 case 65535: 393 case CFI_INTERFACE_NOT_ALLOWED:
394 printk(" - Not Allowed / Reserved\n"); 394 printk(" - Not Allowed / Reserved\n");
395 break; 395 break;
396 396
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index a67b23b87fc0..4be51a86a85c 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -194,8 +194,8 @@ enum uaddr {
194 194
195 195
196struct unlock_addr { 196struct unlock_addr {
197 u32 addr1; 197 uint32_t addr1;
198 u32 addr2; 198 uint32_t addr2;
199}; 199};
200 200
201 201
@@ -246,16 +246,16 @@ static const struct unlock_addr unlock_addrs[] = {
246 } 246 }
247}; 247};
248 248
249
250struct amd_flash_info { 249struct amd_flash_info {
251 const __u16 mfr_id;
252 const __u16 dev_id;
253 const char *name; 250 const char *name;
254 const int DevSize; 251 const uint16_t mfr_id;
255 const int NumEraseRegions; 252 const uint16_t dev_id;
256 const int CmdSet; 253 const uint8_t dev_size;
257 const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */ 254 const uint8_t nr_regions;
258 const ulong regions[6]; 255 const uint16_t cmd_set;
256 const uint32_t regions[6];
257 const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
258 const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
259}; 259};
260 260
261#define ERASEINFO(size,blocks) (size<<8)|(blocks-1) 261#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
@@ -280,12 +280,11 @@ static const struct amd_flash_info jedec_table[] = {
280 .mfr_id = MANUFACTURER_AMD, 280 .mfr_id = MANUFACTURER_AMD,
281 .dev_id = AM29F032B, 281 .dev_id = AM29F032B,
282 .name = "AMD AM29F032B", 282 .name = "AMD AM29F032B",
283 .uaddr = { 283 .uaddr = MTD_UADDR_0x0555_0x02AA,
284 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 284 .devtypes = CFI_DEVICETYPE_X8,
285 }, 285 .dev_size = SIZE_4MiB,
286 .DevSize = SIZE_4MiB, 286 .cmd_set = P_ID_AMD_STD,
287 .CmdSet = P_ID_AMD_STD, 287 .nr_regions = 1,
288 .NumEraseRegions= 1,
289 .regions = { 288 .regions = {
290 ERASEINFO(0x10000,64) 289 ERASEINFO(0x10000,64)
291 } 290 }
@@ -293,13 +292,11 @@ static const struct amd_flash_info jedec_table[] = {
293 .mfr_id = MANUFACTURER_AMD, 292 .mfr_id = MANUFACTURER_AMD,
294 .dev_id = AM29LV160DT, 293 .dev_id = AM29LV160DT,
295 .name = "AMD AM29LV160DT", 294 .name = "AMD AM29LV160DT",
296 .uaddr = { 295 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
297 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 296 .uaddr = MTD_UADDR_0x0AAA_0x0555,
298 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 297 .dev_size = SIZE_2MiB,
299 }, 298 .cmd_set = P_ID_AMD_STD,
300 .DevSize = SIZE_2MiB, 299 .nr_regions = 4,
301 .CmdSet = P_ID_AMD_STD,
302 .NumEraseRegions= 4,
303 .regions = { 300 .regions = {
304 ERASEINFO(0x10000,31), 301 ERASEINFO(0x10000,31),
305 ERASEINFO(0x08000,1), 302 ERASEINFO(0x08000,1),
@@ -310,13 +307,11 @@ static const struct amd_flash_info jedec_table[] = {
310 .mfr_id = MANUFACTURER_AMD, 307 .mfr_id = MANUFACTURER_AMD,
311 .dev_id = AM29LV160DB, 308 .dev_id = AM29LV160DB,
312 .name = "AMD AM29LV160DB", 309 .name = "AMD AM29LV160DB",
313 .uaddr = { 310 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
314 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 311 .uaddr = MTD_UADDR_0x0AAA_0x0555,
315 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 312 .dev_size = SIZE_2MiB,
316 }, 313 .cmd_set = P_ID_AMD_STD,
317 .DevSize = SIZE_2MiB, 314 .nr_regions = 4,
318 .CmdSet = P_ID_AMD_STD,
319 .NumEraseRegions= 4,
320 .regions = { 315 .regions = {
321 ERASEINFO(0x04000,1), 316 ERASEINFO(0x04000,1),
322 ERASEINFO(0x02000,2), 317 ERASEINFO(0x02000,2),
@@ -327,13 +322,11 @@ static const struct amd_flash_info jedec_table[] = {
327 .mfr_id = MANUFACTURER_AMD, 322 .mfr_id = MANUFACTURER_AMD,
328 .dev_id = AM29LV400BB, 323 .dev_id = AM29LV400BB,
329 .name = "AMD AM29LV400BB", 324 .name = "AMD AM29LV400BB",
330 .uaddr = { 325 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
331 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 326 .uaddr = MTD_UADDR_0x0AAA_0x0555,
332 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 327 .dev_size = SIZE_512KiB,
333 }, 328 .cmd_set = P_ID_AMD_STD,
334 .DevSize = SIZE_512KiB, 329 .nr_regions = 4,
335 .CmdSet = P_ID_AMD_STD,
336 .NumEraseRegions= 4,
337 .regions = { 330 .regions = {
338 ERASEINFO(0x04000,1), 331 ERASEINFO(0x04000,1),
339 ERASEINFO(0x02000,2), 332 ERASEINFO(0x02000,2),
@@ -344,13 +337,11 @@ static const struct amd_flash_info jedec_table[] = {
344 .mfr_id = MANUFACTURER_AMD, 337 .mfr_id = MANUFACTURER_AMD,
345 .dev_id = AM29LV400BT, 338 .dev_id = AM29LV400BT,
346 .name = "AMD AM29LV400BT", 339 .name = "AMD AM29LV400BT",
347 .uaddr = { 340 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
348 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 341 .uaddr = MTD_UADDR_0x0AAA_0x0555,
349 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 342 .dev_size = SIZE_512KiB,
350 }, 343 .cmd_set = P_ID_AMD_STD,
351 .DevSize = SIZE_512KiB, 344 .nr_regions = 4,
352 .CmdSet = P_ID_AMD_STD,
353 .NumEraseRegions= 4,
354 .regions = { 345 .regions = {
355 ERASEINFO(0x10000,7), 346 ERASEINFO(0x10000,7),
356 ERASEINFO(0x08000,1), 347 ERASEINFO(0x08000,1),
@@ -361,13 +352,11 @@ static const struct amd_flash_info jedec_table[] = {
361 .mfr_id = MANUFACTURER_AMD, 352 .mfr_id = MANUFACTURER_AMD,
362 .dev_id = AM29LV800BB, 353 .dev_id = AM29LV800BB,
363 .name = "AMD AM29LV800BB", 354 .name = "AMD AM29LV800BB",
364 .uaddr = { 355 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
365 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 356 .uaddr = MTD_UADDR_0x0AAA_0x0555,
366 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 357 .dev_size = SIZE_1MiB,
367 }, 358 .cmd_set = P_ID_AMD_STD,
368 .DevSize = SIZE_1MiB, 359 .nr_regions = 4,
369 .CmdSet = P_ID_AMD_STD,
370 .NumEraseRegions= 4,
371 .regions = { 360 .regions = {
372 ERASEINFO(0x04000,1), 361 ERASEINFO(0x04000,1),
373 ERASEINFO(0x02000,2), 362 ERASEINFO(0x02000,2),
@@ -379,13 +368,11 @@ static const struct amd_flash_info jedec_table[] = {
379 .mfr_id = MANUFACTURER_AMD, 368 .mfr_id = MANUFACTURER_AMD,
380 .dev_id = AM29DL800BB, 369 .dev_id = AM29DL800BB,
381 .name = "AMD AM29DL800BB", 370 .name = "AMD AM29DL800BB",
382 .uaddr = { 371 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
383 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 372 .uaddr = MTD_UADDR_0x0AAA_0x0555,
384 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 373 .dev_size = SIZE_1MiB,
385 }, 374 .cmd_set = P_ID_AMD_STD,
386 .DevSize = SIZE_1MiB, 375 .nr_regions = 6,
387 .CmdSet = P_ID_AMD_STD,
388 .NumEraseRegions= 6,
389 .regions = { 376 .regions = {
390 ERASEINFO(0x04000,1), 377 ERASEINFO(0x04000,1),
391 ERASEINFO(0x08000,1), 378 ERASEINFO(0x08000,1),
@@ -398,13 +385,11 @@ static const struct amd_flash_info jedec_table[] = {
398 .mfr_id = MANUFACTURER_AMD, 385 .mfr_id = MANUFACTURER_AMD,
399 .dev_id = AM29DL800BT, 386 .dev_id = AM29DL800BT,
400 .name = "AMD AM29DL800BT", 387 .name = "AMD AM29DL800BT",
401 .uaddr = { 388 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
402 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 389 .uaddr = MTD_UADDR_0x0AAA_0x0555,
403 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 390 .dev_size = SIZE_1MiB,
404 }, 391 .cmd_set = P_ID_AMD_STD,
405 .DevSize = SIZE_1MiB, 392 .nr_regions = 6,
406 .CmdSet = P_ID_AMD_STD,
407 .NumEraseRegions= 6,
408 .regions = { 393 .regions = {
409 ERASEINFO(0x10000,14), 394 ERASEINFO(0x10000,14),
410 ERASEINFO(0x04000,1), 395 ERASEINFO(0x04000,1),
@@ -417,13 +402,11 @@ static const struct amd_flash_info jedec_table[] = {
417 .mfr_id = MANUFACTURER_AMD, 402 .mfr_id = MANUFACTURER_AMD,
418 .dev_id = AM29F800BB, 403 .dev_id = AM29F800BB,
419 .name = "AMD AM29F800BB", 404 .name = "AMD AM29F800BB",
420 .uaddr = { 405 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
421 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 406 .uaddr = MTD_UADDR_0x0AAA_0x0555,
422 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 407 .dev_size = SIZE_1MiB,
423 }, 408 .cmd_set = P_ID_AMD_STD,
424 .DevSize = SIZE_1MiB, 409 .nr_regions = 4,
425 .CmdSet = P_ID_AMD_STD,
426 .NumEraseRegions= 4,
427 .regions = { 410 .regions = {
428 ERASEINFO(0x04000,1), 411 ERASEINFO(0x04000,1),
429 ERASEINFO(0x02000,2), 412 ERASEINFO(0x02000,2),
@@ -434,13 +417,11 @@ static const struct amd_flash_info jedec_table[] = {
434 .mfr_id = MANUFACTURER_AMD, 417 .mfr_id = MANUFACTURER_AMD,
435 .dev_id = AM29LV800BT, 418 .dev_id = AM29LV800BT,
436 .name = "AMD AM29LV800BT", 419 .name = "AMD AM29LV800BT",
437 .uaddr = { 420 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
438 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 421 .uaddr = MTD_UADDR_0x0AAA_0x0555,
439 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 422 .dev_size = SIZE_1MiB,
440 }, 423 .cmd_set = P_ID_AMD_STD,
441 .DevSize = SIZE_1MiB, 424 .nr_regions = 4,
442 .CmdSet = P_ID_AMD_STD,
443 .NumEraseRegions= 4,
444 .regions = { 425 .regions = {
445 ERASEINFO(0x10000,15), 426 ERASEINFO(0x10000,15),
446 ERASEINFO(0x08000,1), 427 ERASEINFO(0x08000,1),
@@ -451,13 +432,11 @@ static const struct amd_flash_info jedec_table[] = {
451 .mfr_id = MANUFACTURER_AMD, 432 .mfr_id = MANUFACTURER_AMD,
452 .dev_id = AM29F800BT, 433 .dev_id = AM29F800BT,
453 .name = "AMD AM29F800BT", 434 .name = "AMD AM29F800BT",
454 .uaddr = { 435 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
455 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 436 .uaddr = MTD_UADDR_0x0AAA_0x0555,
456 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 437 .dev_size = SIZE_1MiB,
457 }, 438 .cmd_set = P_ID_AMD_STD,
458 .DevSize = SIZE_1MiB, 439 .nr_regions = 4,
459 .CmdSet = P_ID_AMD_STD,
460 .NumEraseRegions= 4,
461 .regions = { 440 .regions = {
462 ERASEINFO(0x10000,15), 441 ERASEINFO(0x10000,15),
463 ERASEINFO(0x08000,1), 442 ERASEINFO(0x08000,1),
@@ -468,12 +447,11 @@ static const struct amd_flash_info jedec_table[] = {
468 .mfr_id = MANUFACTURER_AMD, 447 .mfr_id = MANUFACTURER_AMD,
469 .dev_id = AM29F017D, 448 .dev_id = AM29F017D,
470 .name = "AMD AM29F017D", 449 .name = "AMD AM29F017D",
471 .uaddr = { 450 .devtypes = CFI_DEVICETYPE_X8,
472 [0] = MTD_UADDR_DONT_CARE /* x8 */ 451 .uaddr = MTD_UADDR_DONT_CARE,
473 }, 452 .dev_size = SIZE_2MiB,
474 .DevSize = SIZE_2MiB, 453 .cmd_set = P_ID_AMD_STD,
475 .CmdSet = P_ID_AMD_STD, 454 .nr_regions = 1,
476 .NumEraseRegions= 1,
477 .regions = { 455 .regions = {
478 ERASEINFO(0x10000,32), 456 ERASEINFO(0x10000,32),
479 } 457 }
@@ -481,12 +459,11 @@ static const struct amd_flash_info jedec_table[] = {
481 .mfr_id = MANUFACTURER_AMD, 459 .mfr_id = MANUFACTURER_AMD,
482 .dev_id = AM29F016D, 460 .dev_id = AM29F016D,
483 .name = "AMD AM29F016D", 461 .name = "AMD AM29F016D",
484 .uaddr = { 462 .devtypes = CFI_DEVICETYPE_X8,
485 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 463 .uaddr = MTD_UADDR_0x0555_0x02AA,
486 }, 464 .dev_size = SIZE_2MiB,
487 .DevSize = SIZE_2MiB, 465 .cmd_set = P_ID_AMD_STD,
488 .CmdSet = P_ID_AMD_STD, 466 .nr_regions = 1,
489 .NumEraseRegions= 1,
490 .regions = { 467 .regions = {
491 ERASEINFO(0x10000,32), 468 ERASEINFO(0x10000,32),
492 } 469 }
@@ -494,12 +471,11 @@ static const struct amd_flash_info jedec_table[] = {
494 .mfr_id = MANUFACTURER_AMD, 471 .mfr_id = MANUFACTURER_AMD,
495 .dev_id = AM29F080, 472 .dev_id = AM29F080,
496 .name = "AMD AM29F080", 473 .name = "AMD AM29F080",
497 .uaddr = { 474 .devtypes = CFI_DEVICETYPE_X8,
498 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 475 .uaddr = MTD_UADDR_0x0555_0x02AA,
499 }, 476 .dev_size = SIZE_1MiB,
500 .DevSize = SIZE_1MiB, 477 .cmd_set = P_ID_AMD_STD,
501 .CmdSet = P_ID_AMD_STD, 478 .nr_regions = 1,
502 .NumEraseRegions= 1,
503 .regions = { 479 .regions = {
504 ERASEINFO(0x10000,16), 480 ERASEINFO(0x10000,16),
505 } 481 }
@@ -507,12 +483,11 @@ static const struct amd_flash_info jedec_table[] = {
507 .mfr_id = MANUFACTURER_AMD, 483 .mfr_id = MANUFACTURER_AMD,
508 .dev_id = AM29F040, 484 .dev_id = AM29F040,
509 .name = "AMD AM29F040", 485 .name = "AMD AM29F040",
510 .uaddr = { 486 .devtypes = CFI_DEVICETYPE_X8,
511 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 487 .uaddr = MTD_UADDR_0x0555_0x02AA,
512 }, 488 .dev_size = SIZE_512KiB,
513 .DevSize = SIZE_512KiB, 489 .cmd_set = P_ID_AMD_STD,
514 .CmdSet = P_ID_AMD_STD, 490 .nr_regions = 1,
515 .NumEraseRegions= 1,
516 .regions = { 491 .regions = {
517 ERASEINFO(0x10000,8), 492 ERASEINFO(0x10000,8),
518 } 493 }
@@ -520,12 +495,11 @@ static const struct amd_flash_info jedec_table[] = {
520 .mfr_id = MANUFACTURER_AMD, 495 .mfr_id = MANUFACTURER_AMD,
521 .dev_id = AM29LV040B, 496 .dev_id = AM29LV040B,
522 .name = "AMD AM29LV040B", 497 .name = "AMD AM29LV040B",
523 .uaddr = { 498 .devtypes = CFI_DEVICETYPE_X8,
524 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 499 .uaddr = MTD_UADDR_0x0555_0x02AA,
525 }, 500 .dev_size = SIZE_512KiB,
526 .DevSize = SIZE_512KiB, 501 .cmd_set = P_ID_AMD_STD,
527 .CmdSet = P_ID_AMD_STD, 502 .nr_regions = 1,
528 .NumEraseRegions= 1,
529 .regions = { 503 .regions = {
530 ERASEINFO(0x10000,8), 504 ERASEINFO(0x10000,8),
531 } 505 }
@@ -533,12 +507,11 @@ static const struct amd_flash_info jedec_table[] = {
533 .mfr_id = MANUFACTURER_AMD, 507 .mfr_id = MANUFACTURER_AMD,
534 .dev_id = AM29F002T, 508 .dev_id = AM29F002T,
535 .name = "AMD AM29F002T", 509 .name = "AMD AM29F002T",
536 .uaddr = { 510 .devtypes = CFI_DEVICETYPE_X8,
537 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 511 .uaddr = MTD_UADDR_0x0555_0x02AA,
538 }, 512 .dev_size = SIZE_256KiB,
539 .DevSize = SIZE_256KiB, 513 .cmd_set = P_ID_AMD_STD,
540 .CmdSet = P_ID_AMD_STD, 514 .nr_regions = 4,
541 .NumEraseRegions= 4,
542 .regions = { 515 .regions = {
543 ERASEINFO(0x10000,3), 516 ERASEINFO(0x10000,3),
544 ERASEINFO(0x08000,1), 517 ERASEINFO(0x08000,1),
@@ -549,12 +522,11 @@ static const struct amd_flash_info jedec_table[] = {
549 .mfr_id = MANUFACTURER_ATMEL, 522 .mfr_id = MANUFACTURER_ATMEL,
550 .dev_id = AT49BV512, 523 .dev_id = AT49BV512,
551 .name = "Atmel AT49BV512", 524 .name = "Atmel AT49BV512",
552 .uaddr = { 525 .devtypes = CFI_DEVICETYPE_X8,
553 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 526 .uaddr = MTD_UADDR_0x5555_0x2AAA,
554 }, 527 .dev_size = SIZE_64KiB,
555 .DevSize = SIZE_64KiB, 528 .cmd_set = P_ID_AMD_STD,
556 .CmdSet = P_ID_AMD_STD, 529 .nr_regions = 1,
557 .NumEraseRegions= 1,
558 .regions = { 530 .regions = {
559 ERASEINFO(0x10000,1) 531 ERASEINFO(0x10000,1)
560 } 532 }
@@ -562,12 +534,11 @@ static const struct amd_flash_info jedec_table[] = {
562 .mfr_id = MANUFACTURER_ATMEL, 534 .mfr_id = MANUFACTURER_ATMEL,
563 .dev_id = AT29LV512, 535 .dev_id = AT29LV512,
564 .name = "Atmel AT29LV512", 536 .name = "Atmel AT29LV512",
565 .uaddr = { 537 .devtypes = CFI_DEVICETYPE_X8,
566 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 538 .uaddr = MTD_UADDR_0x5555_0x2AAA,
567 }, 539 .dev_size = SIZE_64KiB,
568 .DevSize = SIZE_64KiB, 540 .cmd_set = P_ID_AMD_STD,
569 .CmdSet = P_ID_AMD_STD, 541 .nr_regions = 1,
570 .NumEraseRegions= 1,
571 .regions = { 542 .regions = {
572 ERASEINFO(0x80,256), 543 ERASEINFO(0x80,256),
573 ERASEINFO(0x80,256) 544 ERASEINFO(0x80,256)
@@ -576,13 +547,11 @@ static const struct amd_flash_info jedec_table[] = {
576 .mfr_id = MANUFACTURER_ATMEL, 547 .mfr_id = MANUFACTURER_ATMEL,
577 .dev_id = AT49BV16X, 548 .dev_id = AT49BV16X,
578 .name = "Atmel AT49BV16X", 549 .name = "Atmel AT49BV16X",
579 .uaddr = { 550 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
580 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 551 .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
581 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 552 .dev_size = SIZE_2MiB,
582 }, 553 .cmd_set = P_ID_AMD_STD,
583 .DevSize = SIZE_2MiB, 554 .nr_regions = 2,
584 .CmdSet = P_ID_AMD_STD,
585 .NumEraseRegions= 2,
586 .regions = { 555 .regions = {
587 ERASEINFO(0x02000,8), 556 ERASEINFO(0x02000,8),
588 ERASEINFO(0x10000,31) 557 ERASEINFO(0x10000,31)
@@ -591,13 +560,11 @@ static const struct amd_flash_info jedec_table[] = {
591 .mfr_id = MANUFACTURER_ATMEL, 560 .mfr_id = MANUFACTURER_ATMEL,
592 .dev_id = AT49BV16XT, 561 .dev_id = AT49BV16XT,
593 .name = "Atmel AT49BV16XT", 562 .name = "Atmel AT49BV16XT",
594 .uaddr = { 563 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
595 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 564 .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
596 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 565 .dev_size = SIZE_2MiB,
597 }, 566 .cmd_set = P_ID_AMD_STD,
598 .DevSize = SIZE_2MiB, 567 .nr_regions = 2,
599 .CmdSet = P_ID_AMD_STD,
600 .NumEraseRegions= 2,
601 .regions = { 568 .regions = {
602 ERASEINFO(0x10000,31), 569 ERASEINFO(0x10000,31),
603 ERASEINFO(0x02000,8) 570 ERASEINFO(0x02000,8)
@@ -606,13 +573,11 @@ static const struct amd_flash_info jedec_table[] = {
606 .mfr_id = MANUFACTURER_ATMEL, 573 .mfr_id = MANUFACTURER_ATMEL,
607 .dev_id = AT49BV32X, 574 .dev_id = AT49BV32X,
608 .name = "Atmel AT49BV32X", 575 .name = "Atmel AT49BV32X",
609 .uaddr = { 576 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
610 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 577 .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
611 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 578 .dev_size = SIZE_4MiB,
612 }, 579 .cmd_set = P_ID_AMD_STD,
613 .DevSize = SIZE_4MiB, 580 .nr_regions = 2,
614 .CmdSet = P_ID_AMD_STD,
615 .NumEraseRegions= 2,
616 .regions = { 581 .regions = {
617 ERASEINFO(0x02000,8), 582 ERASEINFO(0x02000,8),
618 ERASEINFO(0x10000,63) 583 ERASEINFO(0x10000,63)
@@ -621,13 +586,11 @@ static const struct amd_flash_info jedec_table[] = {
621 .mfr_id = MANUFACTURER_ATMEL, 586 .mfr_id = MANUFACTURER_ATMEL,
622 .dev_id = AT49BV32XT, 587 .dev_id = AT49BV32XT,
623 .name = "Atmel AT49BV32XT", 588 .name = "Atmel AT49BV32XT",
624 .uaddr = { 589 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
625 [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 590 .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
626 [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 591 .dev_size = SIZE_4MiB,
627 }, 592 .cmd_set = P_ID_AMD_STD,
628 .DevSize = SIZE_4MiB, 593 .nr_regions = 2,
629 .CmdSet = P_ID_AMD_STD,
630 .NumEraseRegions= 2,
631 .regions = { 594 .regions = {
632 ERASEINFO(0x10000,63), 595 ERASEINFO(0x10000,63),
633 ERASEINFO(0x02000,8) 596 ERASEINFO(0x02000,8)
@@ -636,12 +599,11 @@ static const struct amd_flash_info jedec_table[] = {
636 .mfr_id = MANUFACTURER_FUJITSU, 599 .mfr_id = MANUFACTURER_FUJITSU,
637 .dev_id = MBM29F040C, 600 .dev_id = MBM29F040C,
638 .name = "Fujitsu MBM29F040C", 601 .name = "Fujitsu MBM29F040C",
639 .uaddr = { 602 .devtypes = CFI_DEVICETYPE_X8,
640 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 603 .uaddr = MTD_UADDR_0x0AAA_0x0555,
641 }, 604 .dev_size = SIZE_512KiB,
642 .DevSize = SIZE_512KiB, 605 .cmd_set = P_ID_AMD_STD,
643 .CmdSet = P_ID_AMD_STD, 606 .nr_regions = 1,
644 .NumEraseRegions= 1,
645 .regions = { 607 .regions = {
646 ERASEINFO(0x10000,8) 608 ERASEINFO(0x10000,8)
647 } 609 }
@@ -649,13 +611,11 @@ static const struct amd_flash_info jedec_table[] = {
649 .mfr_id = MANUFACTURER_FUJITSU, 611 .mfr_id = MANUFACTURER_FUJITSU,
650 .dev_id = MBM29F800BA, 612 .dev_id = MBM29F800BA,
651 .name = "Fujitsu MBM29F800BA", 613 .name = "Fujitsu MBM29F800BA",
652 .uaddr = { 614 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
653 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 615 .uaddr = MTD_UADDR_0x0AAA_0x0555,
654 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 616 .dev_size = SIZE_1MiB,
655 }, 617 .cmd_set = P_ID_AMD_STD,
656 .DevSize = SIZE_1MiB, 618 .nr_regions = 4,
657 .CmdSet = P_ID_AMD_STD,
658 .NumEraseRegions= 4,
659 .regions = { 619 .regions = {
660 ERASEINFO(0x04000,1), 620 ERASEINFO(0x04000,1),
661 ERASEINFO(0x02000,2), 621 ERASEINFO(0x02000,2),
@@ -666,12 +626,11 @@ static const struct amd_flash_info jedec_table[] = {
666 .mfr_id = MANUFACTURER_FUJITSU, 626 .mfr_id = MANUFACTURER_FUJITSU,
667 .dev_id = MBM29LV650UE, 627 .dev_id = MBM29LV650UE,
668 .name = "Fujitsu MBM29LV650UE", 628 .name = "Fujitsu MBM29LV650UE",
669 .uaddr = { 629 .devtypes = CFI_DEVICETYPE_X8,
670 [0] = MTD_UADDR_DONT_CARE /* x16 */ 630 .uaddr = MTD_UADDR_DONT_CARE,
671 }, 631 .dev_size = SIZE_8MiB,
672 .DevSize = SIZE_8MiB, 632 .cmd_set = P_ID_AMD_STD,
673 .CmdSet = P_ID_AMD_STD, 633 .nr_regions = 1,
674 .NumEraseRegions= 1,
675 .regions = { 634 .regions = {
676 ERASEINFO(0x10000,128) 635 ERASEINFO(0x10000,128)
677 } 636 }
@@ -679,13 +638,11 @@ static const struct amd_flash_info jedec_table[] = {
679 .mfr_id = MANUFACTURER_FUJITSU, 638 .mfr_id = MANUFACTURER_FUJITSU,
680 .dev_id = MBM29LV320TE, 639 .dev_id = MBM29LV320TE,
681 .name = "Fujitsu MBM29LV320TE", 640 .name = "Fujitsu MBM29LV320TE",
682 .uaddr = { 641 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
683 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 642 .uaddr = MTD_UADDR_0x0AAA_0x0555,
684 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 643 .dev_size = SIZE_4MiB,
685 }, 644 .cmd_set = P_ID_AMD_STD,
686 .DevSize = SIZE_4MiB, 645 .nr_regions = 2,
687 .CmdSet = P_ID_AMD_STD,
688 .NumEraseRegions= 2,
689 .regions = { 646 .regions = {
690 ERASEINFO(0x10000,63), 647 ERASEINFO(0x10000,63),
691 ERASEINFO(0x02000,8) 648 ERASEINFO(0x02000,8)
@@ -694,13 +651,11 @@ static const struct amd_flash_info jedec_table[] = {
694 .mfr_id = MANUFACTURER_FUJITSU, 651 .mfr_id = MANUFACTURER_FUJITSU,
695 .dev_id = MBM29LV320BE, 652 .dev_id = MBM29LV320BE,
696 .name = "Fujitsu MBM29LV320BE", 653 .name = "Fujitsu MBM29LV320BE",
697 .uaddr = { 654 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
698 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 655 .uaddr = MTD_UADDR_0x0AAA_0x0555,
699 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 656 .dev_size = SIZE_4MiB,
700 }, 657 .cmd_set = P_ID_AMD_STD,
701 .DevSize = SIZE_4MiB, 658 .nr_regions = 2,
702 .CmdSet = P_ID_AMD_STD,
703 .NumEraseRegions= 2,
704 .regions = { 659 .regions = {
705 ERASEINFO(0x02000,8), 660 ERASEINFO(0x02000,8),
706 ERASEINFO(0x10000,63) 661 ERASEINFO(0x10000,63)
@@ -709,13 +664,11 @@ static const struct amd_flash_info jedec_table[] = {
709 .mfr_id = MANUFACTURER_FUJITSU, 664 .mfr_id = MANUFACTURER_FUJITSU,
710 .dev_id = MBM29LV160TE, 665 .dev_id = MBM29LV160TE,
711 .name = "Fujitsu MBM29LV160TE", 666 .name = "Fujitsu MBM29LV160TE",
712 .uaddr = { 667 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
713 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 668 .uaddr = MTD_UADDR_0x0AAA_0x0555,
714 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 669 .dev_size = SIZE_2MiB,
715 }, 670 .cmd_set = P_ID_AMD_STD,
716 .DevSize = SIZE_2MiB, 671 .nr_regions = 4,
717 .CmdSet = P_ID_AMD_STD,
718 .NumEraseRegions= 4,
719 .regions = { 672 .regions = {
720 ERASEINFO(0x10000,31), 673 ERASEINFO(0x10000,31),
721 ERASEINFO(0x08000,1), 674 ERASEINFO(0x08000,1),
@@ -726,13 +679,11 @@ static const struct amd_flash_info jedec_table[] = {
726 .mfr_id = MANUFACTURER_FUJITSU, 679 .mfr_id = MANUFACTURER_FUJITSU,
727 .dev_id = MBM29LV160BE, 680 .dev_id = MBM29LV160BE,
728 .name = "Fujitsu MBM29LV160BE", 681 .name = "Fujitsu MBM29LV160BE",
729 .uaddr = { 682 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
730 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 683 .uaddr = MTD_UADDR_0x0AAA_0x0555,
731 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 684 .dev_size = SIZE_2MiB,
732 }, 685 .cmd_set = P_ID_AMD_STD,
733 .DevSize = SIZE_2MiB, 686 .nr_regions = 4,
734 .CmdSet = P_ID_AMD_STD,
735 .NumEraseRegions= 4,
736 .regions = { 687 .regions = {
737 ERASEINFO(0x04000,1), 688 ERASEINFO(0x04000,1),
738 ERASEINFO(0x02000,2), 689 ERASEINFO(0x02000,2),
@@ -743,13 +694,11 @@ static const struct amd_flash_info jedec_table[] = {
743 .mfr_id = MANUFACTURER_FUJITSU, 694 .mfr_id = MANUFACTURER_FUJITSU,
744 .dev_id = MBM29LV800BA, 695 .dev_id = MBM29LV800BA,
745 .name = "Fujitsu MBM29LV800BA", 696 .name = "Fujitsu MBM29LV800BA",
746 .uaddr = { 697 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
747 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 698 .uaddr = MTD_UADDR_0x0AAA_0x0555,
748 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 699 .dev_size = SIZE_1MiB,
749 }, 700 .cmd_set = P_ID_AMD_STD,
750 .DevSize = SIZE_1MiB, 701 .nr_regions = 4,
751 .CmdSet = P_ID_AMD_STD,
752 .NumEraseRegions= 4,
753 .regions = { 702 .regions = {
754 ERASEINFO(0x04000,1), 703 ERASEINFO(0x04000,1),
755 ERASEINFO(0x02000,2), 704 ERASEINFO(0x02000,2),
@@ -760,13 +709,11 @@ static const struct amd_flash_info jedec_table[] = {
760 .mfr_id = MANUFACTURER_FUJITSU, 709 .mfr_id = MANUFACTURER_FUJITSU,
761 .dev_id = MBM29LV800TA, 710 .dev_id = MBM29LV800TA,
762 .name = "Fujitsu MBM29LV800TA", 711 .name = "Fujitsu MBM29LV800TA",
763 .uaddr = { 712 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
764 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 713 .uaddr = MTD_UADDR_0x0AAA_0x0555,
765 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 714 .dev_size = SIZE_1MiB,
766 }, 715 .cmd_set = P_ID_AMD_STD,
767 .DevSize = SIZE_1MiB, 716 .nr_regions = 4,
768 .CmdSet = P_ID_AMD_STD,
769 .NumEraseRegions= 4,
770 .regions = { 717 .regions = {
771 ERASEINFO(0x10000,15), 718 ERASEINFO(0x10000,15),
772 ERASEINFO(0x08000,1), 719 ERASEINFO(0x08000,1),
@@ -777,13 +724,11 @@ static const struct amd_flash_info jedec_table[] = {
777 .mfr_id = MANUFACTURER_FUJITSU, 724 .mfr_id = MANUFACTURER_FUJITSU,
778 .dev_id = MBM29LV400BC, 725 .dev_id = MBM29LV400BC,
779 .name = "Fujitsu MBM29LV400BC", 726 .name = "Fujitsu MBM29LV400BC",
780 .uaddr = { 727 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
781 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 728 .uaddr = MTD_UADDR_0x0AAA_0x0555,
782 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 729 .dev_size = SIZE_512KiB,
783 }, 730 .cmd_set = P_ID_AMD_STD,
784 .DevSize = SIZE_512KiB, 731 .nr_regions = 4,
785 .CmdSet = P_ID_AMD_STD,
786 .NumEraseRegions= 4,
787 .regions = { 732 .regions = {
788 ERASEINFO(0x04000,1), 733 ERASEINFO(0x04000,1),
789 ERASEINFO(0x02000,2), 734 ERASEINFO(0x02000,2),
@@ -794,13 +739,11 @@ static const struct amd_flash_info jedec_table[] = {
794 .mfr_id = MANUFACTURER_FUJITSU, 739 .mfr_id = MANUFACTURER_FUJITSU,
795 .dev_id = MBM29LV400TC, 740 .dev_id = MBM29LV400TC,
796 .name = "Fujitsu MBM29LV400TC", 741 .name = "Fujitsu MBM29LV400TC",
797 .uaddr = { 742 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
798 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 743 .uaddr = MTD_UADDR_0x0AAA_0x0555,
799 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 744 .dev_size = SIZE_512KiB,
800 }, 745 .cmd_set = P_ID_AMD_STD,
801 .DevSize = SIZE_512KiB, 746 .nr_regions = 4,
802 .CmdSet = P_ID_AMD_STD,
803 .NumEraseRegions= 4,
804 .regions = { 747 .regions = {
805 ERASEINFO(0x10000,7), 748 ERASEINFO(0x10000,7),
806 ERASEINFO(0x08000,1), 749 ERASEINFO(0x08000,1),
@@ -811,12 +754,11 @@ static const struct amd_flash_info jedec_table[] = {
811 .mfr_id = MANUFACTURER_HYUNDAI, 754 .mfr_id = MANUFACTURER_HYUNDAI,
812 .dev_id = HY29F002T, 755 .dev_id = HY29F002T,
813 .name = "Hyundai HY29F002T", 756 .name = "Hyundai HY29F002T",
814 .uaddr = { 757 .devtypes = CFI_DEVICETYPE_X8,
815 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 758 .uaddr = MTD_UADDR_0x0555_0x02AA,
816 }, 759 .dev_size = SIZE_256KiB,
817 .DevSize = SIZE_256KiB, 760 .cmd_set = P_ID_AMD_STD,
818 .CmdSet = P_ID_AMD_STD, 761 .nr_regions = 4,
819 .NumEraseRegions= 4,
820 .regions = { 762 .regions = {
821 ERASEINFO(0x10000,3), 763 ERASEINFO(0x10000,3),
822 ERASEINFO(0x08000,1), 764 ERASEINFO(0x08000,1),
@@ -827,12 +769,11 @@ static const struct amd_flash_info jedec_table[] = {
827 .mfr_id = MANUFACTURER_INTEL, 769 .mfr_id = MANUFACTURER_INTEL,
828 .dev_id = I28F004B3B, 770 .dev_id = I28F004B3B,
829 .name = "Intel 28F004B3B", 771 .name = "Intel 28F004B3B",
830 .uaddr = { 772 .devtypes = CFI_DEVICETYPE_X8,
831 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 773 .uaddr = MTD_UADDR_UNNECESSARY,
832 }, 774 .dev_size = SIZE_512KiB,
833 .DevSize = SIZE_512KiB, 775 .cmd_set = P_ID_INTEL_STD,
834 .CmdSet = P_ID_INTEL_STD, 776 .nr_regions = 2,
835 .NumEraseRegions= 2,
836 .regions = { 777 .regions = {
837 ERASEINFO(0x02000, 8), 778 ERASEINFO(0x02000, 8),
838 ERASEINFO(0x10000, 7), 779 ERASEINFO(0x10000, 7),
@@ -841,12 +782,11 @@ static const struct amd_flash_info jedec_table[] = {
841 .mfr_id = MANUFACTURER_INTEL, 782 .mfr_id = MANUFACTURER_INTEL,
842 .dev_id = I28F004B3T, 783 .dev_id = I28F004B3T,
843 .name = "Intel 28F004B3T", 784 .name = "Intel 28F004B3T",
844 .uaddr = { 785 .devtypes = CFI_DEVICETYPE_X8,
845 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 786 .uaddr = MTD_UADDR_UNNECESSARY,
846 }, 787 .dev_size = SIZE_512KiB,
847 .DevSize = SIZE_512KiB, 788 .cmd_set = P_ID_INTEL_STD,
848 .CmdSet = P_ID_INTEL_STD, 789 .nr_regions = 2,
849 .NumEraseRegions= 2,
850 .regions = { 790 .regions = {
851 ERASEINFO(0x10000, 7), 791 ERASEINFO(0x10000, 7),
852 ERASEINFO(0x02000, 8), 792 ERASEINFO(0x02000, 8),
@@ -855,13 +795,11 @@ static const struct amd_flash_info jedec_table[] = {
855 .mfr_id = MANUFACTURER_INTEL, 795 .mfr_id = MANUFACTURER_INTEL,
856 .dev_id = I28F400B3B, 796 .dev_id = I28F400B3B,
857 .name = "Intel 28F400B3B", 797 .name = "Intel 28F400B3B",
858 .uaddr = { 798 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
859 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 799 .uaddr = MTD_UADDR_UNNECESSARY,
860 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 800 .dev_size = SIZE_512KiB,
861 }, 801 .cmd_set = P_ID_INTEL_STD,
862 .DevSize = SIZE_512KiB, 802 .nr_regions = 2,
863 .CmdSet = P_ID_INTEL_STD,
864 .NumEraseRegions= 2,
865 .regions = { 803 .regions = {
866 ERASEINFO(0x02000, 8), 804 ERASEINFO(0x02000, 8),
867 ERASEINFO(0x10000, 7), 805 ERASEINFO(0x10000, 7),
@@ -870,13 +808,11 @@ static const struct amd_flash_info jedec_table[] = {
870 .mfr_id = MANUFACTURER_INTEL, 808 .mfr_id = MANUFACTURER_INTEL,
871 .dev_id = I28F400B3T, 809 .dev_id = I28F400B3T,
872 .name = "Intel 28F400B3T", 810 .name = "Intel 28F400B3T",
873 .uaddr = { 811 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
874 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 812 .uaddr = MTD_UADDR_UNNECESSARY,
875 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 813 .dev_size = SIZE_512KiB,
876 }, 814 .cmd_set = P_ID_INTEL_STD,
877 .DevSize = SIZE_512KiB, 815 .nr_regions = 2,
878 .CmdSet = P_ID_INTEL_STD,
879 .NumEraseRegions= 2,
880 .regions = { 816 .regions = {
881 ERASEINFO(0x10000, 7), 817 ERASEINFO(0x10000, 7),
882 ERASEINFO(0x02000, 8), 818 ERASEINFO(0x02000, 8),
@@ -885,12 +821,11 @@ static const struct amd_flash_info jedec_table[] = {
885 .mfr_id = MANUFACTURER_INTEL, 821 .mfr_id = MANUFACTURER_INTEL,
886 .dev_id = I28F008B3B, 822 .dev_id = I28F008B3B,
887 .name = "Intel 28F008B3B", 823 .name = "Intel 28F008B3B",
888 .uaddr = { 824 .devtypes = CFI_DEVICETYPE_X8,
889 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 825 .uaddr = MTD_UADDR_UNNECESSARY,
890 }, 826 .dev_size = SIZE_1MiB,
891 .DevSize = SIZE_1MiB, 827 .cmd_set = P_ID_INTEL_STD,
892 .CmdSet = P_ID_INTEL_STD, 828 .nr_regions = 2,
893 .NumEraseRegions= 2,
894 .regions = { 829 .regions = {
895 ERASEINFO(0x02000, 8), 830 ERASEINFO(0x02000, 8),
896 ERASEINFO(0x10000, 15), 831 ERASEINFO(0x10000, 15),
@@ -899,12 +834,11 @@ static const struct amd_flash_info jedec_table[] = {
899 .mfr_id = MANUFACTURER_INTEL, 834 .mfr_id = MANUFACTURER_INTEL,
900 .dev_id = I28F008B3T, 835 .dev_id = I28F008B3T,
901 .name = "Intel 28F008B3T", 836 .name = "Intel 28F008B3T",
902 .uaddr = { 837 .devtypes = CFI_DEVICETYPE_X8,
903 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 838 .uaddr = MTD_UADDR_UNNECESSARY,
904 }, 839 .dev_size = SIZE_1MiB,
905 .DevSize = SIZE_1MiB, 840 .cmd_set = P_ID_INTEL_STD,
906 .CmdSet = P_ID_INTEL_STD, 841 .nr_regions = 2,
907 .NumEraseRegions= 2,
908 .regions = { 842 .regions = {
909 ERASEINFO(0x10000, 15), 843 ERASEINFO(0x10000, 15),
910 ERASEINFO(0x02000, 8), 844 ERASEINFO(0x02000, 8),
@@ -913,12 +847,11 @@ static const struct amd_flash_info jedec_table[] = {
913 .mfr_id = MANUFACTURER_INTEL, 847 .mfr_id = MANUFACTURER_INTEL,
914 .dev_id = I28F008S5, 848 .dev_id = I28F008S5,
915 .name = "Intel 28F008S5", 849 .name = "Intel 28F008S5",
916 .uaddr = { 850 .devtypes = CFI_DEVICETYPE_X8,
917 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 851 .uaddr = MTD_UADDR_UNNECESSARY,
918 }, 852 .dev_size = SIZE_1MiB,
919 .DevSize = SIZE_1MiB, 853 .cmd_set = P_ID_INTEL_EXT,
920 .CmdSet = P_ID_INTEL_EXT, 854 .nr_regions = 1,
921 .NumEraseRegions= 1,
922 .regions = { 855 .regions = {
923 ERASEINFO(0x10000,16), 856 ERASEINFO(0x10000,16),
924 } 857 }
@@ -926,12 +859,11 @@ static const struct amd_flash_info jedec_table[] = {
926 .mfr_id = MANUFACTURER_INTEL, 859 .mfr_id = MANUFACTURER_INTEL,
927 .dev_id = I28F016S5, 860 .dev_id = I28F016S5,
928 .name = "Intel 28F016S5", 861 .name = "Intel 28F016S5",
929 .uaddr = { 862 .devtypes = CFI_DEVICETYPE_X8,
930 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 863 .uaddr = MTD_UADDR_UNNECESSARY,
931 }, 864 .dev_size = SIZE_2MiB,
932 .DevSize = SIZE_2MiB, 865 .cmd_set = P_ID_INTEL_EXT,
933 .CmdSet = P_ID_INTEL_EXT, 866 .nr_regions = 1,
934 .NumEraseRegions= 1,
935 .regions = { 867 .regions = {
936 ERASEINFO(0x10000,32), 868 ERASEINFO(0x10000,32),
937 } 869 }
@@ -939,12 +871,11 @@ static const struct amd_flash_info jedec_table[] = {
939 .mfr_id = MANUFACTURER_INTEL, 871 .mfr_id = MANUFACTURER_INTEL,
940 .dev_id = I28F008SA, 872 .dev_id = I28F008SA,
941 .name = "Intel 28F008SA", 873 .name = "Intel 28F008SA",
942 .uaddr = { 874 .devtypes = CFI_DEVICETYPE_X8,
943 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 875 .uaddr = MTD_UADDR_UNNECESSARY,
944 }, 876 .dev_size = SIZE_1MiB,
945 .DevSize = SIZE_1MiB, 877 .cmd_set = P_ID_INTEL_STD,
946 .CmdSet = P_ID_INTEL_STD, 878 .nr_regions = 1,
947 .NumEraseRegions= 1,
948 .regions = { 879 .regions = {
949 ERASEINFO(0x10000, 16), 880 ERASEINFO(0x10000, 16),
950 } 881 }
@@ -952,12 +883,11 @@ static const struct amd_flash_info jedec_table[] = {
952 .mfr_id = MANUFACTURER_INTEL, 883 .mfr_id = MANUFACTURER_INTEL,
953 .dev_id = I28F800B3B, 884 .dev_id = I28F800B3B,
954 .name = "Intel 28F800B3B", 885 .name = "Intel 28F800B3B",
955 .uaddr = { 886 .devtypes = CFI_DEVICETYPE_X16,
956 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 887 .uaddr = MTD_UADDR_UNNECESSARY,
957 }, 888 .dev_size = SIZE_1MiB,
958 .DevSize = SIZE_1MiB, 889 .cmd_set = P_ID_INTEL_STD,
959 .CmdSet = P_ID_INTEL_STD, 890 .nr_regions = 2,
960 .NumEraseRegions= 2,
961 .regions = { 891 .regions = {
962 ERASEINFO(0x02000, 8), 892 ERASEINFO(0x02000, 8),
963 ERASEINFO(0x10000, 15), 893 ERASEINFO(0x10000, 15),
@@ -966,12 +896,11 @@ static const struct amd_flash_info jedec_table[] = {
966 .mfr_id = MANUFACTURER_INTEL, 896 .mfr_id = MANUFACTURER_INTEL,
967 .dev_id = I28F800B3T, 897 .dev_id = I28F800B3T,
968 .name = "Intel 28F800B3T", 898 .name = "Intel 28F800B3T",
969 .uaddr = { 899 .devtypes = CFI_DEVICETYPE_X16,
970 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 900 .uaddr = MTD_UADDR_UNNECESSARY,
971 }, 901 .dev_size = SIZE_1MiB,
972 .DevSize = SIZE_1MiB, 902 .cmd_set = P_ID_INTEL_STD,
973 .CmdSet = P_ID_INTEL_STD, 903 .nr_regions = 2,
974 .NumEraseRegions= 2,
975 .regions = { 904 .regions = {
976 ERASEINFO(0x10000, 15), 905 ERASEINFO(0x10000, 15),
977 ERASEINFO(0x02000, 8), 906 ERASEINFO(0x02000, 8),
@@ -980,12 +909,11 @@ static const struct amd_flash_info jedec_table[] = {
980 .mfr_id = MANUFACTURER_INTEL, 909 .mfr_id = MANUFACTURER_INTEL,
981 .dev_id = I28F016B3B, 910 .dev_id = I28F016B3B,
982 .name = "Intel 28F016B3B", 911 .name = "Intel 28F016B3B",
983 .uaddr = { 912 .devtypes = CFI_DEVICETYPE_X8,
984 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 913 .uaddr = MTD_UADDR_UNNECESSARY,
985 }, 914 .dev_size = SIZE_2MiB,
986 .DevSize = SIZE_2MiB, 915 .cmd_set = P_ID_INTEL_STD,
987 .CmdSet = P_ID_INTEL_STD, 916 .nr_regions = 2,
988 .NumEraseRegions= 2,
989 .regions = { 917 .regions = {
990 ERASEINFO(0x02000, 8), 918 ERASEINFO(0x02000, 8),
991 ERASEINFO(0x10000, 31), 919 ERASEINFO(0x10000, 31),
@@ -994,12 +922,11 @@ static const struct amd_flash_info jedec_table[] = {
994 .mfr_id = MANUFACTURER_INTEL, 922 .mfr_id = MANUFACTURER_INTEL,
995 .dev_id = I28F016S3, 923 .dev_id = I28F016S3,
996 .name = "Intel I28F016S3", 924 .name = "Intel I28F016S3",
997 .uaddr = { 925 .devtypes = CFI_DEVICETYPE_X8,
998 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 926 .uaddr = MTD_UADDR_UNNECESSARY,
999 }, 927 .dev_size = SIZE_2MiB,
1000 .DevSize = SIZE_2MiB, 928 .cmd_set = P_ID_INTEL_STD,
1001 .CmdSet = P_ID_INTEL_STD, 929 .nr_regions = 1,
1002 .NumEraseRegions= 1,
1003 .regions = { 930 .regions = {
1004 ERASEINFO(0x10000, 32), 931 ERASEINFO(0x10000, 32),
1005 } 932 }
@@ -1007,12 +934,11 @@ static const struct amd_flash_info jedec_table[] = {
1007 .mfr_id = MANUFACTURER_INTEL, 934 .mfr_id = MANUFACTURER_INTEL,
1008 .dev_id = I28F016B3T, 935 .dev_id = I28F016B3T,
1009 .name = "Intel 28F016B3T", 936 .name = "Intel 28F016B3T",
1010 .uaddr = { 937 .devtypes = CFI_DEVICETYPE_X8,
1011 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 938 .uaddr = MTD_UADDR_UNNECESSARY,
1012 }, 939 .dev_size = SIZE_2MiB,
1013 .DevSize = SIZE_2MiB, 940 .cmd_set = P_ID_INTEL_STD,
1014 .CmdSet = P_ID_INTEL_STD, 941 .nr_regions = 2,
1015 .NumEraseRegions= 2,
1016 .regions = { 942 .regions = {
1017 ERASEINFO(0x10000, 31), 943 ERASEINFO(0x10000, 31),
1018 ERASEINFO(0x02000, 8), 944 ERASEINFO(0x02000, 8),
@@ -1021,12 +947,11 @@ static const struct amd_flash_info jedec_table[] = {
1021 .mfr_id = MANUFACTURER_INTEL, 947 .mfr_id = MANUFACTURER_INTEL,
1022 .dev_id = I28F160B3B, 948 .dev_id = I28F160B3B,
1023 .name = "Intel 28F160B3B", 949 .name = "Intel 28F160B3B",
1024 .uaddr = { 950 .devtypes = CFI_DEVICETYPE_X16,
1025 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 951 .uaddr = MTD_UADDR_UNNECESSARY,
1026 }, 952 .dev_size = SIZE_2MiB,
1027 .DevSize = SIZE_2MiB, 953 .cmd_set = P_ID_INTEL_STD,
1028 .CmdSet = P_ID_INTEL_STD, 954 .nr_regions = 2,
1029 .NumEraseRegions= 2,
1030 .regions = { 955 .regions = {
1031 ERASEINFO(0x02000, 8), 956 ERASEINFO(0x02000, 8),
1032 ERASEINFO(0x10000, 31), 957 ERASEINFO(0x10000, 31),
@@ -1035,12 +960,11 @@ static const struct amd_flash_info jedec_table[] = {
1035 .mfr_id = MANUFACTURER_INTEL, 960 .mfr_id = MANUFACTURER_INTEL,
1036 .dev_id = I28F160B3T, 961 .dev_id = I28F160B3T,
1037 .name = "Intel 28F160B3T", 962 .name = "Intel 28F160B3T",
1038 .uaddr = { 963 .devtypes = CFI_DEVICETYPE_X16,
1039 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 964 .uaddr = MTD_UADDR_UNNECESSARY,
1040 }, 965 .dev_size = SIZE_2MiB,
1041 .DevSize = SIZE_2MiB, 966 .cmd_set = P_ID_INTEL_STD,
1042 .CmdSet = P_ID_INTEL_STD, 967 .nr_regions = 2,
1043 .NumEraseRegions= 2,
1044 .regions = { 968 .regions = {
1045 ERASEINFO(0x10000, 31), 969 ERASEINFO(0x10000, 31),
1046 ERASEINFO(0x02000, 8), 970 ERASEINFO(0x02000, 8),
@@ -1049,12 +973,11 @@ static const struct amd_flash_info jedec_table[] = {
1049 .mfr_id = MANUFACTURER_INTEL, 973 .mfr_id = MANUFACTURER_INTEL,
1050 .dev_id = I28F320B3B, 974 .dev_id = I28F320B3B,
1051 .name = "Intel 28F320B3B", 975 .name = "Intel 28F320B3B",
1052 .uaddr = { 976 .devtypes = CFI_DEVICETYPE_X16,
1053 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 977 .uaddr = MTD_UADDR_UNNECESSARY,
1054 }, 978 .dev_size = SIZE_4MiB,
1055 .DevSize = SIZE_4MiB, 979 .cmd_set = P_ID_INTEL_STD,
1056 .CmdSet = P_ID_INTEL_STD, 980 .nr_regions = 2,
1057 .NumEraseRegions= 2,
1058 .regions = { 981 .regions = {
1059 ERASEINFO(0x02000, 8), 982 ERASEINFO(0x02000, 8),
1060 ERASEINFO(0x10000, 63), 983 ERASEINFO(0x10000, 63),
@@ -1063,12 +986,11 @@ static const struct amd_flash_info jedec_table[] = {
1063 .mfr_id = MANUFACTURER_INTEL, 986 .mfr_id = MANUFACTURER_INTEL,
1064 .dev_id = I28F320B3T, 987 .dev_id = I28F320B3T,
1065 .name = "Intel 28F320B3T", 988 .name = "Intel 28F320B3T",
1066 .uaddr = { 989 .devtypes = CFI_DEVICETYPE_X16,
1067 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 990 .uaddr = MTD_UADDR_UNNECESSARY,
1068 }, 991 .dev_size = SIZE_4MiB,
1069 .DevSize = SIZE_4MiB, 992 .cmd_set = P_ID_INTEL_STD,
1070 .CmdSet = P_ID_INTEL_STD, 993 .nr_regions = 2,
1071 .NumEraseRegions= 2,
1072 .regions = { 994 .regions = {
1073 ERASEINFO(0x10000, 63), 995 ERASEINFO(0x10000, 63),
1074 ERASEINFO(0x02000, 8), 996 ERASEINFO(0x02000, 8),
@@ -1077,12 +999,11 @@ static const struct amd_flash_info jedec_table[] = {
1077 .mfr_id = MANUFACTURER_INTEL, 999 .mfr_id = MANUFACTURER_INTEL,
1078 .dev_id = I28F640B3B, 1000 .dev_id = I28F640B3B,
1079 .name = "Intel 28F640B3B", 1001 .name = "Intel 28F640B3B",
1080 .uaddr = { 1002 .devtypes = CFI_DEVICETYPE_X16,
1081 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 1003 .uaddr = MTD_UADDR_UNNECESSARY,
1082 }, 1004 .dev_size = SIZE_8MiB,
1083 .DevSize = SIZE_8MiB, 1005 .cmd_set = P_ID_INTEL_STD,
1084 .CmdSet = P_ID_INTEL_STD, 1006 .nr_regions = 2,
1085 .NumEraseRegions= 2,
1086 .regions = { 1007 .regions = {
1087 ERASEINFO(0x02000, 8), 1008 ERASEINFO(0x02000, 8),
1088 ERASEINFO(0x10000, 127), 1009 ERASEINFO(0x10000, 127),
@@ -1091,12 +1012,11 @@ static const struct amd_flash_info jedec_table[] = {
1091 .mfr_id = MANUFACTURER_INTEL, 1012 .mfr_id = MANUFACTURER_INTEL,
1092 .dev_id = I28F640B3T, 1013 .dev_id = I28F640B3T,
1093 .name = "Intel 28F640B3T", 1014 .name = "Intel 28F640B3T",
1094 .uaddr = { 1015 .devtypes = CFI_DEVICETYPE_X16,
1095 [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 1016 .uaddr = MTD_UADDR_UNNECESSARY,
1096 }, 1017 .dev_size = SIZE_8MiB,
1097 .DevSize = SIZE_8MiB, 1018 .cmd_set = P_ID_INTEL_STD,
1098 .CmdSet = P_ID_INTEL_STD, 1019 .nr_regions = 2,
1099 .NumEraseRegions= 2,
1100 .regions = { 1020 .regions = {
1101 ERASEINFO(0x10000, 127), 1021 ERASEINFO(0x10000, 127),
1102 ERASEINFO(0x02000, 8), 1022 ERASEINFO(0x02000, 8),
@@ -1105,12 +1025,11 @@ static const struct amd_flash_info jedec_table[] = {
1105 .mfr_id = MANUFACTURER_INTEL, 1025 .mfr_id = MANUFACTURER_INTEL,
1106 .dev_id = I82802AB, 1026 .dev_id = I82802AB,
1107 .name = "Intel 82802AB", 1027 .name = "Intel 82802AB",
1108 .uaddr = { 1028 .devtypes = CFI_DEVICETYPE_X8,
1109 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1029 .uaddr = MTD_UADDR_UNNECESSARY,
1110 }, 1030 .dev_size = SIZE_512KiB,
1111 .DevSize = SIZE_512KiB, 1031 .cmd_set = P_ID_INTEL_EXT,
1112 .CmdSet = P_ID_INTEL_EXT, 1032 .nr_regions = 1,
1113 .NumEraseRegions= 1,
1114 .regions = { 1033 .regions = {
1115 ERASEINFO(0x10000,8), 1034 ERASEINFO(0x10000,8),
1116 } 1035 }
@@ -1118,12 +1037,11 @@ static const struct amd_flash_info jedec_table[] = {
1118 .mfr_id = MANUFACTURER_INTEL, 1037 .mfr_id = MANUFACTURER_INTEL,
1119 .dev_id = I82802AC, 1038 .dev_id = I82802AC,
1120 .name = "Intel 82802AC", 1039 .name = "Intel 82802AC",
1121 .uaddr = { 1040 .devtypes = CFI_DEVICETYPE_X8,
1122 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1041 .uaddr = MTD_UADDR_UNNECESSARY,
1123 }, 1042 .dev_size = SIZE_1MiB,
1124 .DevSize = SIZE_1MiB, 1043 .cmd_set = P_ID_INTEL_EXT,
1125 .CmdSet = P_ID_INTEL_EXT, 1044 .nr_regions = 1,
1126 .NumEraseRegions= 1,
1127 .regions = { 1045 .regions = {
1128 ERASEINFO(0x10000,16), 1046 ERASEINFO(0x10000,16),
1129 } 1047 }
@@ -1131,12 +1049,11 @@ static const struct amd_flash_info jedec_table[] = {
1131 .mfr_id = MANUFACTURER_MACRONIX, 1049 .mfr_id = MANUFACTURER_MACRONIX,
1132 .dev_id = MX29LV040C, 1050 .dev_id = MX29LV040C,
1133 .name = "Macronix MX29LV040C", 1051 .name = "Macronix MX29LV040C",
1134 .uaddr = { 1052 .devtypes = CFI_DEVICETYPE_X8,
1135 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1053 .uaddr = MTD_UADDR_0x0555_0x02AA,
1136 }, 1054 .dev_size = SIZE_512KiB,
1137 .DevSize = SIZE_512KiB, 1055 .cmd_set = P_ID_AMD_STD,
1138 .CmdSet = P_ID_AMD_STD, 1056 .nr_regions = 1,
1139 .NumEraseRegions= 1,
1140 .regions = { 1057 .regions = {
1141 ERASEINFO(0x10000,8), 1058 ERASEINFO(0x10000,8),
1142 } 1059 }
@@ -1144,13 +1061,11 @@ static const struct amd_flash_info jedec_table[] = {
1144 .mfr_id = MANUFACTURER_MACRONIX, 1061 .mfr_id = MANUFACTURER_MACRONIX,
1145 .dev_id = MX29LV160T, 1062 .dev_id = MX29LV160T,
1146 .name = "MXIC MX29LV160T", 1063 .name = "MXIC MX29LV160T",
1147 .uaddr = { 1064 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1148 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1065 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1149 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1066 .dev_size = SIZE_2MiB,
1150 }, 1067 .cmd_set = P_ID_AMD_STD,
1151 .DevSize = SIZE_2MiB, 1068 .nr_regions = 4,
1152 .CmdSet = P_ID_AMD_STD,
1153 .NumEraseRegions= 4,
1154 .regions = { 1069 .regions = {
1155 ERASEINFO(0x10000,31), 1070 ERASEINFO(0x10000,31),
1156 ERASEINFO(0x08000,1), 1071 ERASEINFO(0x08000,1),
@@ -1161,13 +1076,11 @@ static const struct amd_flash_info jedec_table[] = {
1161 .mfr_id = MANUFACTURER_NEC, 1076 .mfr_id = MANUFACTURER_NEC,
1162 .dev_id = UPD29F064115, 1077 .dev_id = UPD29F064115,
1163 .name = "NEC uPD29F064115", 1078 .name = "NEC uPD29F064115",
1164 .uaddr = { 1079 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1165 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1080 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1166 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1081 .dev_size = SIZE_8MiB,
1167 }, 1082 .cmd_set = P_ID_AMD_STD,
1168 .DevSize = SIZE_8MiB, 1083 .nr_regions = 3,
1169 .CmdSet = P_ID_AMD_STD,
1170 .NumEraseRegions= 3,
1171 .regions = { 1084 .regions = {
1172 ERASEINFO(0x2000,8), 1085 ERASEINFO(0x2000,8),
1173 ERASEINFO(0x10000,126), 1086 ERASEINFO(0x10000,126),
@@ -1177,13 +1090,11 @@ static const struct amd_flash_info jedec_table[] = {
1177 .mfr_id = MANUFACTURER_MACRONIX, 1090 .mfr_id = MANUFACTURER_MACRONIX,
1178 .dev_id = MX29LV160B, 1091 .dev_id = MX29LV160B,
1179 .name = "MXIC MX29LV160B", 1092 .name = "MXIC MX29LV160B",
1180 .uaddr = { 1093 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1181 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1094 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1182 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1095 .dev_size = SIZE_2MiB,
1183 }, 1096 .cmd_set = P_ID_AMD_STD,
1184 .DevSize = SIZE_2MiB, 1097 .nr_regions = 4,
1185 .CmdSet = P_ID_AMD_STD,
1186 .NumEraseRegions= 4,
1187 .regions = { 1098 .regions = {
1188 ERASEINFO(0x04000,1), 1099 ERASEINFO(0x04000,1),
1189 ERASEINFO(0x02000,2), 1100 ERASEINFO(0x02000,2),
@@ -1194,12 +1105,11 @@ static const struct amd_flash_info jedec_table[] = {
1194 .mfr_id = MANUFACTURER_MACRONIX, 1105 .mfr_id = MANUFACTURER_MACRONIX,
1195 .dev_id = MX29F040, 1106 .dev_id = MX29F040,
1196 .name = "Macronix MX29F040", 1107 .name = "Macronix MX29F040",
1197 .uaddr = { 1108 .devtypes = CFI_DEVICETYPE_X8,
1198 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1109 .uaddr = MTD_UADDR_0x0555_0x02AA,
1199 }, 1110 .dev_size = SIZE_512KiB,
1200 .DevSize = SIZE_512KiB, 1111 .cmd_set = P_ID_AMD_STD,
1201 .CmdSet = P_ID_AMD_STD, 1112 .nr_regions = 1,
1202 .NumEraseRegions= 1,
1203 .regions = { 1113 .regions = {
1204 ERASEINFO(0x10000,8), 1114 ERASEINFO(0x10000,8),
1205 } 1115 }
@@ -1207,12 +1117,11 @@ static const struct amd_flash_info jedec_table[] = {
1207 .mfr_id = MANUFACTURER_MACRONIX, 1117 .mfr_id = MANUFACTURER_MACRONIX,
1208 .dev_id = MX29F016, 1118 .dev_id = MX29F016,
1209 .name = "Macronix MX29F016", 1119 .name = "Macronix MX29F016",
1210 .uaddr = { 1120 .devtypes = CFI_DEVICETYPE_X8,
1211 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1121 .uaddr = MTD_UADDR_0x0555_0x02AA,
1212 }, 1122 .dev_size = SIZE_2MiB,
1213 .DevSize = SIZE_2MiB, 1123 .cmd_set = P_ID_AMD_STD,
1214 .CmdSet = P_ID_AMD_STD, 1124 .nr_regions = 1,
1215 .NumEraseRegions= 1,
1216 .regions = { 1125 .regions = {
1217 ERASEINFO(0x10000,32), 1126 ERASEINFO(0x10000,32),
1218 } 1127 }
@@ -1220,12 +1129,11 @@ static const struct amd_flash_info jedec_table[] = {
1220 .mfr_id = MANUFACTURER_MACRONIX, 1129 .mfr_id = MANUFACTURER_MACRONIX,
1221 .dev_id = MX29F004T, 1130 .dev_id = MX29F004T,
1222 .name = "Macronix MX29F004T", 1131 .name = "Macronix MX29F004T",
1223 .uaddr = { 1132 .devtypes = CFI_DEVICETYPE_X8,
1224 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1133 .uaddr = MTD_UADDR_0x0555_0x02AA,
1225 }, 1134 .dev_size = SIZE_512KiB,
1226 .DevSize = SIZE_512KiB, 1135 .cmd_set = P_ID_AMD_STD,
1227 .CmdSet = P_ID_AMD_STD, 1136 .nr_regions = 4,
1228 .NumEraseRegions= 4,
1229 .regions = { 1137 .regions = {
1230 ERASEINFO(0x10000,7), 1138 ERASEINFO(0x10000,7),
1231 ERASEINFO(0x08000,1), 1139 ERASEINFO(0x08000,1),
@@ -1236,12 +1144,11 @@ static const struct amd_flash_info jedec_table[] = {
1236 .mfr_id = MANUFACTURER_MACRONIX, 1144 .mfr_id = MANUFACTURER_MACRONIX,
1237 .dev_id = MX29F004B, 1145 .dev_id = MX29F004B,
1238 .name = "Macronix MX29F004B", 1146 .name = "Macronix MX29F004B",
1239 .uaddr = { 1147 .devtypes = CFI_DEVICETYPE_X8,
1240 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1148 .uaddr = MTD_UADDR_0x0555_0x02AA,
1241 }, 1149 .dev_size = SIZE_512KiB,
1242 .DevSize = SIZE_512KiB, 1150 .cmd_set = P_ID_AMD_STD,
1243 .CmdSet = P_ID_AMD_STD, 1151 .nr_regions = 4,
1244 .NumEraseRegions= 4,
1245 .regions = { 1152 .regions = {
1246 ERASEINFO(0x04000,1), 1153 ERASEINFO(0x04000,1),
1247 ERASEINFO(0x02000,2), 1154 ERASEINFO(0x02000,2),
@@ -1252,12 +1159,11 @@ static const struct amd_flash_info jedec_table[] = {
1252 .mfr_id = MANUFACTURER_MACRONIX, 1159 .mfr_id = MANUFACTURER_MACRONIX,
1253 .dev_id = MX29F002T, 1160 .dev_id = MX29F002T,
1254 .name = "Macronix MX29F002T", 1161 .name = "Macronix MX29F002T",
1255 .uaddr = { 1162 .devtypes = CFI_DEVICETYPE_X8,
1256 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1163 .uaddr = MTD_UADDR_0x0555_0x02AA,
1257 }, 1164 .dev_size = SIZE_256KiB,
1258 .DevSize = SIZE_256KiB, 1165 .cmd_set = P_ID_AMD_STD,
1259 .CmdSet = P_ID_AMD_STD, 1166 .nr_regions = 4,
1260 .NumEraseRegions= 4,
1261 .regions = { 1167 .regions = {
1262 ERASEINFO(0x10000,3), 1168 ERASEINFO(0x10000,3),
1263 ERASEINFO(0x08000,1), 1169 ERASEINFO(0x08000,1),
@@ -1268,12 +1174,11 @@ static const struct amd_flash_info jedec_table[] = {
1268 .mfr_id = MANUFACTURER_PMC, 1174 .mfr_id = MANUFACTURER_PMC,
1269 .dev_id = PM49FL002, 1175 .dev_id = PM49FL002,
1270 .name = "PMC Pm49FL002", 1176 .name = "PMC Pm49FL002",
1271 .uaddr = { 1177 .devtypes = CFI_DEVICETYPE_X8,
1272 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1178 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1273 }, 1179 .dev_size = SIZE_256KiB,
1274 .DevSize = SIZE_256KiB, 1180 .cmd_set = P_ID_AMD_STD,
1275 .CmdSet = P_ID_AMD_STD, 1181 .nr_regions = 1,
1276 .NumEraseRegions= 1,
1277 .regions = { 1182 .regions = {
1278 ERASEINFO( 0x01000, 64 ) 1183 ERASEINFO( 0x01000, 64 )
1279 } 1184 }
@@ -1281,12 +1186,11 @@ static const struct amd_flash_info jedec_table[] = {
1281 .mfr_id = MANUFACTURER_PMC, 1186 .mfr_id = MANUFACTURER_PMC,
1282 .dev_id = PM49FL004, 1187 .dev_id = PM49FL004,
1283 .name = "PMC Pm49FL004", 1188 .name = "PMC Pm49FL004",
1284 .uaddr = { 1189 .devtypes = CFI_DEVICETYPE_X8,
1285 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1190 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1286 }, 1191 .dev_size = SIZE_512KiB,
1287 .DevSize = SIZE_512KiB, 1192 .cmd_set = P_ID_AMD_STD,
1288 .CmdSet = P_ID_AMD_STD, 1193 .nr_regions = 1,
1289 .NumEraseRegions= 1,
1290 .regions = { 1194 .regions = {
1291 ERASEINFO( 0x01000, 128 ) 1195 ERASEINFO( 0x01000, 128 )
1292 } 1196 }
@@ -1294,12 +1198,11 @@ static const struct amd_flash_info jedec_table[] = {
1294 .mfr_id = MANUFACTURER_PMC, 1198 .mfr_id = MANUFACTURER_PMC,
1295 .dev_id = PM49FL008, 1199 .dev_id = PM49FL008,
1296 .name = "PMC Pm49FL008", 1200 .name = "PMC Pm49FL008",
1297 .uaddr = { 1201 .devtypes = CFI_DEVICETYPE_X8,
1298 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1202 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1299 }, 1203 .dev_size = SIZE_1MiB,
1300 .DevSize = SIZE_1MiB, 1204 .cmd_set = P_ID_AMD_STD,
1301 .CmdSet = P_ID_AMD_STD, 1205 .nr_regions = 1,
1302 .NumEraseRegions= 1,
1303 .regions = { 1206 .regions = {
1304 ERASEINFO( 0x01000, 256 ) 1207 ERASEINFO( 0x01000, 256 )
1305 } 1208 }
@@ -1307,25 +1210,23 @@ static const struct amd_flash_info jedec_table[] = {
1307 .mfr_id = MANUFACTURER_SHARP, 1210 .mfr_id = MANUFACTURER_SHARP,
1308 .dev_id = LH28F640BF, 1211 .dev_id = LH28F640BF,
1309 .name = "LH28F640BF", 1212 .name = "LH28F640BF",
1310 .uaddr = { 1213 .devtypes = CFI_DEVICETYPE_X8,
1311 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1214 .uaddr = MTD_UADDR_UNNECESSARY,
1312 }, 1215 .dev_size = SIZE_4MiB,
1313 .DevSize = SIZE_4MiB, 1216 .cmd_set = P_ID_INTEL_STD,
1314 .CmdSet = P_ID_INTEL_STD, 1217 .nr_regions = 1,
1315 .NumEraseRegions= 1, 1218 .regions = {
1316 .regions = {
1317 ERASEINFO(0x40000,16), 1219 ERASEINFO(0x40000,16),
1318 } 1220 }
1319 }, { 1221 }, {
1320 .mfr_id = MANUFACTURER_SST, 1222 .mfr_id = MANUFACTURER_SST,
1321 .dev_id = SST39LF512, 1223 .dev_id = SST39LF512,
1322 .name = "SST 39LF512", 1224 .name = "SST 39LF512",
1323 .uaddr = { 1225 .devtypes = CFI_DEVICETYPE_X8,
1324 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1226 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1325 }, 1227 .dev_size = SIZE_64KiB,
1326 .DevSize = SIZE_64KiB, 1228 .cmd_set = P_ID_AMD_STD,
1327 .CmdSet = P_ID_AMD_STD, 1229 .nr_regions = 1,
1328 .NumEraseRegions= 1,
1329 .regions = { 1230 .regions = {
1330 ERASEINFO(0x01000,16), 1231 ERASEINFO(0x01000,16),
1331 } 1232 }
@@ -1333,12 +1234,11 @@ static const struct amd_flash_info jedec_table[] = {
1333 .mfr_id = MANUFACTURER_SST, 1234 .mfr_id = MANUFACTURER_SST,
1334 .dev_id = SST39LF010, 1235 .dev_id = SST39LF010,
1335 .name = "SST 39LF010", 1236 .name = "SST 39LF010",
1336 .uaddr = { 1237 .devtypes = CFI_DEVICETYPE_X8,
1337 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1238 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1338 }, 1239 .dev_size = SIZE_128KiB,
1339 .DevSize = SIZE_128KiB, 1240 .cmd_set = P_ID_AMD_STD,
1340 .CmdSet = P_ID_AMD_STD, 1241 .nr_regions = 1,
1341 .NumEraseRegions= 1,
1342 .regions = { 1242 .regions = {
1343 ERASEINFO(0x01000,32), 1243 ERASEINFO(0x01000,32),
1344 } 1244 }
@@ -1346,36 +1246,33 @@ static const struct amd_flash_info jedec_table[] = {
1346 .mfr_id = MANUFACTURER_SST, 1246 .mfr_id = MANUFACTURER_SST,
1347 .dev_id = SST29EE020, 1247 .dev_id = SST29EE020,
1348 .name = "SST 29EE020", 1248 .name = "SST 29EE020",
1349 .uaddr = { 1249 .devtypes = CFI_DEVICETYPE_X8,
1350 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1250 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1351 }, 1251 .dev_size = SIZE_256KiB,
1352 .DevSize = SIZE_256KiB, 1252 .cmd_set = P_ID_SST_PAGE,
1353 .CmdSet = P_ID_SST_PAGE, 1253 .nr_regions = 1,
1354 .NumEraseRegions= 1, 1254 .regions = {ERASEINFO(0x01000,64),
1355 .regions = {ERASEINFO(0x01000,64), 1255 }
1356 } 1256 }, {
1357 }, {
1358 .mfr_id = MANUFACTURER_SST, 1257 .mfr_id = MANUFACTURER_SST,
1359 .dev_id = SST29LE020, 1258 .dev_id = SST29LE020,
1360 .name = "SST 29LE020", 1259 .name = "SST 29LE020",
1361 .uaddr = { 1260 .devtypes = CFI_DEVICETYPE_X8,
1362 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1261 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1363 }, 1262 .dev_size = SIZE_256KiB,
1364 .DevSize = SIZE_256KiB, 1263 .cmd_set = P_ID_SST_PAGE,
1365 .CmdSet = P_ID_SST_PAGE, 1264 .nr_regions = 1,
1366 .NumEraseRegions= 1, 1265 .regions = {ERASEINFO(0x01000,64),
1367 .regions = {ERASEINFO(0x01000,64), 1266 }
1368 }
1369 }, { 1267 }, {
1370 .mfr_id = MANUFACTURER_SST, 1268 .mfr_id = MANUFACTURER_SST,
1371 .dev_id = SST39LF020, 1269 .dev_id = SST39LF020,
1372 .name = "SST 39LF020", 1270 .name = "SST 39LF020",
1373 .uaddr = { 1271 .devtypes = CFI_DEVICETYPE_X8,
1374 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1272 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1375 }, 1273 .dev_size = SIZE_256KiB,
1376 .DevSize = SIZE_256KiB, 1274 .cmd_set = P_ID_AMD_STD,
1377 .CmdSet = P_ID_AMD_STD, 1275 .nr_regions = 1,
1378 .NumEraseRegions= 1,
1379 .regions = { 1276 .regions = {
1380 ERASEINFO(0x01000,64), 1277 ERASEINFO(0x01000,64),
1381 } 1278 }
@@ -1383,12 +1280,11 @@ static const struct amd_flash_info jedec_table[] = {
1383 .mfr_id = MANUFACTURER_SST, 1280 .mfr_id = MANUFACTURER_SST,
1384 .dev_id = SST39LF040, 1281 .dev_id = SST39LF040,
1385 .name = "SST 39LF040", 1282 .name = "SST 39LF040",
1386 .uaddr = { 1283 .devtypes = CFI_DEVICETYPE_X8,
1387 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1284 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1388 }, 1285 .dev_size = SIZE_512KiB,
1389 .DevSize = SIZE_512KiB, 1286 .cmd_set = P_ID_AMD_STD,
1390 .CmdSet = P_ID_AMD_STD, 1287 .nr_regions = 1,
1391 .NumEraseRegions= 1,
1392 .regions = { 1288 .regions = {
1393 ERASEINFO(0x01000,128), 1289 ERASEINFO(0x01000,128),
1394 } 1290 }
@@ -1396,12 +1292,11 @@ static const struct amd_flash_info jedec_table[] = {
1396 .mfr_id = MANUFACTURER_SST, 1292 .mfr_id = MANUFACTURER_SST,
1397 .dev_id = SST39SF010A, 1293 .dev_id = SST39SF010A,
1398 .name = "SST 39SF010A", 1294 .name = "SST 39SF010A",
1399 .uaddr = { 1295 .devtypes = CFI_DEVICETYPE_X8,
1400 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1296 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1401 }, 1297 .dev_size = SIZE_128KiB,
1402 .DevSize = SIZE_128KiB, 1298 .cmd_set = P_ID_AMD_STD,
1403 .CmdSet = P_ID_AMD_STD, 1299 .nr_regions = 1,
1404 .NumEraseRegions= 1,
1405 .regions = { 1300 .regions = {
1406 ERASEINFO(0x01000,32), 1301 ERASEINFO(0x01000,32),
1407 } 1302 }
@@ -1409,26 +1304,24 @@ static const struct amd_flash_info jedec_table[] = {
1409 .mfr_id = MANUFACTURER_SST, 1304 .mfr_id = MANUFACTURER_SST,
1410 .dev_id = SST39SF020A, 1305 .dev_id = SST39SF020A,
1411 .name = "SST 39SF020A", 1306 .name = "SST 39SF020A",
1412 .uaddr = { 1307 .devtypes = CFI_DEVICETYPE_X8,
1413 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1308 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1414 }, 1309 .dev_size = SIZE_256KiB,
1415 .DevSize = SIZE_256KiB, 1310 .cmd_set = P_ID_AMD_STD,
1416 .CmdSet = P_ID_AMD_STD, 1311 .nr_regions = 1,
1417 .NumEraseRegions= 1,
1418 .regions = { 1312 .regions = {
1419 ERASEINFO(0x01000,64), 1313 ERASEINFO(0x01000,64),
1420 } 1314 }
1421 }, { 1315 }, {
1422 .mfr_id = MANUFACTURER_SST, 1316 .mfr_id = MANUFACTURER_SST,
1423 .dev_id = SST49LF040B, 1317 .dev_id = SST49LF040B,
1424 .name = "SST 49LF040B", 1318 .name = "SST 49LF040B",
1425 .uaddr = { 1319 .devtypes = CFI_DEVICETYPE_X8,
1426 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1320 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1427 }, 1321 .dev_size = SIZE_512KiB,
1428 .DevSize = SIZE_512KiB, 1322 .cmd_set = P_ID_AMD_STD,
1429 .CmdSet = P_ID_AMD_STD, 1323 .nr_regions = 1,
1430 .NumEraseRegions= 1, 1324 .regions = {
1431 .regions = {
1432 ERASEINFO(0x01000,128), 1325 ERASEINFO(0x01000,128),
1433 } 1326 }
1434 }, { 1327 }, {
@@ -1436,12 +1329,11 @@ static const struct amd_flash_info jedec_table[] = {
1436 .mfr_id = MANUFACTURER_SST, 1329 .mfr_id = MANUFACTURER_SST,
1437 .dev_id = SST49LF004B, 1330 .dev_id = SST49LF004B,
1438 .name = "SST 49LF004B", 1331 .name = "SST 49LF004B",
1439 .uaddr = { 1332 .devtypes = CFI_DEVICETYPE_X8,
1440 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1333 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1441 }, 1334 .dev_size = SIZE_512KiB,
1442 .DevSize = SIZE_512KiB, 1335 .cmd_set = P_ID_AMD_STD,
1443 .CmdSet = P_ID_AMD_STD, 1336 .nr_regions = 1,
1444 .NumEraseRegions= 1,
1445 .regions = { 1337 .regions = {
1446 ERASEINFO(0x01000,128), 1338 ERASEINFO(0x01000,128),
1447 } 1339 }
@@ -1449,12 +1341,11 @@ static const struct amd_flash_info jedec_table[] = {
1449 .mfr_id = MANUFACTURER_SST, 1341 .mfr_id = MANUFACTURER_SST,
1450 .dev_id = SST49LF008A, 1342 .dev_id = SST49LF008A,
1451 .name = "SST 49LF008A", 1343 .name = "SST 49LF008A",
1452 .uaddr = { 1344 .devtypes = CFI_DEVICETYPE_X8,
1453 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1345 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1454 }, 1346 .dev_size = SIZE_1MiB,
1455 .DevSize = SIZE_1MiB, 1347 .cmd_set = P_ID_AMD_STD,
1456 .CmdSet = P_ID_AMD_STD, 1348 .nr_regions = 1,
1457 .NumEraseRegions= 1,
1458 .regions = { 1349 .regions = {
1459 ERASEINFO(0x01000,256), 1350 ERASEINFO(0x01000,256),
1460 } 1351 }
@@ -1462,12 +1353,11 @@ static const struct amd_flash_info jedec_table[] = {
1462 .mfr_id = MANUFACTURER_SST, 1353 .mfr_id = MANUFACTURER_SST,
1463 .dev_id = SST49LF030A, 1354 .dev_id = SST49LF030A,
1464 .name = "SST 49LF030A", 1355 .name = "SST 49LF030A",
1465 .uaddr = { 1356 .devtypes = CFI_DEVICETYPE_X8,
1466 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1357 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1467 }, 1358 .dev_size = SIZE_512KiB,
1468 .DevSize = SIZE_512KiB, 1359 .cmd_set = P_ID_AMD_STD,
1469 .CmdSet = P_ID_AMD_STD, 1360 .nr_regions = 1,
1470 .NumEraseRegions= 1,
1471 .regions = { 1361 .regions = {
1472 ERASEINFO(0x01000,96), 1362 ERASEINFO(0x01000,96),
1473 } 1363 }
@@ -1475,12 +1365,11 @@ static const struct amd_flash_info jedec_table[] = {
1475 .mfr_id = MANUFACTURER_SST, 1365 .mfr_id = MANUFACTURER_SST,
1476 .dev_id = SST49LF040A, 1366 .dev_id = SST49LF040A,
1477 .name = "SST 49LF040A", 1367 .name = "SST 49LF040A",
1478 .uaddr = { 1368 .devtypes = CFI_DEVICETYPE_X8,
1479 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1369 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1480 }, 1370 .dev_size = SIZE_512KiB,
1481 .DevSize = SIZE_512KiB, 1371 .cmd_set = P_ID_AMD_STD,
1482 .CmdSet = P_ID_AMD_STD, 1372 .nr_regions = 1,
1483 .NumEraseRegions= 1,
1484 .regions = { 1373 .regions = {
1485 ERASEINFO(0x01000,128), 1374 ERASEINFO(0x01000,128),
1486 } 1375 }
@@ -1488,57 +1377,49 @@ static const struct amd_flash_info jedec_table[] = {
1488 .mfr_id = MANUFACTURER_SST, 1377 .mfr_id = MANUFACTURER_SST,
1489 .dev_id = SST49LF080A, 1378 .dev_id = SST49LF080A,
1490 .name = "SST 49LF080A", 1379 .name = "SST 49LF080A",
1491 .uaddr = { 1380 .devtypes = CFI_DEVICETYPE_X8,
1492 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1381 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1493 }, 1382 .dev_size = SIZE_1MiB,
1494 .DevSize = SIZE_1MiB, 1383 .cmd_set = P_ID_AMD_STD,
1495 .CmdSet = P_ID_AMD_STD, 1384 .nr_regions = 1,
1496 .NumEraseRegions= 1,
1497 .regions = { 1385 .regions = {
1498 ERASEINFO(0x01000,256), 1386 ERASEINFO(0x01000,256),
1499 } 1387 }
1500 }, { 1388 }, {
1501 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1389 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1502 .dev_id = SST39LF160, 1390 .dev_id = SST39LF160,
1503 .name = "SST 39LF160", 1391 .name = "SST 39LF160",
1504 .uaddr = { 1392 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1505 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1393 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1506 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1394 .dev_size = SIZE_2MiB,
1507 }, 1395 .cmd_set = P_ID_AMD_STD,
1508 .DevSize = SIZE_2MiB, 1396 .nr_regions = 2,
1509 .CmdSet = P_ID_AMD_STD, 1397 .regions = {
1510 .NumEraseRegions= 2, 1398 ERASEINFO(0x1000,256),
1511 .regions = { 1399 ERASEINFO(0x1000,256)
1512 ERASEINFO(0x1000,256), 1400 }
1513 ERASEINFO(0x1000,256) 1401 }, {
1514 } 1402 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1515 }, { 1403 .dev_id = SST39VF1601,
1516 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1404 .name = "SST 39VF1601",
1517 .dev_id = SST39VF1601, 1405 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1518 .name = "SST 39VF1601", 1406 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1519 .uaddr = { 1407 .dev_size = SIZE_2MiB,
1520 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1408 .cmd_set = P_ID_AMD_STD,
1521 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1409 .nr_regions = 2,
1522 }, 1410 .regions = {
1523 .DevSize = SIZE_2MiB, 1411 ERASEINFO(0x1000,256),
1524 .CmdSet = P_ID_AMD_STD, 1412 ERASEINFO(0x1000,256)
1525 .NumEraseRegions= 2, 1413 }
1526 .regions = {
1527 ERASEINFO(0x1000,256),
1528 ERASEINFO(0x1000,256)
1529 }
1530
1531 }, { 1414 }, {
1532 .mfr_id = MANUFACTURER_ST, 1415 .mfr_id = MANUFACTURER_ST,
1533 .dev_id = M29F800AB, 1416 .dev_id = M29F800AB,
1534 .name = "ST M29F800AB", 1417 .name = "ST M29F800AB",
1535 .uaddr = { 1418 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1536 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1419 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1537 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1420 .dev_size = SIZE_1MiB,
1538 }, 1421 .cmd_set = P_ID_AMD_STD,
1539 .DevSize = SIZE_1MiB, 1422 .nr_regions = 4,
1540 .CmdSet = P_ID_AMD_STD,
1541 .NumEraseRegions= 4,
1542 .regions = { 1423 .regions = {
1543 ERASEINFO(0x04000,1), 1424 ERASEINFO(0x04000,1),
1544 ERASEINFO(0x02000,2), 1425 ERASEINFO(0x02000,2),
@@ -1549,13 +1430,11 @@ static const struct amd_flash_info jedec_table[] = {
1549 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1430 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1550 .dev_id = M29W800DT, 1431 .dev_id = M29W800DT,
1551 .name = "ST M29W800DT", 1432 .name = "ST M29W800DT",
1552 .uaddr = { 1433 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1553 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1434 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1554 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1435 .dev_size = SIZE_1MiB,
1555 }, 1436 .cmd_set = P_ID_AMD_STD,
1556 .DevSize = SIZE_1MiB, 1437 .nr_regions = 4,
1557 .CmdSet = P_ID_AMD_STD,
1558 .NumEraseRegions= 4,
1559 .regions = { 1438 .regions = {
1560 ERASEINFO(0x10000,15), 1439 ERASEINFO(0x10000,15),
1561 ERASEINFO(0x08000,1), 1440 ERASEINFO(0x08000,1),
@@ -1566,13 +1445,11 @@ static const struct amd_flash_info jedec_table[] = {
1566 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1445 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1567 .dev_id = M29W800DB, 1446 .dev_id = M29W800DB,
1568 .name = "ST M29W800DB", 1447 .name = "ST M29W800DB",
1569 .uaddr = { 1448 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1570 [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1449 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
1571 [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1450 .dev_size = SIZE_1MiB,
1572 }, 1451 .cmd_set = P_ID_AMD_STD,
1573 .DevSize = SIZE_1MiB, 1452 .nr_regions = 4,
1574 .CmdSet = P_ID_AMD_STD,
1575 .NumEraseRegions= 4,
1576 .regions = { 1453 .regions = {
1577 ERASEINFO(0x04000,1), 1454 ERASEINFO(0x04000,1),
1578 ERASEINFO(0x02000,2), 1455 ERASEINFO(0x02000,2),
@@ -1583,13 +1460,11 @@ static const struct amd_flash_info jedec_table[] = {
1583 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1460 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1584 .dev_id = M29W160DT, 1461 .dev_id = M29W160DT,
1585 .name = "ST M29W160DT", 1462 .name = "ST M29W160DT",
1586 .uaddr = { 1463 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1587 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1464 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1588 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1465 .dev_size = SIZE_2MiB,
1589 }, 1466 .cmd_set = P_ID_AMD_STD,
1590 .DevSize = SIZE_2MiB, 1467 .nr_regions = 4,
1591 .CmdSet = P_ID_AMD_STD,
1592 .NumEraseRegions= 4,
1593 .regions = { 1468 .regions = {
1594 ERASEINFO(0x10000,31), 1469 ERASEINFO(0x10000,31),
1595 ERASEINFO(0x08000,1), 1470 ERASEINFO(0x08000,1),
@@ -1600,13 +1475,11 @@ static const struct amd_flash_info jedec_table[] = {
1600 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1475 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
1601 .dev_id = M29W160DB, 1476 .dev_id = M29W160DB,
1602 .name = "ST M29W160DB", 1477 .name = "ST M29W160DB",
1603 .uaddr = { 1478 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1604 [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1479 .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
1605 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1480 .dev_size = SIZE_2MiB,
1606 }, 1481 .cmd_set = P_ID_AMD_STD,
1607 .DevSize = SIZE_2MiB, 1482 .nr_regions = 4,
1608 .CmdSet = P_ID_AMD_STD,
1609 .NumEraseRegions= 4,
1610 .regions = { 1483 .regions = {
1611 ERASEINFO(0x04000,1), 1484 ERASEINFO(0x04000,1),
1612 ERASEINFO(0x02000,2), 1485 ERASEINFO(0x02000,2),
@@ -1617,12 +1490,11 @@ static const struct amd_flash_info jedec_table[] = {
1617 .mfr_id = MANUFACTURER_ST, 1490 .mfr_id = MANUFACTURER_ST,
1618 .dev_id = M29W040B, 1491 .dev_id = M29W040B,
1619 .name = "ST M29W040B", 1492 .name = "ST M29W040B",
1620 .uaddr = { 1493 .devtypes = CFI_DEVICETYPE_X8,
1621 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1494 .uaddr = MTD_UADDR_0x0555_0x02AA,
1622 }, 1495 .dev_size = SIZE_512KiB,
1623 .DevSize = SIZE_512KiB, 1496 .cmd_set = P_ID_AMD_STD,
1624 .CmdSet = P_ID_AMD_STD, 1497 .nr_regions = 1,
1625 .NumEraseRegions= 1,
1626 .regions = { 1498 .regions = {
1627 ERASEINFO(0x10000,8), 1499 ERASEINFO(0x10000,8),
1628 } 1500 }
@@ -1630,12 +1502,11 @@ static const struct amd_flash_info jedec_table[] = {
1630 .mfr_id = MANUFACTURER_ST, 1502 .mfr_id = MANUFACTURER_ST,
1631 .dev_id = M50FW040, 1503 .dev_id = M50FW040,
1632 .name = "ST M50FW040", 1504 .name = "ST M50FW040",
1633 .uaddr = { 1505 .devtypes = CFI_DEVICETYPE_X8,
1634 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1506 .uaddr = MTD_UADDR_UNNECESSARY,
1635 }, 1507 .dev_size = SIZE_512KiB,
1636 .DevSize = SIZE_512KiB, 1508 .cmd_set = P_ID_INTEL_EXT,
1637 .CmdSet = P_ID_INTEL_EXT, 1509 .nr_regions = 1,
1638 .NumEraseRegions= 1,
1639 .regions = { 1510 .regions = {
1640 ERASEINFO(0x10000,8), 1511 ERASEINFO(0x10000,8),
1641 } 1512 }
@@ -1643,12 +1514,11 @@ static const struct amd_flash_info jedec_table[] = {
1643 .mfr_id = MANUFACTURER_ST, 1514 .mfr_id = MANUFACTURER_ST,
1644 .dev_id = M50FW080, 1515 .dev_id = M50FW080,
1645 .name = "ST M50FW080", 1516 .name = "ST M50FW080",
1646 .uaddr = { 1517 .devtypes = CFI_DEVICETYPE_X8,
1647 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1518 .uaddr = MTD_UADDR_UNNECESSARY,
1648 }, 1519 .dev_size = SIZE_1MiB,
1649 .DevSize = SIZE_1MiB, 1520 .cmd_set = P_ID_INTEL_EXT,
1650 .CmdSet = P_ID_INTEL_EXT, 1521 .nr_regions = 1,
1651 .NumEraseRegions= 1,
1652 .regions = { 1522 .regions = {
1653 ERASEINFO(0x10000,16), 1523 ERASEINFO(0x10000,16),
1654 } 1524 }
@@ -1656,12 +1526,11 @@ static const struct amd_flash_info jedec_table[] = {
1656 .mfr_id = MANUFACTURER_ST, 1526 .mfr_id = MANUFACTURER_ST,
1657 .dev_id = M50FW016, 1527 .dev_id = M50FW016,
1658 .name = "ST M50FW016", 1528 .name = "ST M50FW016",
1659 .uaddr = { 1529 .devtypes = CFI_DEVICETYPE_X8,
1660 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1530 .uaddr = MTD_UADDR_UNNECESSARY,
1661 }, 1531 .dev_size = SIZE_2MiB,
1662 .DevSize = SIZE_2MiB, 1532 .cmd_set = P_ID_INTEL_EXT,
1663 .CmdSet = P_ID_INTEL_EXT, 1533 .nr_regions = 1,
1664 .NumEraseRegions= 1,
1665 .regions = { 1534 .regions = {
1666 ERASEINFO(0x10000,32), 1535 ERASEINFO(0x10000,32),
1667 } 1536 }
@@ -1669,12 +1538,11 @@ static const struct amd_flash_info jedec_table[] = {
1669 .mfr_id = MANUFACTURER_ST, 1538 .mfr_id = MANUFACTURER_ST,
1670 .dev_id = M50LPW080, 1539 .dev_id = M50LPW080,
1671 .name = "ST M50LPW080", 1540 .name = "ST M50LPW080",
1672 .uaddr = { 1541 .devtypes = CFI_DEVICETYPE_X8,
1673 [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1542 .uaddr = MTD_UADDR_UNNECESSARY,
1674 }, 1543 .dev_size = SIZE_1MiB,
1675 .DevSize = SIZE_1MiB, 1544 .cmd_set = P_ID_INTEL_EXT,
1676 .CmdSet = P_ID_INTEL_EXT, 1545 .nr_regions = 1,
1677 .NumEraseRegions= 1,
1678 .regions = { 1546 .regions = {
1679 ERASEINFO(0x10000,16), 1547 ERASEINFO(0x10000,16),
1680 } 1548 }
@@ -1682,13 +1550,11 @@ static const struct amd_flash_info jedec_table[] = {
1682 .mfr_id = MANUFACTURER_TOSHIBA, 1550 .mfr_id = MANUFACTURER_TOSHIBA,
1683 .dev_id = TC58FVT160, 1551 .dev_id = TC58FVT160,
1684 .name = "Toshiba TC58FVT160", 1552 .name = "Toshiba TC58FVT160",
1685 .uaddr = { 1553 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1686 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1554 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1687 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1555 .dev_size = SIZE_2MiB,
1688 }, 1556 .cmd_set = P_ID_AMD_STD,
1689 .DevSize = SIZE_2MiB, 1557 .nr_regions = 4,
1690 .CmdSet = P_ID_AMD_STD,
1691 .NumEraseRegions= 4,
1692 .regions = { 1558 .regions = {
1693 ERASEINFO(0x10000,31), 1559 ERASEINFO(0x10000,31),
1694 ERASEINFO(0x08000,1), 1560 ERASEINFO(0x08000,1),
@@ -1699,13 +1565,11 @@ static const struct amd_flash_info jedec_table[] = {
1699 .mfr_id = MANUFACTURER_TOSHIBA, 1565 .mfr_id = MANUFACTURER_TOSHIBA,
1700 .dev_id = TC58FVB160, 1566 .dev_id = TC58FVB160,
1701 .name = "Toshiba TC58FVB160", 1567 .name = "Toshiba TC58FVB160",
1702 .uaddr = { 1568 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1703 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1569 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1704 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1570 .dev_size = SIZE_2MiB,
1705 }, 1571 .cmd_set = P_ID_AMD_STD,
1706 .DevSize = SIZE_2MiB, 1572 .nr_regions = 4,
1707 .CmdSet = P_ID_AMD_STD,
1708 .NumEraseRegions= 4,
1709 .regions = { 1573 .regions = {
1710 ERASEINFO(0x04000,1), 1574 ERASEINFO(0x04000,1),
1711 ERASEINFO(0x02000,2), 1575 ERASEINFO(0x02000,2),
@@ -1716,13 +1580,11 @@ static const struct amd_flash_info jedec_table[] = {
1716 .mfr_id = MANUFACTURER_TOSHIBA, 1580 .mfr_id = MANUFACTURER_TOSHIBA,
1717 .dev_id = TC58FVB321, 1581 .dev_id = TC58FVB321,
1718 .name = "Toshiba TC58FVB321", 1582 .name = "Toshiba TC58FVB321",
1719 .uaddr = { 1583 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1720 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1584 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1721 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1585 .dev_size = SIZE_4MiB,
1722 }, 1586 .cmd_set = P_ID_AMD_STD,
1723 .DevSize = SIZE_4MiB, 1587 .nr_regions = 2,
1724 .CmdSet = P_ID_AMD_STD,
1725 .NumEraseRegions= 2,
1726 .regions = { 1588 .regions = {
1727 ERASEINFO(0x02000,8), 1589 ERASEINFO(0x02000,8),
1728 ERASEINFO(0x10000,63) 1590 ERASEINFO(0x10000,63)
@@ -1731,13 +1593,11 @@ static const struct amd_flash_info jedec_table[] = {
1731 .mfr_id = MANUFACTURER_TOSHIBA, 1593 .mfr_id = MANUFACTURER_TOSHIBA,
1732 .dev_id = TC58FVT321, 1594 .dev_id = TC58FVT321,
1733 .name = "Toshiba TC58FVT321", 1595 .name = "Toshiba TC58FVT321",
1734 .uaddr = { 1596 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1735 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1597 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1736 [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1598 .dev_size = SIZE_4MiB,
1737 }, 1599 .cmd_set = P_ID_AMD_STD,
1738 .DevSize = SIZE_4MiB, 1600 .nr_regions = 2,
1739 .CmdSet = P_ID_AMD_STD,
1740 .NumEraseRegions= 2,
1741 .regions = { 1601 .regions = {
1742 ERASEINFO(0x10000,63), 1602 ERASEINFO(0x10000,63),
1743 ERASEINFO(0x02000,8) 1603 ERASEINFO(0x02000,8)
@@ -1746,13 +1606,11 @@ static const struct amd_flash_info jedec_table[] = {
1746 .mfr_id = MANUFACTURER_TOSHIBA, 1606 .mfr_id = MANUFACTURER_TOSHIBA,
1747 .dev_id = TC58FVB641, 1607 .dev_id = TC58FVB641,
1748 .name = "Toshiba TC58FVB641", 1608 .name = "Toshiba TC58FVB641",
1749 .uaddr = { 1609 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1750 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1610 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1751 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1611 .dev_size = SIZE_8MiB,
1752 }, 1612 .cmd_set = P_ID_AMD_STD,
1753 .DevSize = SIZE_8MiB, 1613 .nr_regions = 2,
1754 .CmdSet = P_ID_AMD_STD,
1755 .NumEraseRegions= 2,
1756 .regions = { 1614 .regions = {
1757 ERASEINFO(0x02000,8), 1615 ERASEINFO(0x02000,8),
1758 ERASEINFO(0x10000,127) 1616 ERASEINFO(0x10000,127)
@@ -1761,13 +1619,11 @@ static const struct amd_flash_info jedec_table[] = {
1761 .mfr_id = MANUFACTURER_TOSHIBA, 1619 .mfr_id = MANUFACTURER_TOSHIBA,
1762 .dev_id = TC58FVT641, 1620 .dev_id = TC58FVT641,
1763 .name = "Toshiba TC58FVT641", 1621 .name = "Toshiba TC58FVT641",
1764 .uaddr = { 1622 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1765 [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1623 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1766 [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1624 .dev_size = SIZE_8MiB,
1767 }, 1625 .cmd_set = P_ID_AMD_STD,
1768 .DevSize = SIZE_8MiB, 1626 .nr_regions = 2,
1769 .CmdSet = P_ID_AMD_STD,
1770 .NumEraseRegions= 2,
1771 .regions = { 1627 .regions = {
1772 ERASEINFO(0x10000,127), 1628 ERASEINFO(0x10000,127),
1773 ERASEINFO(0x02000,8) 1629 ERASEINFO(0x02000,8)
@@ -1776,12 +1632,11 @@ static const struct amd_flash_info jedec_table[] = {
1776 .mfr_id = MANUFACTURER_WINBOND, 1632 .mfr_id = MANUFACTURER_WINBOND,
1777 .dev_id = W49V002A, 1633 .dev_id = W49V002A,
1778 .name = "Winbond W49V002A", 1634 .name = "Winbond W49V002A",
1779 .uaddr = { 1635 .devtypes = CFI_DEVICETYPE_X8,
1780 [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1636 .uaddr = MTD_UADDR_0x5555_0x2AAA,
1781 }, 1637 .dev_size = SIZE_256KiB,
1782 .DevSize = SIZE_256KiB, 1638 .cmd_set = P_ID_AMD_STD,
1783 .CmdSet = P_ID_AMD_STD, 1639 .nr_regions = 4,
1784 .NumEraseRegions= 4,
1785 .regions = { 1640 .regions = {
1786 ERASEINFO(0x10000, 3), 1641 ERASEINFO(0x10000, 3),
1787 ERASEINFO(0x08000, 1), 1642 ERASEINFO(0x08000, 1),
@@ -1791,15 +1646,7 @@ static const struct amd_flash_info jedec_table[] = {
1791 } 1646 }
1792}; 1647};
1793 1648
1794 1649static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1795static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
1796
1797static int jedec_probe_chip(struct map_info *map, __u32 base,
1798 unsigned long *chip_map, struct cfi_private *cfi);
1799
1800static struct mtd_info *jedec_probe(struct map_info *map);
1801
1802static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1803 struct cfi_private *cfi) 1650 struct cfi_private *cfi)
1804{ 1651{
1805 map_word result; 1652 map_word result;
@@ -1810,7 +1657,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1810 return result.x[0] & mask; 1657 return result.x[0] & mask;
1811} 1658}
1812 1659
1813static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1660static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
1814 struct cfi_private *cfi) 1661 struct cfi_private *cfi)
1815{ 1662{
1816 map_word result; 1663 map_word result;
@@ -1821,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1821 return result.x[0] & mask; 1668 return result.x[0] & mask;
1822} 1669}
1823 1670
1824static inline void jedec_reset(u32 base, struct map_info *map, 1671static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1825 struct cfi_private *cfi)
1826{ 1672{
1827 /* Reset */ 1673 /* Reset */
1828 1674
@@ -1832,7 +1678,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1832 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1678 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
1833 * as they will ignore the writes and dont care what address 1679 * as they will ignore the writes and dont care what address
1834 * the F0 is written to */ 1680 * the F0 is written to */
1835 if(cfi->addr_unlock1) { 1681 if (cfi->addr_unlock1) {
1836 DEBUG( MTD_DEBUG_LEVEL3, 1682 DEBUG( MTD_DEBUG_LEVEL3,
1837 "reset unlock called %x %x \n", 1683 "reset unlock called %x %x \n",
1838 cfi->addr_unlock1,cfi->addr_unlock2); 1684 cfi->addr_unlock1,cfi->addr_unlock2);
@@ -1841,7 +1687,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1841 } 1687 }
1842 1688
1843 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1689 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1844 /* Some misdesigned intel chips do not respond for 0xF0 for a reset, 1690 /* Some misdesigned Intel chips do not respond for 0xF0 for a reset,
1845 * so ensure we're in read mode. Send both the Intel and the AMD command 1691 * so ensure we're in read mode. Send both the Intel and the AMD command
1846 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1692 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
1847 * this should be safe. 1693 * this should be safe.
@@ -1851,42 +1697,20 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1851} 1697}
1852 1698
1853 1699
1854static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
1855{
1856 int uaddr_idx;
1857 __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
1858
1859 switch ( device_type ) {
1860 case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
1861 case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
1862 case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
1863 default:
1864 printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
1865 __func__, device_type);
1866 goto uaddr_done;
1867 }
1868
1869 uaddr = finfo->uaddr[uaddr_idx];
1870
1871 if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
1872 /* ASSERT("The unlock addresses for non-8-bit mode
1873 are bollocks. We don't really need an array."); */
1874 uaddr = finfo->uaddr[0];
1875 }
1876
1877 uaddr_done:
1878 return uaddr;
1879}
1880
1881
1882static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1700static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1883{ 1701{
1884 int i,num_erase_regions; 1702 int i,num_erase_regions;
1885 __u8 uaddr; 1703 uint8_t uaddr;
1886 1704
1887 printk("Found: %s\n",jedec_table[index].name); 1705 if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
1706 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
1707 jedec_table[index].name, 4 * (1<<p_cfi->device_type));
1708 return 0;
1709 }
1710
1711 printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
1888 1712
1889 num_erase_regions = jedec_table[index].NumEraseRegions; 1713 num_erase_regions = jedec_table[index].nr_regions;
1890 1714
1891 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1715 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1892 if (!p_cfi->cfiq) { 1716 if (!p_cfi->cfiq) {
@@ -1896,9 +1720,9 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1896 1720
1897 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1721 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1898 1722
1899 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1723 p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
1900 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1724 p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
1901 p_cfi->cfiq->DevSize = jedec_table[index].DevSize; 1725 p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
1902 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1726 p_cfi->cfi_mode = CFI_MODE_JEDEC;
1903 1727
1904 for (i=0; i<num_erase_regions; i++){ 1728 for (i=0; i<num_erase_regions; i++){
@@ -1910,14 +1734,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1910 p_cfi->mfr = jedec_table[index].mfr_id; 1734 p_cfi->mfr = jedec_table[index].mfr_id;
1911 p_cfi->id = jedec_table[index].dev_id; 1735 p_cfi->id = jedec_table[index].dev_id;
1912 1736
1913 uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type); 1737 uaddr = jedec_table[index].uaddr;
1914 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1915 kfree( p_cfi->cfiq );
1916 return 0;
1917 }
1918 1738
1919 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1; 1739 /* The table has unlock addresses in _bytes_, and we try not to let
1920 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2; 1740 our brains explode when we see the datasheets talking about address
1741 lines numbered from A-1 to A18. The CFI table has unlock addresses
1742 in device-words according to the mode the device is connected in */
1743 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
1744 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
1921 1745
1922 return 1; /* ok */ 1746 return 1; /* ok */
1923} 1747}
@@ -1930,14 +1754,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1930 * be perfect - consequently there should be some module parameters that 1754 * be perfect - consequently there should be some module parameters that
1931 * could be manually specified to force the chip info. 1755 * could be manually specified to force the chip info.
1932 */ 1756 */
1933static inline int jedec_match( __u32 base, 1757static inline int jedec_match( uint32_t base,
1934 struct map_info *map, 1758 struct map_info *map,
1935 struct cfi_private *cfi, 1759 struct cfi_private *cfi,
1936 const struct amd_flash_info *finfo ) 1760 const struct amd_flash_info *finfo )
1937{ 1761{
1938 int rc = 0; /* failure until all tests pass */ 1762 int rc = 0; /* failure until all tests pass */
1939 u32 mfr, id; 1763 u32 mfr, id;
1940 __u8 uaddr; 1764 uint8_t uaddr;
1941 1765
1942 /* 1766 /*
1943 * The IDs must match. For X16 and X32 devices operating in 1767 * The IDs must match. For X16 and X32 devices operating in
@@ -1950,8 +1774,8 @@ static inline int jedec_match( __u32 base,
1950 */ 1774 */
1951 switch (cfi->device_type) { 1775 switch (cfi->device_type) {
1952 case CFI_DEVICETYPE_X8: 1776 case CFI_DEVICETYPE_X8:
1953 mfr = (__u8)finfo->mfr_id; 1777 mfr = (uint8_t)finfo->mfr_id;
1954 id = (__u8)finfo->dev_id; 1778 id = (uint8_t)finfo->dev_id;
1955 1779
1956 /* bjd: it seems that if we do this, we can end up 1780 /* bjd: it seems that if we do this, we can end up
1957 * detecting 16bit flashes as an 8bit device, even though 1781 * detecting 16bit flashes as an 8bit device, even though
@@ -1964,12 +1788,12 @@ static inline int jedec_match( __u32 base,
1964 } 1788 }
1965 break; 1789 break;
1966 case CFI_DEVICETYPE_X16: 1790 case CFI_DEVICETYPE_X16:
1967 mfr = (__u16)finfo->mfr_id; 1791 mfr = (uint16_t)finfo->mfr_id;
1968 id = (__u16)finfo->dev_id; 1792 id = (uint16_t)finfo->dev_id;
1969 break; 1793 break;
1970 case CFI_DEVICETYPE_X32: 1794 case CFI_DEVICETYPE_X32:
1971 mfr = (__u16)finfo->mfr_id; 1795 mfr = (uint16_t)finfo->mfr_id;
1972 id = (__u32)finfo->dev_id; 1796 id = (uint32_t)finfo->dev_id;
1973 break; 1797 break;
1974 default: 1798 default:
1975 printk(KERN_WARNING 1799 printk(KERN_WARNING
@@ -1984,25 +1808,25 @@ static inline int jedec_match( __u32 base,
1984 /* the part size must fit in the memory window */ 1808 /* the part size must fit in the memory window */
1985 DEBUG( MTD_DEBUG_LEVEL3, 1809 DEBUG( MTD_DEBUG_LEVEL3,
1986 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 1810 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
1987 __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) ); 1811 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
1988 if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) { 1812 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
1989 DEBUG( MTD_DEBUG_LEVEL3, 1813 DEBUG( MTD_DEBUG_LEVEL3,
1990 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 1814 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
1991 __func__, finfo->mfr_id, finfo->dev_id, 1815 __func__, finfo->mfr_id, finfo->dev_id,
1992 1 << finfo->DevSize ); 1816 1 << finfo->dev_size );
1993 goto match_done; 1817 goto match_done;
1994 } 1818 }
1995 1819
1996 uaddr = finfo_uaddr(finfo, cfi->device_type); 1820 if (! (finfo->devtypes & cfi->device_type))
1997 if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
1998 goto match_done; 1821 goto match_done;
1999 } 1822
1823 uaddr = finfo->uaddr;
2000 1824
2001 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 1825 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
2002 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 1826 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
2003 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 1827 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
2004 && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 || 1828 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
2005 unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) { 1829 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
2006 DEBUG( MTD_DEBUG_LEVEL3, 1830 DEBUG( MTD_DEBUG_LEVEL3,
2007 "MTD %s(): 0x%.4x 0x%.4x did not match\n", 1831 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
2008 __func__, 1832 __func__,
@@ -2042,7 +1866,7 @@ static inline int jedec_match( __u32 base,
2042 * were truly frobbing a real device. 1866 * were truly frobbing a real device.
2043 */ 1867 */
2044 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 1868 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
2045 if(cfi->addr_unlock1) { 1869 if (cfi->addr_unlock1) {
2046 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1870 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2047 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1871 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
2048 } 1872 }
@@ -2068,8 +1892,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2068 if (MTD_UADDR_UNNECESSARY == uaddr_idx) 1892 if (MTD_UADDR_UNNECESSARY == uaddr_idx)
2069 return 0; 1893 return 0;
2070 1894
2071 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; 1895 cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
2072 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; 1896 cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
2073 } 1897 }
2074 1898
2075 /* Make certain we aren't probing past the end of map */ 1899 /* Make certain we aren't probing past the end of map */
@@ -2081,19 +1905,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2081 1905
2082 } 1906 }
2083 /* Ensure the unlock addresses we try stay inside the map */ 1907 /* Ensure the unlock addresses we try stay inside the map */
2084 probe_offset1 = cfi_build_cmd_addr( 1908 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
2085 cfi->addr_unlock1, 1909 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
2086 cfi_interleave(cfi),
2087 cfi->device_type);
2088 probe_offset2 = cfi_build_cmd_addr(
2089 cfi->addr_unlock1,
2090 cfi_interleave(cfi),
2091 cfi->device_type);
2092 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 1910 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2093 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 1911 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2094 {
2095 goto retry; 1912 goto retry;
2096 }
2097 1913
2098 /* Reset */ 1914 /* Reset */
2099 jedec_reset(base, map, cfi); 1915 jedec_reset(base, map, cfi);
@@ -2128,8 +1944,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2128 } 1944 }
2129 goto retry; 1945 goto retry;
2130 } else { 1946 } else {
2131 __u16 mfr; 1947 uint16_t mfr;
2132 __u16 id; 1948 uint16_t id;
2133 1949
2134 /* Make sure it is a chip of the same manufacturer and id */ 1950 /* Make sure it is a chip of the same manufacturer and id */
2135 mfr = jedec_read_mfr(map, base, cfi); 1951 mfr = jedec_read_mfr(map, base, cfi);
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f1637..b44292abd9f7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * mtdparts=<mtddef>[;<mtddef] 10 * mtdparts=<mtddef>[;<mtddef]
11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
12 * <partdef> := <size>[@offset][<name>][ro] 12 * <partdef> := <size>[@offset][<name>][ro][lk]
13 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 13 * <mtd-id> := unique name used in mapping driver/device (mtd->name)
14 * <size> := standard linux memsize OR "-" to denote all remaining space 14 * <size> := standard linux memsize OR "-" to denote all remaining space
15 * <name> := '(' NAME ')' 15 * <name> := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
143 s += 2; 143 s += 2;
144 } 144 }
145 145
146 /* if lk is found do NOT unlock the MTD partition*/
147 if (strncmp(s, "lk", 2) == 0)
148 {
149 mask_flags |= MTD_POWERUP_LOCK;
150 s += 2;
151 }
152
146 /* test if more partitions are following */ 153 /* test if more partitions are following */
147 if (*s == ',') 154 if (*s == ',')
148 { 155 {
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda85..99fd210feaec 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
323 /* put the flash back into command mode */ 323 /* put the flash back into command mode */
324 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000); 324 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
325 325
326 return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM)); 326 return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
327} 327}
328 328
329/* 329/*
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a5ed6d232c35..b35e4813a3a5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
420 status = dataflash_waitready(priv->spi); 420 status = dataflash_waitready(priv->spi);
421 421
422 /* Check result of the compare operation */ 422 /* Check result of the compare operation */
423 if ((status & (1 << 6)) == 1) { 423 if (status & (1 << 6)) {
424 printk(KERN_ERR "%s: compare page %u, err %d\n", 424 printk(KERN_ERR "%s: compare page %u, err %d\n",
425 spi->dev.bus_id, pageaddr, status); 425 spi->dev.bus_id, pageaddr, status);
426 remaining = 0; 426 remaining = 0;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a592fc04cf78..12c253664eb2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
110 Sun Microsystems boardsets. This driver will require CFI support 110 Sun Microsystems boardsets. This driver will require CFI support
111 in the kernel, so if you did not enable CFI previously, do that now. 111 in the kernel, so if you did not enable CFI previously, do that now.
112 112
113config MTD_PNC2000
114 tristate "CFI Flash device mapped on Photron PNC-2000"
115 depends on X86 && MTD_CFI && MTD_PARTITIONS
116 help
117 PNC-2000 is the name of Network Camera product from PHOTRON
118 Ltd. in Japan. It uses CFI-compliant flash.
119
120config MTD_SC520CDP 113config MTD_SC520CDP
121 tristate "CFI Flash device mapped on AMD SC520 CDP" 114 tristate "CFI Flash device mapped on AMD SC520 CDP"
122 depends on X86 && MTD_CFI && MTD_CONCAT 115 depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
576 default "4" 569 default "4"
577 570
578config MTD_SHARP_SL 571config MTD_SHARP_SL
579 bool "ROM mapped on Sharp SL Series" 572 tristate "ROM mapped on Sharp SL Series"
580 depends on ARCH_PXA 573 depends on ARCH_PXA
581 help 574 help
582 This enables access to the flash chip on the Sharp SL Series of PDAs. 575 This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 316382a1401b..a9cbe80f99a0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
28obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 28obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
29obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 29obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
30obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o 30obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
31obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
32obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 31obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
33obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 32obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
34obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o 33obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index aeed9ea79714..49acd4171893 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
80 80
81 return nr_parts; 81 return nr_parts;
82} 82}
83
84static int __devinit parse_partitions(struct of_flash *info,
85 struct of_device *dev)
86{
87 const char *partname;
88 static const char *part_probe_types[]
89 = { "cmdlinepart", "RedBoot", NULL };
90 struct device_node *dp = dev->node, *pp;
91 int nr_parts, i;
92
93 /* First look for RedBoot table or partitions on the command
94 * line, these take precedence over device tree information */
95 nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
96 &info->parts, 0);
97 if (nr_parts > 0) {
98 add_mtd_partitions(info->mtd, info->parts, nr_parts);
99 return 0;
100 }
101
102 /* First count the subnodes */
103 nr_parts = 0;
104 for (pp = dp->child; pp; pp = pp->sibling)
105 nr_parts++;
106
107 if (nr_parts == 0)
108 return parse_obsolete_partitions(dev, info, dp);
109
110 info->parts = kzalloc(nr_parts * sizeof(*info->parts),
111 GFP_KERNEL);
112 if (!info->parts)
113 return -ENOMEM;
114
115 for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
116 const u32 *reg;
117 int len;
118
119 reg = of_get_property(pp, "reg", &len);
120 if (!reg || (len != 2*sizeof(u32))) {
121 dev_err(&dev->dev, "Invalid 'reg' on %s\n",
122 dp->full_name);
123 kfree(info->parts);
124 info->parts = NULL;
125 return -EINVAL;
126 }
127 info->parts[i].offset = reg[0];
128 info->parts[i].size = reg[1];
129
130 partname = of_get_property(pp, "label", &len);
131 if (!partname)
132 partname = of_get_property(pp, "name", &len);
133 info->parts[i].name = (char *)partname;
134
135 if (of_get_property(pp, "read-only", &len))
136 info->parts[i].mask_flags = MTD_WRITEABLE;
137 }
138
139 return nr_parts;
140}
141#else /* MTD_PARTITIONS */ 83#else /* MTD_PARTITIONS */
142#define OF_FLASH_PARTS(info) (0) 84#define OF_FLASH_PARTS(info) (0)
143#define parse_partitions(info, dev) (0) 85#define parse_partitions(info, dev) (0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
212static int __devinit of_flash_probe(struct of_device *dev, 154static int __devinit of_flash_probe(struct of_device *dev,
213 const struct of_device_id *match) 155 const struct of_device_id *match)
214{ 156{
157#ifdef CONFIG_MTD_PARTITIONS
158 static const char *part_probe_types[]
159 = { "cmdlinepart", "RedBoot", NULL };
160#endif
215 struct device_node *dp = dev->node; 161 struct device_node *dp = dev->node;
216 struct resource res; 162 struct resource res;
217 struct of_flash *info; 163 struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
274 } 220 }
275 info->mtd->owner = THIS_MODULE; 221 info->mtd->owner = THIS_MODULE;
276 222
277 err = parse_partitions(info, dev); 223#ifdef CONFIG_MTD_PARTITIONS
224 /* First look for RedBoot table or partitions on the command
225 * line, these take precedence over device tree information */
226 err = parse_mtd_partitions(info->mtd, part_probe_types,
227 &info->parts, 0);
278 if (err < 0) 228 if (err < 0)
279 goto err_out; 229 return err;
230
231#ifdef CONFIG_MTD_OF_PARTS
232 if (err == 0) {
233 err = of_mtd_parse_partitions(&dev->dev, info->mtd,
234 dp, &info->parts);
235 if (err < 0)
236 return err;
237 }
238#endif
239
240 if (err == 0) {
241 err = parse_obsolete_partitions(dev, info, dp);
242 if (err < 0)
243 return err;
244 }
280 245
281 if (err > 0) 246 if (err > 0)
282 add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err); 247 add_mtd_partitions(info->mtd, info->parts, err);
283 else 248 else
249#endif
284 add_mtd_device(info->mtd); 250 add_mtd_device(info->mtd);
285 251
286 return 0; 252 return 0;
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
deleted file mode 100644
index d7e16c2d5c44..000000000000
--- a/drivers/mtd/maps/pnc2000.c
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * pnc2000.c - mapper for Photron PNC-2000 board.
3 *
4 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
5 *
6 * This code is GPL
7 *
8 * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20
21#define WINDOW_ADDR 0xbf000000
22#define WINDOW_SIZE 0x00400000
23
24/*
25 * MAP DRIVER STUFF
26 */
27
28
29static struct map_info pnc_map = {
30 .name = "PNC-2000",
31 .size = WINDOW_SIZE,
32 .bankwidth = 4,
33 .phys = 0xFFFFFFFF,
34 .virt = (void __iomem *)WINDOW_ADDR,
35};
36
37
38/*
39 * MTD 'PARTITIONING' STUFF
40 */
41static struct mtd_partition pnc_partitions[3] = {
42 {
43 .name = "PNC-2000 boot firmware",
44 .size = 0x20000,
45 .offset = 0
46 },
47 {
48 .name = "PNC-2000 kernel",
49 .size = 0x1a0000,
50 .offset = 0x20000
51 },
52 {
53 .name = "PNC-2000 filesystem",
54 .size = 0x240000,
55 .offset = 0x1c0000
56 }
57};
58
59/*
60 * This is the master MTD device for which all the others are just
61 * auto-relocating aliases.
62 */
63static struct mtd_info *mymtd;
64
65static int __init init_pnc2000(void)
66{
67 printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
68
69 simple_map_init(&pnc_map);
70
71 mymtd = do_map_probe("cfi_probe", &pnc_map);
72 if (mymtd) {
73 mymtd->owner = THIS_MODULE;
74 return add_mtd_partitions(mymtd, pnc_partitions, 3);
75 }
76
77 return -ENXIO;
78}
79
80static void __exit cleanup_pnc2000(void)
81{
82 if (mymtd) {
83 del_mtd_partitions(mymtd);
84 map_destroy(mymtd);
85 }
86}
87
88module_init(init_pnc2000);
89module_exit(cleanup_pnc2000);
90
91MODULE_LICENSE("GPL");
92MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
93MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index dcfb85840d1e..0fc5584324e3 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
79 struct cfi_private *cfi = map->fldrv_priv; 79 struct cfi_private *cfi = map->fldrv_priv;
80 80
81 /* barf if this doesn't look right */ 81 /* barf if this doesn't look right */
82 if (cfi->cfiq->InterfaceDesc != 1) { 82 if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n", 83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
84 cfi->cfiq->InterfaceDesc); 84 cfi->cfiq->InterfaceDesc);
85 return -1; 85 return -1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74d9d30edabd..839eed8430a2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
250 250
251 mutex_init(&new->lock);
252 list_add_tail(&new->list, &tr->devs); 251 list_add_tail(&new->list, &tr->devs);
253 added: 252 added:
253 mutex_init(&new->lock);
254 if (!tr->writesect) 254 if (!tr->writesect)
255 new->readonly = 1; 255 new->readonly = 1;
256 256
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a0cee86464ca..5d3ac512ce16 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
481 { 481 {
482 struct mtd_oob_buf buf; 482 struct mtd_oob_buf buf;
483 struct mtd_oob_ops ops; 483 struct mtd_oob_ops ops;
484 uint32_t retlen;
484 485
485 if(!(file->f_mode & 2)) 486 if(!(file->f_mode & 2))
486 return -EPERM; 487 return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
520 buf.start &= ~(mtd->oobsize - 1); 521 buf.start &= ~(mtd->oobsize - 1);
521 ret = mtd->write_oob(mtd, buf.start, &ops); 522 ret = mtd->write_oob(mtd, buf.start, &ops);
522 523
523 if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen, 524 if (ops.oobretlen > 0xFFFFFFFFU)
524 sizeof(uint32_t))) 525 ret = -EOVERFLOW;
526 retlen = ops.oobretlen;
527 if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
528 &retlen, sizeof(buf.length)))
525 ret = -EFAULT; 529 ret = -EFAULT;
526 530
527 kfree(ops.oobbuf); 531 kfree(ops.oobbuf);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e28371..f7e7890e5bc6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
61 61
62 /* Some chips always power up locked. Unlock them now */ 62 /* Some chips always power up locked. Unlock them now */
63 if ((mtd->flags & MTD_WRITEABLE) 63 if ((mtd->flags & MTD_WRITEABLE)
64 && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) { 64 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
65 if (mtd->unlock(mtd, 0, mtd->size)) 65 if (mtd->unlock(mtd, 0, mtd->size))
66 printk(KERN_WARNING 66 printk(KERN_WARNING
67 "%s: unlock failed, " 67 "%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b98..34681bc91105 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,24 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/spinlock.h>
31#include <linux/mtd/mtd.h> 32#include <linux/mtd/mtd.h>
32 33
33#define OOPS_PAGE_SIZE 4096 34#define OOPS_PAGE_SIZE 4096
34 35
35static struct mtdoops_context { 36struct mtdoops_context {
36 int mtd_index; 37 int mtd_index;
37 struct work_struct work; 38 struct work_struct work_erase;
39 struct work_struct work_write;
38 struct mtd_info *mtd; 40 struct mtd_info *mtd;
39 int oops_pages; 41 int oops_pages;
40 int nextpage; 42 int nextpage;
41 int nextcount; 43 int nextcount;
42 44
43 void *oops_buf; 45 void *oops_buf;
46
47 /* writecount and disabling ready are spin lock protected */
48 spinlock_t writecount_lock;
44 int ready; 49 int ready;
45 int writecount; 50 int writecount;
46} oops_cxt; 51} oops_cxt;
@@ -62,10 +67,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
62 erase.mtd = mtd; 67 erase.mtd = mtd;
63 erase.callback = mtdoops_erase_callback; 68 erase.callback = mtdoops_erase_callback;
64 erase.addr = offset; 69 erase.addr = offset;
65 if (mtd->erasesize < OOPS_PAGE_SIZE) 70 erase.len = mtd->erasesize;
66 erase.len = OOPS_PAGE_SIZE;
67 else
68 erase.len = mtd->erasesize;
69 erase.priv = (u_long)&wait_q; 71 erase.priv = (u_long)&wait_q;
70 72
71 set_current_state(TASK_INTERRUPTIBLE); 73 set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +89,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
87 return 0; 89 return 0;
88} 90}
89 91
90static int mtdoops_inc_counter(struct mtdoops_context *cxt) 92static void mtdoops_inc_counter(struct mtdoops_context *cxt)
91{ 93{
92 struct mtd_info *mtd = cxt->mtd; 94 struct mtd_info *mtd = cxt->mtd;
93 size_t retlen; 95 size_t retlen;
@@ -103,25 +105,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
103 105
104 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 106 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
105 &retlen, (u_char *) &count); 107 &retlen, (u_char *) &count);
106 if ((retlen != 4) || (ret < 0)) { 108 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
107 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 109 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
108 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, 110 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
109 retlen, ret); 111 retlen, ret);
110 return 1; 112 schedule_work(&cxt->work_erase);
113 return;
111 } 114 }
112 115
113 /* See if we need to erase the next block */ 116 /* See if we need to erase the next block */
114 if (count != 0xffffffff) 117 if (count != 0xffffffff) {
115 return 1; 118 schedule_work(&cxt->work_erase);
119 return;
120 }
116 121
117 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", 122 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
118 cxt->nextpage, cxt->nextcount); 123 cxt->nextpage, cxt->nextcount);
119 cxt->ready = 1; 124 cxt->ready = 1;
120 return 0;
121} 125}
122 126
123static void mtdoops_prepare(struct mtdoops_context *cxt) 127/* Scheduled work - when we can't proceed without erasing a block */
128static void mtdoops_workfunc_erase(struct work_struct *work)
124{ 129{
130 struct mtdoops_context *cxt =
131 container_of(work, struct mtdoops_context, work_erase);
125 struct mtd_info *mtd = cxt->mtd; 132 struct mtd_info *mtd = cxt->mtd;
126 int i = 0, j, ret, mod; 133 int i = 0, j, ret, mod;
127 134
@@ -136,8 +143,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
136 cxt->nextpage = 0; 143 cxt->nextpage = 0;
137 } 144 }
138 145
139 while (mtd->block_isbad && 146 while (mtd->block_isbad) {
140 mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) { 147 ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
148 if (!ret)
149 break;
150 if (ret < 0) {
151 printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
152 return;
153 }
141badblock: 154badblock:
142 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 155 printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
143 cxt->nextpage * OOPS_PAGE_SIZE); 156 cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +167,61 @@ badblock:
154 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 167 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
155 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 168 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
156 169
157 if (ret < 0) { 170 if (ret >= 0) {
158 if (mtd->block_markbad) 171 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
159 mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 172 cxt->ready = 1;
160 goto badblock; 173 return;
161 } 174 }
162 175
163 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 176 if (mtd->block_markbad && (ret == -EIO)) {
164 177 ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
165 cxt->ready = 1; 178 if (ret < 0) {
179 printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
180 return;
181 }
182 }
183 goto badblock;
166} 184}
167 185
168static void mtdoops_workfunc(struct work_struct *work) 186static void mtdoops_workfunc_write(struct work_struct *work)
169{ 187{
170 struct mtdoops_context *cxt = 188 struct mtdoops_context *cxt =
171 container_of(work, struct mtdoops_context, work); 189 container_of(work, struct mtdoops_context, work_write);
190 struct mtd_info *mtd = cxt->mtd;
191 size_t retlen;
192 int ret;
172 193
173 mtdoops_prepare(cxt); 194 if (cxt->writecount < OOPS_PAGE_SIZE)
174} 195 memset(cxt->oops_buf + cxt->writecount, 0xff,
196 OOPS_PAGE_SIZE - cxt->writecount);
197
198 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
199 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
200
201 cxt->writecount = 0;
202
203 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
204 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
205 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
175 206
176static int find_next_position(struct mtdoops_context *cxt) 207 mtdoops_inc_counter(cxt);
208}
209
210static void find_next_position(struct mtdoops_context *cxt)
177{ 211{
178 struct mtd_info *mtd = cxt->mtd; 212 struct mtd_info *mtd = cxt->mtd;
179 int page, maxpos = 0; 213 int ret, page, maxpos = 0;
180 u32 count, maxcount = 0xffffffff; 214 u32 count, maxcount = 0xffffffff;
181 size_t retlen; 215 size_t retlen;
182 216
183 for (page = 0; page < cxt->oops_pages; page++) { 217 for (page = 0; page < cxt->oops_pages; page++) {
184 mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 218 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
219 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
220 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
221 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
222 continue;
223 }
224
185 if (count == 0xffffffff) 225 if (count == 0xffffffff)
186 continue; 226 continue;
187 if (maxcount == 0xffffffff) { 227 if (maxcount == 0xffffffff) {
@@ -205,20 +245,19 @@ static int find_next_position(struct mtdoops_context *cxt)
205 cxt->ready = 1; 245 cxt->ready = 1;
206 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n", 246 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
207 cxt->nextpage, cxt->nextcount); 247 cxt->nextpage, cxt->nextcount);
208 return 0; 248 return;
209 } 249 }
210 250
211 cxt->nextpage = maxpos; 251 cxt->nextpage = maxpos;
212 cxt->nextcount = maxcount; 252 cxt->nextcount = maxcount;
213 253
214 return mtdoops_inc_counter(cxt); 254 mtdoops_inc_counter(cxt);
215} 255}
216 256
217 257
218static void mtdoops_notify_add(struct mtd_info *mtd) 258static void mtdoops_notify_add(struct mtd_info *mtd)
219{ 259{
220 struct mtdoops_context *cxt = &oops_cxt; 260 struct mtdoops_context *cxt = &oops_cxt;
221 int ret;
222 261
223 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 262 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
224 return; 263 return;
@@ -229,14 +268,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
229 return; 268 return;
230 } 269 }
231 270
271 if (mtd->erasesize < OOPS_PAGE_SIZE) {
272 printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
273 mtd->index);
274 return;
275 }
276
232 cxt->mtd = mtd; 277 cxt->mtd = mtd;
233 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 278 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
234 279
235 ret = find_next_position(cxt); 280 find_next_position(cxt);
236 if (ret == 1)
237 mtdoops_prepare(cxt);
238 281
239 printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index); 282 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
240} 283}
241 284
242static void mtdoops_notify_remove(struct mtd_info *mtd) 285static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +297,24 @@ static void mtdoops_console_sync(void)
254{ 297{
255 struct mtdoops_context *cxt = &oops_cxt; 298 struct mtdoops_context *cxt = &oops_cxt;
256 struct mtd_info *mtd = cxt->mtd; 299 struct mtd_info *mtd = cxt->mtd;
257 size_t retlen; 300 unsigned long flags;
258 int ret;
259 301
260 if (!cxt->ready || !mtd) 302 if (!cxt->ready || !mtd || cxt->writecount == 0)
261 return; 303 return;
262 304
263 if (cxt->writecount == 0) 305 /*
306 * Once ready is 0 and we've held the lock no further writes to the
307 * buffer will happen
308 */
309 spin_lock_irqsave(&cxt->writecount_lock, flags);
310 if (!cxt->ready) {
311 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
264 return; 312 return;
265 313 }
266 if (cxt->writecount < OOPS_PAGE_SIZE)
267 memset(cxt->oops_buf + cxt->writecount, 0xff,
268 OOPS_PAGE_SIZE - cxt->writecount);
269
270 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
271 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
272 cxt->ready = 0; 314 cxt->ready = 0;
273 cxt->writecount = 0; 315 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
274 316
275 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 317 schedule_work(&cxt->work_write);
276 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
277 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
278
279 ret = mtdoops_inc_counter(cxt);
280 if (ret == 1)
281 schedule_work(&cxt->work);
282} 318}
283 319
284static void 320static void
@@ -286,7 +322,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
286{ 322{
287 struct mtdoops_context *cxt = co->data; 323 struct mtdoops_context *cxt = co->data;
288 struct mtd_info *mtd = cxt->mtd; 324 struct mtd_info *mtd = cxt->mtd;
289 int i; 325 unsigned long flags;
290 326
291 if (!oops_in_progress) { 327 if (!oops_in_progress) {
292 mtdoops_console_sync(); 328 mtdoops_console_sync();
@@ -296,6 +332,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
296 if (!cxt->ready || !mtd) 332 if (!cxt->ready || !mtd)
297 return; 333 return;
298 334
335 /* Locking on writecount ensures sequential writes to the buffer */
336 spin_lock_irqsave(&cxt->writecount_lock, flags);
337
338 /* Check ready status didn't change whilst waiting for the lock */
339 if (!cxt->ready)
340 return;
341
299 if (cxt->writecount == 0) { 342 if (cxt->writecount == 0) {
300 u32 *stamp = cxt->oops_buf; 343 u32 *stamp = cxt->oops_buf;
301 *stamp = cxt->nextcount; 344 *stamp = cxt->nextcount;
@@ -305,10 +348,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
305 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 348 if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
306 count = OOPS_PAGE_SIZE - cxt->writecount; 349 count = OOPS_PAGE_SIZE - cxt->writecount;
307 350
308 for (i = 0; i < count; i++, s++) 351 memcpy(cxt->oops_buf + cxt->writecount, s, count);
309 *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s; 352 cxt->writecount += count;
353
354 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
310 355
311 cxt->writecount = cxt->writecount + count; 356 if (cxt->writecount == OOPS_PAGE_SIZE)
357 mtdoops_console_sync();
312} 358}
313 359
314static int __init mtdoops_console_setup(struct console *co, char *options) 360static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +380,6 @@ static struct console mtdoops_console = {
334 .write = mtdoops_console_write, 380 .write = mtdoops_console_write,
335 .setup = mtdoops_console_setup, 381 .setup = mtdoops_console_setup,
336 .unblank = mtdoops_console_sync, 382 .unblank = mtdoops_console_sync,
337 .flags = CON_PRINTBUFFER,
338 .index = -1, 383 .index = -1,
339 .data = &oops_cxt, 384 .data = &oops_cxt,
340}; 385};
@@ -347,11 +392,12 @@ static int __init mtdoops_console_init(void)
347 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 392 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
348 393
349 if (!cxt->oops_buf) { 394 if (!cxt->oops_buf) {
350 printk(KERN_ERR "Failed to allocate oops buffer workspace\n"); 395 printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
351 return -ENOMEM; 396 return -ENOMEM;
352 } 397 }
353 398
354 INIT_WORK(&cxt->work, mtdoops_workfunc); 399 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
400 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
355 401
356 register_console(&mtdoops_console); 402 register_console(&mtdoops_console);
357 register_mtd_user(&mtdoops_notifier); 403 register_mtd_user(&mtdoops_notifier);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 246d4512f64b..0a840d5d75ae 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
93 93
94config MTD_NAND_BF5XX 94config MTD_NAND_BF5XX
95 tristate "Blackfin on-chip NAND Flash Controller driver" 95 tristate "Blackfin on-chip NAND Flash Controller driver"
96 depends on BF54x && MTD_NAND 96 depends on (BF54x || BF52x) && MTD_NAND
97 help 97 help
98 This enables the Blackfin on-chip NAND flash controller 98 This enables the Blackfin on-chip NAND flash controller
99 99
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
283 tristate "Support for NAND Flash on CM-X270 modules" 283 tristate "Support for NAND Flash on CM-X270 modules"
284 depends on MTD_NAND && MACH_ARMCORE 284 depends on MTD_NAND && MACH_ARMCORE
285 285
286config MTD_NAND_PASEMI
287 tristate "NAND support for PA Semi PWRficient"
288 depends on MTD_NAND && PPC_PASEMI
289 help
290 Enables support for NAND Flash interface on PA Semi PWRficient
291 based boards
286 292
287config MTD_NAND_NANDSIM 293config MTD_NAND_NANDSIM
288 tristate "Support for NAND Flash Simulator" 294 tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,13 @@ config MTD_ALAUDA
306 These two (and possibly other) Alauda-based cardreaders for 312 These two (and possibly other) Alauda-based cardreaders for
307 SmartMedia and xD allow raw flash access. 313 SmartMedia and xD allow raw flash access.
308 314
315config MTD_NAND_ORION
316 tristate "NAND Flash support for Marvell Orion SoC"
317 depends on ARCH_ORION && MTD_NAND
318 help
319 This enables the NAND flash controller on Orion machines.
320
321 No board specific support is done by this driver, each board
322 must advertise a platform_device for the driver to attach.
323
309endif # MTD_NAND 324endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3ad6c0165da3..e35f5ea3a7a9 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -29,5 +29,7 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
31obj-$(CONFIG_MTD_ALAUDA) += alauda.o 31obj-$(CONFIG_MTD_ALAUDA) += alauda.o
32obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
33obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
32 34
33nand-objs := nand_base.o nand_bbt.o 35nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e0..c9fb2acf4056 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
156 } 156 }
157 157
158#ifdef CONFIG_MTD_PARTITIONS 158#ifdef CONFIG_MTD_PARTITIONS
159 if (host->board->partition_info)
160 partitions = host->board->partition_info(mtd->size, &num_partitions);
161#ifdef CONFIG_MTD_CMDLINE_PARTS 159#ifdef CONFIG_MTD_CMDLINE_PARTS
162 else { 160 mtd->name = "at91_nand";
163 mtd->name = "at91_nand"; 161 num_partitions = parse_mtd_partitions(mtd, part_probes,
164 num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 162 &partitions, 0);
165 }
166#endif 163#endif
164 if (num_partitions <= 0 && host->board->partition_info)
165 partitions = host->board->partition_info(mtd->size,
166 &num_partitions);
167 167
168 if ((!partitions) || (num_partitions == 0)) { 168 if ((!partitions) || (num_partitions == 0)) {
169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n"); 169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 1657ecd74881..542850cd4c37 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
74static int hardware_ecc; 74static int hardware_ecc;
75#endif 75#endif
76 76
77static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0}; 77static unsigned short bfin_nfc_pin_req[] =
78 {P_NAND_CE,
79 P_NAND_RB,
80 P_NAND_D0,
81 P_NAND_D1,
82 P_NAND_D2,
83 P_NAND_D3,
84 P_NAND_D4,
85 P_NAND_D5,
86 P_NAND_D6,
87 P_NAND_D7,
88 P_NAND_WE,
89 P_NAND_RE,
90 P_NAND_CLE,
91 P_NAND_ALE,
92 0};
78 93
79/* 94/*
80 * Data structures for bf5xx nand flash controller driver 95 * Data structures for bf5xx nand flash controller driver
@@ -507,12 +522,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
507 522
508 init_completion(&info->dma_completion); 523 init_completion(&info->dma_completion);
509 524
525#ifdef CONFIG_BF54x
510 /* Setup DMAC1 channel mux for NFC which shared with SDH */ 526 /* Setup DMAC1 channel mux for NFC which shared with SDH */
511 val = bfin_read_DMAC1_PERIMUX(); 527 val = bfin_read_DMAC1_PERIMUX();
512 val &= 0xFFFE; 528 val &= 0xFFFE;
513 bfin_write_DMAC1_PERIMUX(val); 529 bfin_write_DMAC1_PERIMUX(val);
514 SSYNC(); 530 SSYNC();
515 531#endif
516 /* Request NFC DMA channel */ 532 /* Request NFC DMA channel */
517 ret = request_dma(CH_NFC, "BF5XX NFC driver"); 533 ret = request_dma(CH_NFC, "BF5XX NFC driver");
518 if (ret < 0) { 534 if (ret < 0) {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 1e811715211a..da6ceaa80ba1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
11#undef DEBUG 11#undef DEBUG
12#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
13#include <linux/mtd/nand.h> 13#include <linux/mtd/nand.h>
14#include <linux/mtd/partitions.h>
14#include <linux/rslib.h> 15#include <linux/rslib.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
@@ -52,6 +53,7 @@
52 53
53struct cafe_priv { 54struct cafe_priv {
54 struct nand_chip nand; 55 struct nand_chip nand;
56 struct mtd_partition *parts;
55 struct pci_dev *pdev; 57 struct pci_dev *pdev;
56 void __iomem *mmio; 58 void __iomem *mmio;
57 struct rs_control *rs; 59 struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
84static int timing[3]; 86static int timing[3];
85module_param_array(timing, int, &numtimings, 0644); 87module_param_array(timing, int, &numtimings, 0644);
86 88
89#ifdef CONFIG_MTD_PARTITIONS
90static const char *part_probes[] = { "RedBoot", NULL };
91#endif
92
87/* Hrm. Why isn't this already conditional on something in the struct device? */ 93/* Hrm. Why isn't this already conditional on something in the struct device? */
88#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) 94#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
89 95
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
620{ 626{
621 struct mtd_info *mtd; 627 struct mtd_info *mtd;
622 struct cafe_priv *cafe; 628 struct cafe_priv *cafe;
629 struct mtd_partition *parts;
623 uint32_t ctrl; 630 uint32_t ctrl;
631 int nr_parts;
624 int err = 0; 632 int err = 0;
625 633
626 /* Very old versions shared the same PCI ident for all three 634 /* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
787 goto out_irq; 795 goto out_irq;
788 796
789 pci_set_drvdata(pdev, mtd); 797 pci_set_drvdata(pdev, mtd);
798
799 /* We register the whole device first, separate from the partitions */
790 add_mtd_device(mtd); 800 add_mtd_device(mtd);
801
802#ifdef CONFIG_MTD_PARTITIONS
803 nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
804 if (nr_parts > 0) {
805 cafe->parts = parts;
806 dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
807 add_mtd_partitions(mtd, parts, nr_parts);
808 }
809#endif
791 goto out; 810 goto out;
792 811
793 out_irq: 812 out_irq:
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e29c1da7f56e..85a7283845ff 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
2469 chip->ecc.write_oob = nand_write_oob_std; 2469 chip->ecc.write_oob = nand_write_oob_std;
2470 2470
2471 case NAND_ECC_HW_SYNDROME: 2471 case NAND_ECC_HW_SYNDROME:
2472 if (!chip->ecc.calculate || !chip->ecc.correct || 2472 if ((!chip->ecc.calculate || !chip->ecc.correct ||
2473 !chip->ecc.hwctl) { 2473 !chip->ecc.hwctl) &&
2474 (!chip->ecc.read_page ||
2475 chip->ecc.read_page == nand_read_page_hwecc) ||
2476 !chip->ecc.write_page ||
2477 chip->ecc.write_page == nand_write_page_hwecc) {
2474 printk(KERN_WARNING "No ECC functions supplied, " 2478 printk(KERN_WARNING "No ECC functions supplied, "
2475 "Hardware ECC not possible\n"); 2479 "Hardware ECC not possible\n");
2476 BUG(); 2480 BUG();
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 000000000000..9162cca0182b
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,171 @@
1/*
2 * drivers/mtd/nand/orion_nand.c
3 *
4 * NAND support for Marvell Orion SoC platforms
5 *
6 * Tzachi Perelstein <tzachi@marvell.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/nand.h>
18#include <linux/mtd/partitions.h>
19#include <asm/io.h>
20#include <asm/sizes.h>
21#include <asm/arch/platform.h>
22#include <asm/arch/hardware.h>
23
24#ifdef CONFIG_MTD_CMDLINE_PARTS
25static const char *part_probes[] = { "cmdlinepart", NULL };
26#endif
27
28static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
29{
30 struct nand_chip *nc = mtd->priv;
31 struct orion_nand_data *board = nc->priv;
32 u32 offs;
33
34 if (cmd == NAND_CMD_NONE)
35 return;
36
37 if (ctrl & NAND_CLE)
38 offs = (1 << board->cle);
39 else if (ctrl & NAND_ALE)
40 offs = (1 << board->ale);
41 else
42 return;
43
44 if (nc->options & NAND_BUSWIDTH_16)
45 offs <<= 1;
46
47 writeb(cmd, nc->IO_ADDR_W + offs);
48}
49
50static int __init orion_nand_probe(struct platform_device *pdev)
51{
52 struct mtd_info *mtd;
53 struct nand_chip *nc;
54 struct orion_nand_data *board;
55 void __iomem *io_base;
56 int ret = 0;
57#ifdef CONFIG_MTD_PARTITIONS
58 struct mtd_partition *partitions = NULL;
59 int num_part = 0;
60#endif
61
62 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
63 if (!nc) {
64 printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
65 ret = -ENOMEM;
66 goto no_res;
67 }
68 mtd = (struct mtd_info *)(nc + 1);
69
70 io_base = ioremap(pdev->resource[0].start,
71 pdev->resource[0].end - pdev->resource[0].start + 1);
72 if (!io_base) {
73 printk(KERN_ERR "orion_nand: ioremap failed\n");
74 ret = -EIO;
75 goto no_res;
76 }
77
78 board = pdev->dev.platform_data;
79
80 mtd->priv = nc;
81 mtd->owner = THIS_MODULE;
82
83 nc->priv = board;
84 nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
85 nc->cmd_ctrl = orion_nand_cmd_ctrl;
86 nc->ecc.mode = NAND_ECC_SOFT;
87
88 if (board->width == 16)
89 nc->options |= NAND_BUSWIDTH_16;
90
91 platform_set_drvdata(pdev, mtd);
92
93 if (nand_scan(mtd, 1)) {
94 ret = -ENXIO;
95 goto no_dev;
96 }
97
98#ifdef CONFIG_MTD_PARTITIONS
99#ifdef CONFIG_MTD_CMDLINE_PARTS
100 mtd->name = "orion_nand";
101 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
102#endif
103 /* If cmdline partitions have been passed, let them be used */
104 if (num_part <= 0) {
105 num_part = board->nr_parts;
106 partitions = board->parts;
107 }
108
109 if (partitions && num_part > 0)
110 ret = add_mtd_partitions(mtd, partitions, num_part);
111 else
112 ret = add_mtd_device(mtd);
113#else
114 ret = add_mtd_device(mtd);
115#endif
116
117 if (ret) {
118 nand_release(mtd);
119 goto no_dev;
120 }
121
122 return 0;
123
124no_dev:
125 platform_set_drvdata(pdev, NULL);
126 iounmap(io_base);
127no_res:
128 kfree(nc);
129
130 return ret;
131}
132
133static int __devexit orion_nand_remove(struct platform_device *pdev)
134{
135 struct mtd_info *mtd = platform_get_drvdata(pdev);
136 struct nand_chip *nc = mtd->priv;
137
138 nand_release(mtd);
139
140 iounmap(nc->IO_ADDR_W);
141
142 kfree(nc);
143
144 return 0;
145}
146
147static struct platform_driver orion_nand_driver = {
148 .probe = orion_nand_probe,
149 .remove = orion_nand_remove,
150 .driver = {
151 .name = "orion_nand",
152 .owner = THIS_MODULE,
153 },
154};
155
156static int __init orion_nand_init(void)
157{
158 return platform_driver_register(&orion_nand_driver);
159}
160
161static void __exit orion_nand_exit(void)
162{
163 platform_driver_unregister(&orion_nand_driver);
164}
165
166module_init(orion_nand_init);
167module_exit(orion_nand_exit);
168
169MODULE_LICENSE("GPL");
170MODULE_AUTHOR("Tzachi Perelstein");
171MODULE_DESCRIPTION("NAND glue for Orion platforms");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 000000000000..75c899039023
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Author: Egor Martovetsky <egor@pasemi.com>
5 * Maintained by: Olof Johansson <olof@lixom.net>
6 *
7 * Driver for the PWRficient onchip NAND flash interface
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#undef DEBUG
24
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/of_platform.h>
32#include <linux/platform_device.h>
33#include <linux/pci.h>
34
35#include <asm/io.h>
36
37#define LBICTRL_LPCCTL_NR 0x00004000
38#define CLE_PIN_CTL 15
39#define ALE_PIN_CTL 14
40
41static unsigned int lpcctl;
42static struct mtd_info *pasemi_nand_mtd;
43static const char driver_name[] = "pasemi-nand";
44
45static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
46{
47 struct nand_chip *chip = mtd->priv;
48
49 while (len > 0x800) {
50 memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
51 buf += 0x800;
52 len -= 0x800;
53 }
54 memcpy_fromio(buf, chip->IO_ADDR_R, len);
55}
56
57static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
58{
59 struct nand_chip *chip = mtd->priv;
60
61 while (len > 0x800) {
62 memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
63 buf += 0x800;
64 len -= 0x800;
65 }
66 memcpy_toio(chip->IO_ADDR_R, buf, len);
67}
68
69static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
70 unsigned int ctrl)
71{
72 struct nand_chip *chip = mtd->priv;
73
74 if (cmd == NAND_CMD_NONE)
75 return;
76
77 if (ctrl & NAND_CLE)
78 out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
79 else
80 out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
81
82 /* Push out posted writes */
83 eieio();
84 inl(lpcctl);
85}
86
87int pasemi_device_ready(struct mtd_info *mtd)
88{
89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
90}
91
92static int __devinit pasemi_nand_probe(struct of_device *ofdev,
93 const struct of_device_id *match)
94{
95 struct pci_dev *pdev;
96 struct device_node *np = ofdev->node;
97 struct resource res;
98 struct nand_chip *chip;
99 int err = 0;
100
101 err = of_address_to_resource(np, 0, &res);
102
103 if (err)
104 return -EINVAL;
105
106 /* We only support one device at the moment */
107 if (pasemi_nand_mtd)
108 return -ENODEV;
109
110 pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
111
112 /* Allocate memory for MTD device structure and private data */
113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
114 sizeof(struct nand_chip), GFP_KERNEL);
115 if (!pasemi_nand_mtd) {
116 printk(KERN_WARNING
117 "Unable to allocate PASEMI NAND MTD device structure\n");
118 err = -ENOMEM;
119 goto out;
120 }
121
122 /* Get pointer to private data */
123 chip = (struct nand_chip *)&pasemi_nand_mtd[1];
124
125 /* Link the private data with the MTD structure */
126 pasemi_nand_mtd->priv = chip;
127 pasemi_nand_mtd->owner = THIS_MODULE;
128
129 chip->IO_ADDR_R = of_iomap(np, 0);
130 chip->IO_ADDR_W = chip->IO_ADDR_R;
131
132 if (!chip->IO_ADDR_R) {
133 err = -EIO;
134 goto out_mtd;
135 }
136
137 pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
138 if (!pdev) {
139 err = -ENODEV;
140 goto out_ior;
141 }
142
143 lpcctl = pci_resource_start(pdev, 0);
144
145 if (!request_region(lpcctl, 4, driver_name)) {
146 err = -EBUSY;
147 goto out_ior;
148 }
149
150 chip->cmd_ctrl = pasemi_hwcontrol;
151 chip->dev_ready = pasemi_device_ready;
152 chip->read_buf = pasemi_read_buf;
153 chip->write_buf = pasemi_write_buf;
154 chip->chip_delay = 0;
155 chip->ecc.mode = NAND_ECC_SOFT;
156
157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
159
160 /* Scan to find existance of the device */
161 if (nand_scan(pasemi_nand_mtd, 1)) {
162 err = -ENXIO;
163 goto out_lpc;
164 }
165
166 if (add_mtd_device(pasemi_nand_mtd)) {
167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
168 err = -ENODEV;
169 goto out_lpc;
170 }
171
172 printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
173 res.start, lpcctl);
174
175 return 0;
176
177 out_lpc:
178 release_region(lpcctl, 4);
179 out_ior:
180 iounmap(chip->IO_ADDR_R);
181 out_mtd:
182 kfree(pasemi_nand_mtd);
183 out:
184 return err;
185}
186
187static int __devexit pasemi_nand_remove(struct of_device *ofdev)
188{
189 struct nand_chip *chip;
190
191 if (!pasemi_nand_mtd)
192 return 0;
193
194 chip = pasemi_nand_mtd->priv;
195
196 /* Release resources, unregister device */
197 nand_release(pasemi_nand_mtd);
198
199 release_region(lpcctl, 4);
200
201 iounmap(chip->IO_ADDR_R);
202
203 /* Free the MTD device structure */
204 kfree(pasemi_nand_mtd);
205
206 pasemi_nand_mtd = NULL;
207
208 return 0;
209}
210
211static struct of_device_id pasemi_nand_match[] =
212{
213 {
214 .compatible = "pasemi,localbus-nand",
215 },
216 {},
217};
218
219MODULE_DEVICE_TABLE(of, pasemi_nand_match);
220
221static struct of_platform_driver pasemi_nand_driver =
222{
223 .name = (char*)driver_name,
224 .match_table = pasemi_nand_match,
225 .probe = pasemi_nand_probe,
226 .remove = pasemi_nand_remove,
227};
228
229static int __init pasemi_nand_init(void)
230{
231 return of_register_platform_driver(&pasemi_nand_driver);
232}
233module_init(pasemi_nand_init);
234
235static void __exit pasemi_nand_exit(void)
236{
237 of_unregister_platform_driver(&pasemi_nand_driver);
238}
239module_exit(pasemi_nand_exit);
240
241MODULE_LICENSE("GPL");
242MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
243MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 66f76e9618dd..d31cb7b3feeb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
120 int sel_bit; 120 int sel_bit;
121 int mtd_count; 121 int mtd_count;
122 122
123 unsigned long save_nfconf;
124
123 enum s3c_cpu_type cpu_type; 125 enum s3c_cpu_type cpu_type;
124}; 126};
125 127
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
364 ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { 366 ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
365 /* calculate the bit position of the error */ 367 /* calculate the bit position of the error */
366 368
367 bit = (diff2 >> 2) & 1; 369 bit = ((diff2 >> 3) & 1) |
368 bit |= (diff2 >> 3) & 2; 370 ((diff2 >> 4) & 2) |
369 bit |= (diff2 >> 4) & 4; 371 ((diff2 >> 5) & 4);
370 372
371 /* calculate the byte position of the error */ 373 /* calculate the byte position of the error */
372 374
373 byte = (diff1 << 1) & 0x80; 375 byte = ((diff2 << 7) & 0x100) |
374 byte |= (diff1 << 2) & 0x40; 376 ((diff1 << 0) & 0x80) |
375 byte |= (diff1 << 3) & 0x20; 377 ((diff1 << 1) & 0x40) |
376 byte |= (diff1 << 4) & 0x10; 378 ((diff1 << 2) & 0x20) |
377 379 ((diff1 << 3) & 0x10) |
378 byte |= (diff0 >> 3) & 0x08; 380 ((diff0 >> 4) & 0x08) |
379 byte |= (diff0 >> 2) & 0x04; 381 ((diff0 >> 3) & 0x04) |
380 byte |= (diff0 >> 1) & 0x02; 382 ((diff0 >> 2) & 0x02) |
381 byte |= (diff0 >> 0) & 0x01; 383 ((diff0 >> 1) & 0x01);
382
383 byte |= (diff2 << 8) & 0x100;
384 384
385 dev_dbg(info->device, "correcting error bit %d, byte %d\n", 385 dev_dbg(info->device, "correcting error bit %d, byte %d\n",
386 bit, byte); 386 bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
399 if ((diff0 & ~(1<<fls(diff0))) == 0) 399 if ((diff0 & ~(1<<fls(diff0))) == 0)
400 return 1; 400 return 1;
401 401
402 return 0; 402 return -1;
403} 403}
404 404
405/* ECC functions 405/* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
810 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 810 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
811 811
812 if (info) { 812 if (info) {
813 info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
814
815 /* For the moment, we must ensure nFCE is high during
816 * the time we are suspended. This really should be
817 * handled by suspending the MTDs we are using, but
818 * that is currently not the case. */
819
820 writel(info->save_nfconf | info->sel_bit,
821 info->regs + S3C2410_NFCONF);
822
813 if (!allow_clk_stop(info)) 823 if (!allow_clk_stop(info))
814 clk_disable(info->clk); 824 clk_disable(info->clk);
815 } 825 }
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
820static int s3c24xx_nand_resume(struct platform_device *dev) 830static int s3c24xx_nand_resume(struct platform_device *dev)
821{ 831{
822 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 832 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
833 unsigned long nfconf;
823 834
824 if (info) { 835 if (info) {
825 clk_enable(info->clk); 836 clk_enable(info->clk);
826 s3c2410_nand_inithw(info, dev); 837 s3c2410_nand_inithw(info, dev);
827 838
839 /* Restore the state of the nFCE line. */
840
841 nfconf = readl(info->regs + S3C2410_NFCONF);
842 nfconf &= ~info->sel_bit;
843 nfconf |= info->save_nfconf & info->sel_bit;
844 writel(nfconf, info->regs + S3C2410_NFCONF);
845
828 if (allow_clk_stop(info)) 846 if (allow_clk_stop(info))
829 clk_disable(info->clk); 847 clk_disable(info->clk);
830 } 848 }
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000000..f86e06934cd8
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
1/*
2 * Flash partitions described by the OF (or flattened) device tree
3 *
4 * Copyright (C) 2006 MontaVista Software Inc.
5 * Author: Vitaly Wool <vwool@ru.mvista.com>
6 *
7 * Revised to handle newer style flash binding by:
8 * Copyright (C) 2007 David Gibson, IBM Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/of.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/partitions.h>
21
22int __devinit of_mtd_parse_partitions(struct device *dev,
23 struct mtd_info *mtd,
24 struct device_node *node,
25 struct mtd_partition **pparts)
26{
27 const char *partname;
28 struct device_node *pp;
29 int nr_parts, i;
30
31 /* First count the subnodes */
32 pp = NULL;
33 nr_parts = 0;
34 while ((pp = of_get_next_child(node, pp)))
35 nr_parts++;
36
37 if (nr_parts == 0)
38 return 0;
39
40 *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
41 if (!*pparts)
42 return -ENOMEM;
43
44 pp = NULL;
45 i = 0;
46 while ((pp = of_get_next_child(node, pp))) {
47 const u32 *reg;
48 int len;
49
50 reg = of_get_property(pp, "reg", &len);
51 if (!reg || (len != 2 * sizeof(u32))) {
52 of_node_put(pp);
53 dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
54 kfree(*pparts);
55 *pparts = NULL;
56 return -EINVAL;
57 }
58 (*pparts)[i].offset = reg[0];
59 (*pparts)[i].size = reg[1];
60
61 partname = of_get_property(pp, "label", &len);
62 if (!partname)
63 partname = of_get_property(pp, "name", &len);
64 (*pparts)[i].name = (char *)partname;
65
66 if (of_get_property(pp, "read-only", &len))
67 (*pparts)[i].mask_flags = MTD_WRITEABLE;
68
69 i++;
70 }
71
72 return nr_parts;
73}
74EXPORT_SYMBOL(of_mtd_parse_partitions);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1b0b32011415..ed9f9c061ac5 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -855,6 +855,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
855 this->command(mtd, ONENAND_CMD_READ, from, writesize); 855 this->command(mtd, ONENAND_CMD_READ, from, writesize);
856 ret = this->wait(mtd, FL_READING); 856 ret = this->wait(mtd, FL_READING);
857 onenand_update_bufferram(mtd, from, !ret); 857 onenand_update_bufferram(mtd, from, !ret);
858 if (ret == -EBADMSG)
859 ret = 0;
858 } 860 }
859 } 861 }
860 862
@@ -913,6 +915,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
913 /* Now wait for load */ 915 /* Now wait for load */
914 ret = this->wait(mtd, FL_READING); 916 ret = this->wait(mtd, FL_READING);
915 onenand_update_bufferram(mtd, from, !ret); 917 onenand_update_bufferram(mtd, from, !ret);
918 if (ret == -EBADMSG)
919 ret = 0;
916 } 920 }
917 921
918 /* 922 /*
@@ -923,12 +927,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
923 ops->retlen = read; 927 ops->retlen = read;
924 ops->oobretlen = oobread; 928 ops->oobretlen = oobread;
925 929
926 if (mtd->ecc_stats.failed - stats.failed)
927 return -EBADMSG;
928
929 if (ret) 930 if (ret)
930 return ret; 931 return ret;
931 932
933 if (mtd->ecc_stats.failed - stats.failed)
934 return -EBADMSG;
935
932 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 936 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
933} 937}
934 938
@@ -944,6 +948,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
944 struct mtd_oob_ops *ops) 948 struct mtd_oob_ops *ops)
945{ 949{
946 struct onenand_chip *this = mtd->priv; 950 struct onenand_chip *this = mtd->priv;
951 struct mtd_ecc_stats stats;
947 int read = 0, thislen, column, oobsize; 952 int read = 0, thislen, column, oobsize;
948 size_t len = ops->ooblen; 953 size_t len = ops->ooblen;
949 mtd_oob_mode_t mode = ops->mode; 954 mtd_oob_mode_t mode = ops->mode;
@@ -977,6 +982,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
977 return -EINVAL; 982 return -EINVAL;
978 } 983 }
979 984
985 stats = mtd->ecc_stats;
986
980 while (read < len) { 987 while (read < len) {
981 cond_resched(); 988 cond_resched();
982 989
@@ -988,18 +995,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
988 onenand_update_bufferram(mtd, from, 0); 995 onenand_update_bufferram(mtd, from, 0);
989 996
990 ret = this->wait(mtd, FL_READING); 997 ret = this->wait(mtd, FL_READING);
991 /* First copy data and check return value for ECC handling */ 998 if (ret && ret != -EBADMSG) {
999 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
1000 break;
1001 }
992 1002
993 if (mode == MTD_OOB_AUTO) 1003 if (mode == MTD_OOB_AUTO)
994 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1004 onenand_transfer_auto_oob(mtd, buf, column, thislen);
995 else 1005 else
996 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1006 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
997 1007
998 if (ret) {
999 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
1000 break;
1001 }
1002
1003 read += thislen; 1008 read += thislen;
1004 1009
1005 if (read == len) 1010 if (read == len)
@@ -1016,7 +1021,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1016 } 1021 }
1017 1022
1018 ops->oobretlen = read; 1023 ops->oobretlen = read;
1019 return ret; 1024
1025 if (ret)
1026 return ret;
1027
1028 if (mtd->ecc_stats.failed - stats.failed)
1029 return -EBADMSG;
1030
1031 return 0;
1020} 1032}
1021 1033
1022/** 1034/**
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index a61351f88ec0..47474903263c 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
59 static char nullstring[] = "unallocated"; 59 static char nullstring[] = "unallocated";
60#endif 60#endif
61 61
62 if ( directory < 0 ) {
63 offset = master->size + directory * master->erasesize;
64 while (master->block_isbad &&
65 master->block_isbad(master, offset)) {
66 if (!offset) {
67 nogood:
68 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
69 return -EIO;
70 }
71 offset -= master->erasesize;
72 }
73 } else {
74 offset = directory * master->erasesize;
75 while (master->block_isbad &&
76 master->block_isbad(master, offset)) {
77 offset += master->erasesize;
78 if (offset == master->size)
79 goto nogood;
80 }
81 }
62 buf = vmalloc(master->erasesize); 82 buf = vmalloc(master->erasesize);
63 83
64 if (!buf) 84 if (!buf)
65 return -ENOMEM; 85 return -ENOMEM;
66 86
67 if ( directory < 0 )
68 offset = master->size + directory*master->erasesize;
69 else
70 offset = directory*master->erasesize;
71
72 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 87 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
73 master->name, offset); 88 master->name, offset);
74 89
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1a..8f1f9feb2d60 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
21 */ 21 */
22 22
23/* 23/*
24 * This file includes UBI initialization and building of UBI devices. At the 24 * This file includes UBI initialization and building of UBI devices.
25 * moment UBI devices may only be added while UBI is initialized, but dynamic 25 *
26 * device add/remove functionality is planned. Also, at the moment we only 26 * When UBI is initialized, it attaches all the MTD devices specified as the
27 * attach UBI devices by scanning, which will become a bottleneck when flashes 27 * module load parameters or the kernel boot parameters. If MTD devices were
28 * reach certain large size. Then one may improve UBI and add other methods. 28 * specified, UBI does not attach any MTD device, but it is possible to do
29 * later using the "UBI control device".
30 *
31 * At the moment we only attach UBI devices by scanning, which will become a
32 * bottleneck when flashes reach certain large size. Then one may improve UBI
33 * and add other methods, although it does not seem to be easy to do.
29 */ 34 */
30 35
31#include <linux/err.h> 36#include <linux/err.h>
@@ -33,7 +38,9 @@
33#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
34#include <linux/stringify.h> 39#include <linux/stringify.h>
35#include <linux/stat.h> 40#include <linux/stat.h>
41#include <linux/miscdevice.h>
36#include <linux/log2.h> 42#include <linux/log2.h>
43#include <linux/kthread.h>
37#include "ubi.h" 44#include "ubi.h"
38 45
39/* Maximum length of the 'mtd=' parameter */ 46/* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
43 * struct mtd_dev_param - MTD device parameter description data structure. 50 * struct mtd_dev_param - MTD device parameter description data structure.
44 * @name: MTD device name or number string 51 * @name: MTD device name or number string
45 * @vid_hdr_offs: VID header offset 52 * @vid_hdr_offs: VID header offset
46 * @data_offs: data offset
47 */ 53 */
48struct mtd_dev_param 54struct mtd_dev_param
49{ 55{
50 char name[MTD_PARAM_LEN_MAX]; 56 char name[MTD_PARAM_LEN_MAX];
51 int vid_hdr_offs; 57 int vid_hdr_offs;
52 int data_offs;
53}; 58};
54 59
55/* Numbers of elements set in the @mtd_dev_param array */ 60/* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,30 @@ static int mtd_devs = 0;
58/* MTD devices specification parameters */ 63/* MTD devices specification parameters */
59static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 64static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
60 65
61/* Number of UBI devices in system */ 66/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
62int ubi_devices_cnt; 67struct class *ubi_class;
68
69/* Slab cache for lock-tree entries */
70struct kmem_cache *ubi_ltree_slab;
71
72/* Slab cache for wear-leveling entries */
73struct kmem_cache *ubi_wl_entry_slab;
74
75/* UBI control character device */
76static struct miscdevice ubi_ctrl_cdev = {
77 .minor = MISC_DYNAMIC_MINOR,
78 .name = "ubi_ctrl",
79 .fops = &ubi_ctrl_cdev_operations,
80};
63 81
64/* All UBI devices in system */ 82/* All UBI devices in system */
65struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 83static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
66 84
67/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 85/* Serializes UBI devices creations and removals */
68struct class *ubi_class; 86DEFINE_MUTEX(ubi_devices_mutex);
87
88/* Protects @ubi_devices and @ubi->ref_count */
89static DEFINE_SPINLOCK(ubi_devices_lock);
69 90
70/* "Show" method for files in '/<sysfs>/class/ubi/' */ 91/* "Show" method for files in '/<sysfs>/class/ubi/' */
71static ssize_t ubi_version_show(struct class *class, char *buf) 92static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +122,150 @@ static struct device_attribute dev_min_io_size =
101 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); 122 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
102static struct device_attribute dev_bgt_enabled = 123static struct device_attribute dev_bgt_enabled =
103 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 124 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
125static struct device_attribute dev_mtd_num =
126 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
127
128/**
129 * ubi_get_device - get UBI device.
130 * @ubi_num: UBI device number
131 *
132 * This function returns UBI device description object for UBI device number
133 * @ubi_num, or %NULL if the device does not exist. This function increases the
134 * device reference count to prevent removal of the device. In other words, the
135 * device cannot be removed if its reference count is not zero.
136 */
137struct ubi_device *ubi_get_device(int ubi_num)
138{
139 struct ubi_device *ubi;
140
141 spin_lock(&ubi_devices_lock);
142 ubi = ubi_devices[ubi_num];
143 if (ubi) {
144 ubi_assert(ubi->ref_count >= 0);
145 ubi->ref_count += 1;
146 get_device(&ubi->dev);
147 }
148 spin_unlock(&ubi_devices_lock);
149
150 return ubi;
151}
152
153/**
154 * ubi_put_device - drop an UBI device reference.
155 * @ubi: UBI device description object
156 */
157void ubi_put_device(struct ubi_device *ubi)
158{
159 spin_lock(&ubi_devices_lock);
160 ubi->ref_count -= 1;
161 put_device(&ubi->dev);
162 spin_unlock(&ubi_devices_lock);
163}
164
165/**
166 * ubi_get_by_major - get UBI device description object by character device
167 * major number.
168 * @major: major number
169 *
170 * This function is similar to 'ubi_get_device()', but it searches the device
171 * by its major number.
172 */
173struct ubi_device *ubi_get_by_major(int major)
174{
175 int i;
176 struct ubi_device *ubi;
177
178 spin_lock(&ubi_devices_lock);
179 for (i = 0; i < UBI_MAX_DEVICES; i++) {
180 ubi = ubi_devices[i];
181 if (ubi && MAJOR(ubi->cdev.dev) == major) {
182 ubi_assert(ubi->ref_count >= 0);
183 ubi->ref_count += 1;
184 get_device(&ubi->dev);
185 spin_unlock(&ubi_devices_lock);
186 return ubi;
187 }
188 }
189 spin_unlock(&ubi_devices_lock);
190
191 return NULL;
192}
193
194/**
195 * ubi_major2num - get UBI device number by character device major number.
196 * @major: major number
197 *
198 * This function searches UBI device number object by its major number. If UBI
199 * device was not found, this function returns -ENODEV, otherwise the UBI device
200 * number is returned.
201 */
202int ubi_major2num(int major)
203{
204 int i, ubi_num = -ENODEV;
205
206 spin_lock(&ubi_devices_lock);
207 for (i = 0; i < UBI_MAX_DEVICES; i++) {
208 struct ubi_device *ubi = ubi_devices[i];
209
210 if (ubi && MAJOR(ubi->cdev.dev) == major) {
211 ubi_num = ubi->ubi_num;
212 break;
213 }
214 }
215 spin_unlock(&ubi_devices_lock);
216
217 return ubi_num;
218}
104 219
105/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ 220/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
106static ssize_t dev_attribute_show(struct device *dev, 221static ssize_t dev_attribute_show(struct device *dev,
107 struct device_attribute *attr, char *buf) 222 struct device_attribute *attr, char *buf)
108{ 223{
109 const struct ubi_device *ubi; 224 ssize_t ret;
225 struct ubi_device *ubi;
110 226
227 /*
228 * The below code looks weird, but it actually makes sense. We get the
229 * UBI device reference from the contained 'struct ubi_device'. But it
230 * is unclear if the device was removed or not yet. Indeed, if the
231 * device was removed before we increased its reference count,
232 * 'ubi_get_device()' will return -ENODEV and we fail.
233 *
234 * Remember, 'struct ubi_device' is freed in the release function, so
235 * we still can use 'ubi->ubi_num'.
236 */
111 ubi = container_of(dev, struct ubi_device, dev); 237 ubi = container_of(dev, struct ubi_device, dev);
238 ubi = ubi_get_device(ubi->ubi_num);
239 if (!ubi)
240 return -ENODEV;
241
112 if (attr == &dev_eraseblock_size) 242 if (attr == &dev_eraseblock_size)
113 return sprintf(buf, "%d\n", ubi->leb_size); 243 ret = sprintf(buf, "%d\n", ubi->leb_size);
114 else if (attr == &dev_avail_eraseblocks) 244 else if (attr == &dev_avail_eraseblocks)
115 return sprintf(buf, "%d\n", ubi->avail_pebs); 245 ret = sprintf(buf, "%d\n", ubi->avail_pebs);
116 else if (attr == &dev_total_eraseblocks) 246 else if (attr == &dev_total_eraseblocks)
117 return sprintf(buf, "%d\n", ubi->good_peb_count); 247 ret = sprintf(buf, "%d\n", ubi->good_peb_count);
118 else if (attr == &dev_volumes_count) 248 else if (attr == &dev_volumes_count)
119 return sprintf(buf, "%d\n", ubi->vol_count); 249 ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
120 else if (attr == &dev_max_ec) 250 else if (attr == &dev_max_ec)
121 return sprintf(buf, "%d\n", ubi->max_ec); 251 ret = sprintf(buf, "%d\n", ubi->max_ec);
122 else if (attr == &dev_reserved_for_bad) 252 else if (attr == &dev_reserved_for_bad)
123 return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 253 ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
124 else if (attr == &dev_bad_peb_count) 254 else if (attr == &dev_bad_peb_count)
125 return sprintf(buf, "%d\n", ubi->bad_peb_count); 255 ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
126 else if (attr == &dev_max_vol_count) 256 else if (attr == &dev_max_vol_count)
127 return sprintf(buf, "%d\n", ubi->vtbl_slots); 257 ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
128 else if (attr == &dev_min_io_size) 258 else if (attr == &dev_min_io_size)
129 return sprintf(buf, "%d\n", ubi->min_io_size); 259 ret = sprintf(buf, "%d\n", ubi->min_io_size);
130 else if (attr == &dev_bgt_enabled) 260 else if (attr == &dev_bgt_enabled)
131 return sprintf(buf, "%d\n", ubi->thread_enabled); 261 ret = sprintf(buf, "%d\n", ubi->thread_enabled);
262 else if (attr == &dev_mtd_num)
263 ret = sprintf(buf, "%d\n", ubi->mtd->index);
132 else 264 else
133 BUG(); 265 ret = -EINVAL;
134 266
135 return 0; 267 ubi_put_device(ubi);
268 return ret;
136} 269}
137 270
138/* Fake "release" method for UBI devices */ 271/* Fake "release" method for UBI devices */
@@ -150,68 +283,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
150 int err; 283 int err;
151 284
152 ubi->dev.release = dev_release; 285 ubi->dev.release = dev_release;
153 ubi->dev.devt = MKDEV(ubi->major, 0); 286 ubi->dev.devt = ubi->cdev.dev;
154 ubi->dev.class = ubi_class; 287 ubi->dev.class = ubi_class;
155 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 288 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
156 err = device_register(&ubi->dev); 289 err = device_register(&ubi->dev);
157 if (err) 290 if (err)
158 goto out; 291 return err;
159 292
160 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 293 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
161 if (err) 294 if (err)
162 goto out_unregister; 295 return err;
163 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 296 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
164 if (err) 297 if (err)
165 goto out_eraseblock_size; 298 return err;
166 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); 299 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
167 if (err) 300 if (err)
168 goto out_avail_eraseblocks; 301 return err;
169 err = device_create_file(&ubi->dev, &dev_volumes_count); 302 err = device_create_file(&ubi->dev, &dev_volumes_count);
170 if (err) 303 if (err)
171 goto out_total_eraseblocks; 304 return err;
172 err = device_create_file(&ubi->dev, &dev_max_ec); 305 err = device_create_file(&ubi->dev, &dev_max_ec);
173 if (err) 306 if (err)
174 goto out_volumes_count; 307 return err;
175 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 308 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
176 if (err) 309 if (err)
177 goto out_volumes_max_ec; 310 return err;
178 err = device_create_file(&ubi->dev, &dev_bad_peb_count); 311 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
179 if (err) 312 if (err)
180 goto out_reserved_for_bad; 313 return err;
181 err = device_create_file(&ubi->dev, &dev_max_vol_count); 314 err = device_create_file(&ubi->dev, &dev_max_vol_count);
182 if (err) 315 if (err)
183 goto out_bad_peb_count; 316 return err;
184 err = device_create_file(&ubi->dev, &dev_min_io_size); 317 err = device_create_file(&ubi->dev, &dev_min_io_size);
185 if (err) 318 if (err)
186 goto out_max_vol_count; 319 return err;
187 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 320 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
188 if (err) 321 if (err)
189 goto out_min_io_size; 322 return err;
190 323 err = device_create_file(&ubi->dev, &dev_mtd_num);
191 return 0;
192
193out_min_io_size:
194 device_remove_file(&ubi->dev, &dev_min_io_size);
195out_max_vol_count:
196 device_remove_file(&ubi->dev, &dev_max_vol_count);
197out_bad_peb_count:
198 device_remove_file(&ubi->dev, &dev_bad_peb_count);
199out_reserved_for_bad:
200 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
201out_volumes_max_ec:
202 device_remove_file(&ubi->dev, &dev_max_ec);
203out_volumes_count:
204 device_remove_file(&ubi->dev, &dev_volumes_count);
205out_total_eraseblocks:
206 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
207out_avail_eraseblocks:
208 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
209out_eraseblock_size:
210 device_remove_file(&ubi->dev, &dev_eraseblock_size);
211out_unregister:
212 device_unregister(&ubi->dev);
213out:
214 ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
215 return err; 324 return err;
216} 325}
217 326
@@ -221,6 +330,7 @@ out:
221 */ 330 */
222static void ubi_sysfs_close(struct ubi_device *ubi) 331static void ubi_sysfs_close(struct ubi_device *ubi)
223{ 332{
333 device_remove_file(&ubi->dev, &dev_mtd_num);
224 device_remove_file(&ubi->dev, &dev_bgt_enabled); 334 device_remove_file(&ubi->dev, &dev_bgt_enabled);
225 device_remove_file(&ubi->dev, &dev_min_io_size); 335 device_remove_file(&ubi->dev, &dev_min_io_size);
226 device_remove_file(&ubi->dev, &dev_max_vol_count); 336 device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +354,7 @@ static void kill_volumes(struct ubi_device *ubi)
244 354
245 for (i = 0; i < ubi->vtbl_slots; i++) 355 for (i = 0; i < ubi->vtbl_slots; i++)
246 if (ubi->volumes[i]) 356 if (ubi->volumes[i])
247 ubi_free_volume(ubi, i); 357 ubi_free_volume(ubi, ubi->volumes[i]);
248} 358}
249 359
250/** 360/**
@@ -259,7 +369,7 @@ static int uif_init(struct ubi_device *ubi)
259 int i, err; 369 int i, err;
260 dev_t dev; 370 dev_t dev;
261 371
262 mutex_init(&ubi->vtbl_mutex); 372 mutex_init(&ubi->volumes_mutex);
263 spin_lock_init(&ubi->volumes_lock); 373 spin_lock_init(&ubi->volumes_lock);
264 374
265 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 375 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
@@ -278,39 +388,40 @@ static int uif_init(struct ubi_device *ubi)
278 return err; 388 return err;
279 } 389 }
280 390
391 ubi_assert(MINOR(dev) == 0);
281 cdev_init(&ubi->cdev, &ubi_cdev_operations); 392 cdev_init(&ubi->cdev, &ubi_cdev_operations);
282 ubi->major = MAJOR(dev); 393 dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
283 dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
284 ubi->cdev.owner = THIS_MODULE; 394 ubi->cdev.owner = THIS_MODULE;
285 395
286 dev = MKDEV(ubi->major, 0);
287 err = cdev_add(&ubi->cdev, dev, 1); 396 err = cdev_add(&ubi->cdev, dev, 1);
288 if (err) { 397 if (err) {
289 ubi_err("cannot add character device %s", ubi->ubi_name); 398 ubi_err("cannot add character device");
290 goto out_unreg; 399 goto out_unreg;
291 } 400 }
292 401
293 err = ubi_sysfs_init(ubi); 402 err = ubi_sysfs_init(ubi);
294 if (err) 403 if (err)
295 goto out_cdev; 404 goto out_sysfs;
296 405
297 for (i = 0; i < ubi->vtbl_slots; i++) 406 for (i = 0; i < ubi->vtbl_slots; i++)
298 if (ubi->volumes[i]) { 407 if (ubi->volumes[i]) {
299 err = ubi_add_volume(ubi, i); 408 err = ubi_add_volume(ubi, ubi->volumes[i]);
300 if (err) 409 if (err) {
410 ubi_err("cannot add volume %d", i);
301 goto out_volumes; 411 goto out_volumes;
412 }
302 } 413 }
303 414
304 return 0; 415 return 0;
305 416
306out_volumes: 417out_volumes:
307 kill_volumes(ubi); 418 kill_volumes(ubi);
419out_sysfs:
308 ubi_sysfs_close(ubi); 420 ubi_sysfs_close(ubi);
309out_cdev:
310 cdev_del(&ubi->cdev); 421 cdev_del(&ubi->cdev);
311out_unreg: 422out_unreg:
312 unregister_chrdev_region(MKDEV(ubi->major, 0), 423 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
313 ubi->vtbl_slots + 1); 424 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
314 return err; 425 return err;
315} 426}
316 427
@@ -323,7 +434,7 @@ static void uif_close(struct ubi_device *ubi)
323 kill_volumes(ubi); 434 kill_volumes(ubi);
324 ubi_sysfs_close(ubi); 435 ubi_sysfs_close(ubi);
325 cdev_del(&ubi->cdev); 436 cdev_del(&ubi->cdev);
326 unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); 437 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
327} 438}
328 439
329/** 440/**
@@ -384,9 +495,9 @@ out_si:
384 * assumed: 495 * assumed:
385 * o EC header is always at offset zero - this cannot be changed; 496 * o EC header is always at offset zero - this cannot be changed;
386 * o VID header starts just after the EC header at the closest address 497 * o VID header starts just after the EC header at the closest address
387 * aligned to @io->@hdrs_min_io_size; 498 * aligned to @io->hdrs_min_io_size;
388 * o data starts just after the VID header at the closest address aligned to 499 * o data starts just after the VID header at the closest address aligned to
389 * @io->@min_io_size 500 * @io->min_io_size
390 * 501 *
391 * This function returns zero in case of success and a negative error code in 502 * This function returns zero in case of success and a negative error code in
392 * case of failure. 503 * case of failure.
@@ -407,6 +518,9 @@ static int io_init(struct ubi_device *ubi)
407 return -EINVAL; 518 return -EINVAL;
408 } 519 }
409 520
521 if (ubi->vid_hdr_offset < 0)
522 return -EINVAL;
523
410 /* 524 /*
411 * Note, in this implementation we support MTD devices with 0x7FFFFFFF 525 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
412 * physical eraseblocks maximum. 526 * physical eraseblocks maximum.
@@ -424,7 +538,8 @@ static int io_init(struct ubi_device *ubi)
424 538
425 /* Make sure minimal I/O unit is power of 2 */ 539 /* Make sure minimal I/O unit is power of 2 */
426 if (!is_power_of_2(ubi->min_io_size)) { 540 if (!is_power_of_2(ubi->min_io_size)) {
427 ubi_err("bad min. I/O unit"); 541 ubi_err("min. I/O unit (%d) is not power of 2",
542 ubi->min_io_size);
428 return -EINVAL; 543 return -EINVAL;
429 } 544 }
430 545
@@ -453,10 +568,8 @@ static int io_init(struct ubi_device *ubi)
453 } 568 }
454 569
455 /* Similar for the data offset */ 570 /* Similar for the data offset */
456 if (ubi->leb_start == 0) { 571 ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
457 ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; 572 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
458 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
459 }
460 573
461 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 574 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
462 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 575 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
@@ -514,76 +627,91 @@ static int io_init(struct ubi_device *ubi)
514} 627}
515 628
516/** 629/**
517 * attach_mtd_dev - attach an MTD device. 630 * ubi_attach_mtd_dev - attach an MTD device.
518 * @mtd_dev: MTD device name or number string 631 * @mtd_dev: MTD device description object
632 * @ubi_num: number to assign to the new UBI device
519 * @vid_hdr_offset: VID header offset 633 * @vid_hdr_offset: VID header offset
520 * @data_offset: data offset
521 * 634 *
522 * This function attaches an MTD device to UBI. It first treats @mtd_dev as the 635 * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num number
523 * MTD device name, and tries to open it by this name. If it is unable to open, 636 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
524 * it tries to convert @mtd_dev to an integer and open the MTD device by its 637 * which case this function finds a vacant device number and assigns it
525 * number. Returns zero in case of success and a negative error code in case of 638 * automatically. Returns the new UBI device number in case of success and a
526 * failure. 639 * negative error code in case of failure.
640 *
641 * Note, the invocations of this function have to be serialized by the
642 * @ubi_devices_mutex.
527 */ 643 */
528static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, 644int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
529 int data_offset)
530{ 645{
531 struct ubi_device *ubi; 646 struct ubi_device *ubi;
532 struct mtd_info *mtd;
533 int i, err; 647 int i, err;
534 648
535 mtd = get_mtd_device_nm(mtd_dev); 649 /*
536 if (IS_ERR(mtd)) { 650 * Check if we already have the same MTD device attached.
537 int mtd_num; 651 *
538 char *endp; 652 * Note, this function assumes that UBI devices creations and deletions
539 653 * are serialized, so it does not take the &ubi_devices_lock.
540 if (PTR_ERR(mtd) != -ENODEV) 654 */
541 return PTR_ERR(mtd); 655 for (i = 0; i < UBI_MAX_DEVICES; i++) {
542 656 ubi = ubi_devices[i];
543 /* 657 if (ubi && mtd->index == ubi->mtd->index) {
544 * Probably this is not MTD device name but MTD device number - 658 dbg_err("mtd%d is already attached to ubi%d",
545 * check this out. 659 mtd->index, i);
546 */ 660 return -EEXIST;
547 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
548 if (*endp != '\0' || mtd_dev == endp) {
549 ubi_err("incorrect MTD device: \"%s\"", mtd_dev);
550 return -ENODEV;
551 } 661 }
662 }
552 663
553 mtd = get_mtd_device(NULL, mtd_num); 664 /*
554 if (IS_ERR(mtd)) 665 * Make sure this MTD device is not emulated on top of an UBI volume
555 return PTR_ERR(mtd); 666 * already. Well, generally this recursion works fine, but there are
667 * different problems like the UBI module takes a reference to itself
668 * by attaching (and thus, opening) the emulated MTD device. This
669 * results in inability to unload the module. And in general it makes
670 * no sense to attach emulated MTD devices, so we prohibit this.
671 */
672 if (mtd->type == MTD_UBIVOLUME) {
673 ubi_err("refuse attaching mtd%d - it is already emulated on "
674 "top of UBI", mtd->index);
675 return -EINVAL;
556 } 676 }
557 677
558 /* Check if we already have the same MTD device attached */ 678 if (ubi_num == UBI_DEV_NUM_AUTO) {
559 for (i = 0; i < ubi_devices_cnt; i++) 679 /* Search for an empty slot in the @ubi_devices array */
560 if (ubi_devices[i]->mtd->index == mtd->index) { 680 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
561 ubi_err("mtd%d is already attached to ubi%d", 681 if (!ubi_devices[ubi_num])
562 mtd->index, i); 682 break;
563 err = -EINVAL; 683 if (ubi_num == UBI_MAX_DEVICES) {
564 goto out_mtd; 684 dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
685 return -ENFILE;
686 }
687 } else {
688 if (ubi_num >= UBI_MAX_DEVICES)
689 return -EINVAL;
690
691 /* Make sure ubi_num is not busy */
692 if (ubi_devices[ubi_num]) {
693 dbg_err("ubi%d already exists", ubi_num);
694 return -EEXIST;
565 } 695 }
566
567 ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
568 GFP_KERNEL);
569 if (!ubi) {
570 err = -ENOMEM;
571 goto out_mtd;
572 } 696 }
573 697
574 ubi->ubi_num = ubi_devices_cnt; 698 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
699 if (!ubi)
700 return -ENOMEM;
701
575 ubi->mtd = mtd; 702 ubi->mtd = mtd;
703 ubi->ubi_num = ubi_num;
704 ubi->vid_hdr_offset = vid_hdr_offset;
576 705
577 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", 706 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
578 ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); 707 mtd->index, ubi_num, vid_hdr_offset);
579 708
580 ubi->vid_hdr_offset = vid_hdr_offset;
581 ubi->leb_start = data_offset;
582 err = io_init(ubi); 709 err = io_init(ubi);
583 if (err) 710 if (err)
584 goto out_free; 711 goto out_free;
585 712
586 mutex_init(&ubi->buf_mutex); 713 mutex_init(&ubi->buf_mutex);
714 mutex_init(&ubi->ckvol_mutex);
587 ubi->peb_buf1 = vmalloc(ubi->peb_size); 715 ubi->peb_buf1 = vmalloc(ubi->peb_size);
588 if (!ubi->peb_buf1) 716 if (!ubi->peb_buf1)
589 goto out_free; 717 goto out_free;
@@ -609,8 +737,16 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
609 if (err) 737 if (err)
610 goto out_detach; 738 goto out_detach;
611 739
612 ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); 740 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
613 ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); 741 if (IS_ERR(ubi->bgt_thread)) {
742 err = PTR_ERR(ubi->bgt_thread);
743 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
744 err);
745 goto out_uif;
746 }
747
748 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
749 ubi_msg("MTD device name: \"%s\"", mtd->name);
614 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 750 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
615 ubi_msg("physical eraseblock size: %d bytes (%d KiB)", 751 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
616 ubi->peb_size, ubi->peb_size >> 10); 752 ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +774,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
638 wake_up_process(ubi->bgt_thread); 774 wake_up_process(ubi->bgt_thread);
639 } 775 }
640 776
641 ubi_devices_cnt += 1; 777 ubi_devices[ubi_num] = ubi;
642 return 0; 778 return ubi_num;
643 779
780out_uif:
781 uif_close(ubi);
644out_detach: 782out_detach:
645 ubi_eba_close(ubi); 783 ubi_eba_close(ubi);
646 ubi_wl_close(ubi); 784 ubi_wl_close(ubi);
@@ -652,21 +790,58 @@ out_free:
652 vfree(ubi->dbg_peb_buf); 790 vfree(ubi->dbg_peb_buf);
653#endif 791#endif
654 kfree(ubi); 792 kfree(ubi);
655out_mtd:
656 put_mtd_device(mtd);
657 ubi_devices[ubi_devices_cnt] = NULL;
658 return err; 793 return err;
659} 794}
660 795
661/** 796/**
662 * detach_mtd_dev - detach an MTD device. 797 * ubi_detach_mtd_dev - detach an MTD device.
663 * @ubi: UBI device description object 798 * @ubi_num: UBI device number to detach from
799 * @anyway: detach MTD even if device reference count is not zero
800 *
801 * This function destroys an UBI device number @ubi_num and detaches the
802 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
803 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
804 * exist.
805 *
806 * Note, the invocations of this function have to be serialized by the
807 * @ubi_devices_mutex.
664 */ 808 */
665static void detach_mtd_dev(struct ubi_device *ubi) 809int ubi_detach_mtd_dev(int ubi_num, int anyway)
666{ 810{
667 int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; 811 struct ubi_device *ubi;
668 812
813 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
814 return -EINVAL;
815
816 spin_lock(&ubi_devices_lock);
817 ubi = ubi_devices[ubi_num];
818 if (!ubi) {
819 spin_unlock(&ubi_devices_lock);
820 return -EINVAL;
821 }
822
823 if (ubi->ref_count) {
824 if (!anyway) {
825 spin_unlock(&ubi_devices_lock);
826 return -EBUSY;
827 }
828 /* This may only happen if there is a bug */
829 ubi_err("%s reference count %d, destroy anyway",
830 ubi->ubi_name, ubi->ref_count);
831 }
832 ubi_devices[ubi_num] = NULL;
833 spin_unlock(&ubi_devices_lock);
834
835 ubi_assert(ubi_num == ubi->ubi_num);
669 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 836 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
837
838 /*
839 * Before freeing anything, we have to stop the background thread to
840 * prevent it from doing anything on this device while we are freeing.
841 */
842 if (ubi->bgt_thread)
843 kthread_stop(ubi->bgt_thread);
844
670 uif_close(ubi); 845 uif_close(ubi);
671 ubi_eba_close(ubi); 846 ubi_eba_close(ubi);
672 ubi_wl_close(ubi); 847 ubi_wl_close(ubi);
@@ -677,11 +852,51 @@ static void detach_mtd_dev(struct ubi_device *ubi)
677#ifdef CONFIG_MTD_UBI_DEBUG 852#ifdef CONFIG_MTD_UBI_DEBUG
678 vfree(ubi->dbg_peb_buf); 853 vfree(ubi->dbg_peb_buf);
679#endif 854#endif
680 kfree(ubi_devices[ubi_num]); 855 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
681 ubi_devices[ubi_num] = NULL; 856 kfree(ubi);
682 ubi_devices_cnt -= 1; 857 return 0;
683 ubi_assert(ubi_devices_cnt >= 0); 858}
684 ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); 859
860/**
861 * ltree_entry_ctor - lock tree entries slab cache constructor.
862 * @obj: the lock-tree entry to construct
863 * @cache: the lock tree entry slab cache
864 * @flags: constructor flags
865 */
866static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
867{
868 struct ubi_ltree_entry *le = obj;
869
870 le->users = 0;
871 init_rwsem(&le->mutex);
872}
873
874/**
875 * find_mtd_device - open an MTD device by its name or number.
876 * @mtd_dev: name or number of the device
877 *
878 * This function tries to open and MTD device described by @mtd_dev string,
879 * which is first treated as an ASCII number, and if it is not true, it is
880 * treated as MTD device name. Returns MTD device description object in case of
881 * success and a negative error code in case of failure.
882 */
883static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
884{
885 struct mtd_info *mtd;
886 int mtd_num;
887 char *endp;
888
889 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
890 if (*endp != '\0' || mtd_dev == endp) {
891 /*
892 * This does not look like an ASCII integer, probably this is
893 * MTD device name.
894 */
895 mtd = get_mtd_device_nm(mtd_dev);
896 } else
897 mtd = get_mtd_device(NULL, mtd_num);
898
899 return mtd;
685} 900}
686 901
687static int __init ubi_init(void) 902static int __init ubi_init(void)
@@ -693,47 +908,105 @@ static int __init ubi_init(void)
693 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 908 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
694 909
695 if (mtd_devs > UBI_MAX_DEVICES) { 910 if (mtd_devs > UBI_MAX_DEVICES) {
696 printk("UBI error: too many MTD devices, maximum is %d\n", 911 printk(KERN_ERR "UBI error: too many MTD devices, "
697 UBI_MAX_DEVICES); 912 "maximum is %d\n", UBI_MAX_DEVICES);
698 return -EINVAL; 913 return -EINVAL;
699 } 914 }
700 915
916 /* Create base sysfs directory and sysfs files */
701 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 917 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
702 if (IS_ERR(ubi_class)) 918 if (IS_ERR(ubi_class)) {
703 return PTR_ERR(ubi_class); 919 err = PTR_ERR(ubi_class);
920 printk(KERN_ERR "UBI error: cannot create UBI class\n");
921 goto out;
922 }
704 923
705 err = class_create_file(ubi_class, &ubi_version); 924 err = class_create_file(ubi_class, &ubi_version);
706 if (err) 925 if (err) {
926 printk(KERN_ERR "UBI error: cannot create sysfs file\n");
707 goto out_class; 927 goto out_class;
928 }
929
930 err = misc_register(&ubi_ctrl_cdev);
931 if (err) {
932 printk(KERN_ERR "UBI error: cannot register device\n");
933 goto out_version;
934 }
935
936 ubi_ltree_slab = kmem_cache_create("ubi_ltree_slab",
937 sizeof(struct ubi_ltree_entry), 0,
938 0, &ltree_entry_ctor);
939 if (!ubi_ltree_slab)
940 goto out_dev_unreg;
941
942 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
943 sizeof(struct ubi_wl_entry),
944 0, 0, NULL);
945 if (!ubi_wl_entry_slab)
946 goto out_ltree;
708 947
709 /* Attach MTD devices */ 948 /* Attach MTD devices */
710 for (i = 0; i < mtd_devs; i++) { 949 for (i = 0; i < mtd_devs; i++) {
711 struct mtd_dev_param *p = &mtd_dev_param[i]; 950 struct mtd_dev_param *p = &mtd_dev_param[i];
951 struct mtd_info *mtd;
712 952
713 cond_resched(); 953 cond_resched();
714 err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); 954
715 if (err) 955 mtd = open_mtd_device(p->name);
956 if (IS_ERR(mtd)) {
957 err = PTR_ERR(mtd);
716 goto out_detach; 958 goto out_detach;
959 }
960
961 mutex_lock(&ubi_devices_mutex);
962 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
963 p->vid_hdr_offs);
964 mutex_unlock(&ubi_devices_mutex);
965 if (err < 0) {
966 put_mtd_device(mtd);
967 printk(KERN_ERR "UBI error: cannot attach %s\n",
968 p->name);
969 goto out_detach;
970 }
717 } 971 }
718 972
719 return 0; 973 return 0;
720 974
721out_detach: 975out_detach:
722 for (k = 0; k < i; k++) 976 for (k = 0; k < i; k++)
723 detach_mtd_dev(ubi_devices[k]); 977 if (ubi_devices[k]) {
978 mutex_lock(&ubi_devices_mutex);
979 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
980 mutex_unlock(&ubi_devices_mutex);
981 }
982 kmem_cache_destroy(ubi_wl_entry_slab);
983out_ltree:
984 kmem_cache_destroy(ubi_ltree_slab);
985out_dev_unreg:
986 misc_deregister(&ubi_ctrl_cdev);
987out_version:
724 class_remove_file(ubi_class, &ubi_version); 988 class_remove_file(ubi_class, &ubi_version);
725out_class: 989out_class:
726 class_destroy(ubi_class); 990 class_destroy(ubi_class);
991out:
992 printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
727 return err; 993 return err;
728} 994}
729module_init(ubi_init); 995module_init(ubi_init);
730 996
731static void __exit ubi_exit(void) 997static void __exit ubi_exit(void)
732{ 998{
733 int i, n = ubi_devices_cnt; 999 int i;
734 1000
735 for (i = 0; i < n; i++) 1001 for (i = 0; i < UBI_MAX_DEVICES; i++)
736 detach_mtd_dev(ubi_devices[i]); 1002 if (ubi_devices[i]) {
1003 mutex_lock(&ubi_devices_mutex);
1004 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1005 mutex_unlock(&ubi_devices_mutex);
1006 }
1007 kmem_cache_destroy(ubi_wl_entry_slab);
1008 kmem_cache_destroy(ubi_ltree_slab);
1009 misc_deregister(&ubi_ctrl_cdev);
737 class_remove_file(ubi_class, &ubi_version); 1010 class_remove_file(ubi_class, &ubi_version);
738 class_destroy(ubi_class); 1011 class_destroy(ubi_class);
739} 1012}
@@ -754,7 +1027,8 @@ static int __init bytes_str_to_int(const char *str)
754 1027
755 result = simple_strtoul(str, &endp, 0); 1028 result = simple_strtoul(str, &endp, 0);
756 if (str == endp || result < 0) { 1029 if (str == endp || result < 0) {
757 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1030 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1031 str);
758 return -EINVAL; 1032 return -EINVAL;
759 } 1033 }
760 1034
@@ -764,15 +1038,14 @@ static int __init bytes_str_to_int(const char *str)
764 case 'M': 1038 case 'M':
765 result *= 1024; 1039 result *= 1024;
766 case 'K': 1040 case 'K':
767 case 'k':
768 result *= 1024; 1041 result *= 1024;
769 if (endp[1] == 'i' && (endp[2] == '\0' || 1042 if (endp[1] == 'i' && endp[2] == 'B')
770 endp[2] == 'B' || endp[2] == 'b'))
771 endp += 2; 1043 endp += 2;
772 case '\0': 1044 case '\0':
773 break; 1045 break;
774 default: 1046 default:
775 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1047 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1048 str);
776 return -EINVAL; 1049 return -EINVAL;
777 } 1050 }
778 1051
@@ -795,21 +1068,25 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
795 char *pbuf = &buf[0]; 1068 char *pbuf = &buf[0];
796 char *tokens[3] = {NULL, NULL, NULL}; 1069 char *tokens[3] = {NULL, NULL, NULL};
797 1070
1071 if (!val)
1072 return -EINVAL;
1073
798 if (mtd_devs == UBI_MAX_DEVICES) { 1074 if (mtd_devs == UBI_MAX_DEVICES) {
799 printk("UBI error: too many parameters, max. is %d\n", 1075 printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
800 UBI_MAX_DEVICES); 1076 UBI_MAX_DEVICES);
801 return -EINVAL; 1077 return -EINVAL;
802 } 1078 }
803 1079
804 len = strnlen(val, MTD_PARAM_LEN_MAX); 1080 len = strnlen(val, MTD_PARAM_LEN_MAX);
805 if (len == MTD_PARAM_LEN_MAX) { 1081 if (len == MTD_PARAM_LEN_MAX) {
806 printk("UBI error: parameter \"%s\" is too long, max. is %d\n", 1082 printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
807 val, MTD_PARAM_LEN_MAX); 1083 "max. is %d\n", val, MTD_PARAM_LEN_MAX);
808 return -EINVAL; 1084 return -EINVAL;
809 } 1085 }
810 1086
811 if (len == 0) { 1087 if (len == 0) {
812 printk("UBI warning: empty 'mtd=' parameter - ignored\n"); 1088 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
1089 "ignored\n");
813 return 0; 1090 return 0;
814 } 1091 }
815 1092
@@ -823,7 +1100,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
823 tokens[i] = strsep(&pbuf, ","); 1100 tokens[i] = strsep(&pbuf, ",");
824 1101
825 if (pbuf) { 1102 if (pbuf) {
826 printk("UBI error: too many arguments at \"%s\"\n", val); 1103 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
1104 val);
827 return -EINVAL; 1105 return -EINVAL;
828 } 1106 }
829 1107
@@ -832,13 +1110,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
832 1110
833 if (tokens[1]) 1111 if (tokens[1])
834 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1112 p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
835 if (tokens[2])
836 p->data_offs = bytes_str_to_int(tokens[2]);
837 1113
838 if (p->vid_hdr_offs < 0) 1114 if (p->vid_hdr_offs < 0)
839 return p->vid_hdr_offs; 1115 return p->vid_hdr_offs;
840 if (p->data_offs < 0)
841 return p->data_offs;
842 1116
843 mtd_devs += 1; 1117 mtd_devs += 1;
844 return 0; 1118 return 0;
@@ -846,16 +1120,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
846 1120
847module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1121module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
848MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1122MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
849 "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " 1123 "mtd=<name|num>[,<vid_hdr_offs>].\n"
850 "Multiple \"mtd\" parameters may be specified.\n" 1124 "Multiple \"mtd\" parameters may be specified.\n"
851 "MTD devices may be specified by their number or name. " 1125 "MTD devices may be specified by their number or name.\n"
852 "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " 1126 "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
853 "specify UBI VID header position and data starting " 1127 "header position and data starting position to be used "
854 "position to be used by UBI.\n" 1128 "by UBI.\n"
855 "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" 1129 "Example: mtd=content,1984 mtd=4 - attach MTD device"
856 "with name content using VID header offset 1984 and data " 1130 "with name \"content\" using VID header offset 1984, and "
857 "start 2048, and MTD device number 4 using default " 1131 "MTD device number 4 with default VID header offset.");
858 "offsets");
859 1132
860MODULE_VERSION(__stringify(UBI_VERSION)); 1133MODULE_VERSION(__stringify(UBI_VERSION));
861MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1134MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c52..5ec13dc4705b 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
28 * 28 *
29 * Major and minor numbers are assigned dynamically to both UBI and volume 29 * Major and minor numbers are assigned dynamically to both UBI and volume
30 * character devices. 30 * character devices.
31 *
32 * Well, there is the third kind of character devices - the UBI control
33 * character device, which allows to manipulate by UBI devices - create and
34 * delete them. In other words, it is used for attaching and detaching MTD
35 * devices.
31 */ 36 */
32 37
33#include <linux/module.h> 38#include <linux/module.h>
@@ -39,34 +44,6 @@
39#include <asm/div64.h> 44#include <asm/div64.h>
40#include "ubi.h" 45#include "ubi.h"
41 46
42/*
43 * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
44 * logical eraseblock erase is a debug-only feature).
45 */
46#define UBI_CDEV_IOC_MAX_SEQ 2
47#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
48#define VOL_CDEV_IOC_MAX_SEQ 1
49#else
50#define VOL_CDEV_IOC_MAX_SEQ 2
51#endif
52
53/**
54 * major_to_device - get UBI device object by character device major number.
55 * @major: major number
56 *
57 * This function returns a pointer to the UBI device object.
58 */
59static struct ubi_device *major_to_device(int major)
60{
61 int i;
62
63 for (i = 0; i < ubi_devices_cnt; i++)
64 if (ubi_devices[i] && ubi_devices[i]->major == major)
65 return ubi_devices[i];
66 BUG();
67 return NULL;
68}
69
70/** 47/**
71 * get_exclusive - get exclusive access to an UBI volume. 48 * get_exclusive - get exclusive access to an UBI volume.
72 * @desc: volume descriptor 49 * @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
124static int vol_cdev_open(struct inode *inode, struct file *file) 101static int vol_cdev_open(struct inode *inode, struct file *file)
125{ 102{
126 struct ubi_volume_desc *desc; 103 struct ubi_volume_desc *desc;
127 const struct ubi_device *ubi = major_to_device(imajor(inode)); 104 int vol_id = iminor(inode) - 1, mode, ubi_num;
128 int vol_id = iminor(inode) - 1; 105
129 int mode; 106 ubi_num = ubi_major2num(imajor(inode));
107 if (ubi_num < 0)
108 return ubi_num;
130 109
131 if (file->f_mode & FMODE_WRITE) 110 if (file->f_mode & FMODE_WRITE)
132 mode = UBI_READWRITE; 111 mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
135 114
136 dbg_msg("open volume %d, mode %d", vol_id, mode); 115 dbg_msg("open volume %d, mode %d", vol_id, mode);
137 116
138 desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); 117 desc = ubi_open_volume(ubi_num, vol_id, mode);
139 if (IS_ERR(desc)) 118 if (IS_ERR(desc))
140 return PTR_ERR(desc); 119 return PTR_ERR(desc);
141 120
@@ -249,7 +228,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
249 if (off + len >= vol->usable_leb_size) 228 if (off + len >= vol->usable_leb_size)
250 len = vol->usable_leb_size - off; 229 len = vol->usable_leb_size - off;
251 230
252 err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); 231 err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
253 if (err) 232 if (err)
254 break; 233 break;
255 234
@@ -289,7 +268,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
289 struct ubi_volume_desc *desc = file->private_data; 268 struct ubi_volume_desc *desc = file->private_data;
290 struct ubi_volume *vol = desc->vol; 269 struct ubi_volume *vol = desc->vol;
291 struct ubi_device *ubi = vol->ubi; 270 struct ubi_device *ubi = vol->ubi;
292 int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; 271 int lnum, off, len, tbuf_size, err = 0;
293 size_t count_save = count; 272 size_t count_save = count;
294 char *tbuf; 273 char *tbuf;
295 uint64_t tmp; 274 uint64_t tmp;
@@ -339,7 +318,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
339 break; 318 break;
340 } 319 }
341 320
342 err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, 321 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
343 UBI_UNKNOWN); 322 UBI_UNKNOWN);
344 if (err) 323 if (err)
345 break; 324 break;
@@ -377,7 +356,8 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
377 356
378 err = ubi_more_update_data(ubi, vol->vol_id, buf, count); 357 err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
379 if (err < 0) { 358 if (err < 0) {
380 ubi_err("cannot write %zd bytes of update data", count); 359 ubi_err("cannot write %zd bytes of update data, error %d",
360 count, err);
381 return err; 361 return err;
382 } 362 }
383 363
@@ -483,7 +463,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
483 } 463 }
484 464
485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 465 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
486 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); 466 err = ubi_eba_unmap_leb(ubi, vol, lnum);
487 if (err) 467 if (err)
488 break; 468 break;
489 469
@@ -580,9 +560,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
580 if (!capable(CAP_SYS_RESOURCE)) 560 if (!capable(CAP_SYS_RESOURCE))
581 return -EPERM; 561 return -EPERM;
582 562
583 ubi = major_to_device(imajor(inode)); 563 ubi = ubi_get_by_major(imajor(inode));
584 if (IS_ERR(ubi)) 564 if (!ubi)
585 return PTR_ERR(ubi); 565 return -ENODEV;
586 566
587 switch (cmd) { 567 switch (cmd) {
588 /* Create volume command */ 568 /* Create volume command */
@@ -591,8 +571,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
591 struct ubi_mkvol_req req; 571 struct ubi_mkvol_req req;
592 572
593 dbg_msg("create volume"); 573 dbg_msg("create volume");
594 err = copy_from_user(&req, argp, 574 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
595 sizeof(struct ubi_mkvol_req));
596 if (err) { 575 if (err) {
597 err = -EFAULT; 576 err = -EFAULT;
598 break; 577 break;
@@ -604,7 +583,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
604 583
605 req.name[req.name_len] = '\0'; 584 req.name[req.name_len] = '\0';
606 585
586 mutex_lock(&ubi->volumes_mutex);
607 err = ubi_create_volume(ubi, &req); 587 err = ubi_create_volume(ubi, &req);
588 mutex_unlock(&ubi->volumes_mutex);
608 if (err) 589 if (err)
609 break; 590 break;
610 591
@@ -633,10 +614,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
633 break; 614 break;
634 } 615 }
635 616
617 mutex_lock(&ubi->volumes_mutex);
636 err = ubi_remove_volume(desc); 618 err = ubi_remove_volume(desc);
637 if (err) 619 mutex_unlock(&ubi->volumes_mutex);
638 ubi_close_volume(desc);
639 620
621 /*
622 * The volume is deleted (unless an error occurred), and the
623 * 'struct ubi_volume' object will be freed when
624 * 'ubi_close_volume()' will call 'put_device()'.
625 */
626 ubi_close_volume(desc);
640 break; 627 break;
641 } 628 }
642 629
@@ -648,8 +635,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
648 struct ubi_rsvol_req req; 635 struct ubi_rsvol_req req;
649 636
650 dbg_msg("re-size volume"); 637 dbg_msg("re-size volume");
651 err = copy_from_user(&req, argp, 638 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
652 sizeof(struct ubi_rsvol_req));
653 if (err) { 639 if (err) {
654 err = -EFAULT; 640 err = -EFAULT;
655 break; 641 break;
@@ -669,7 +655,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
669 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 655 pebs = !!do_div(tmp, desc->vol->usable_leb_size);
670 pebs += tmp; 656 pebs += tmp;
671 657
658 mutex_lock(&ubi->volumes_mutex);
672 err = ubi_resize_volume(desc, pebs); 659 err = ubi_resize_volume(desc, pebs);
660 mutex_unlock(&ubi->volumes_mutex);
673 ubi_close_volume(desc); 661 ubi_close_volume(desc);
674 break; 662 break;
675 } 663 }
@@ -679,9 +667,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
679 break; 667 break;
680 } 668 }
681 669
670 ubi_put_device(ubi);
671 return err;
672}
673
674static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
675 unsigned int cmd, unsigned long arg)
676{
677 int err = 0;
678 void __user *argp = (void __user *)arg;
679
680 if (!capable(CAP_SYS_RESOURCE))
681 return -EPERM;
682
683 switch (cmd) {
684 /* Attach an MTD device command */
685 case UBI_IOCATT:
686 {
687 struct ubi_attach_req req;
688 struct mtd_info *mtd;
689
690 dbg_msg("attach MTD device");
691 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
692 if (err) {
693 err = -EFAULT;
694 break;
695 }
696
697 if (req.mtd_num < 0 ||
698 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
699 err = -EINVAL;
700 break;
701 }
702
703 mtd = get_mtd_device(NULL, req.mtd_num);
704 if (IS_ERR(mtd)) {
705 err = PTR_ERR(mtd);
706 break;
707 }
708
709 /*
710 * Note, further request verification is done by
711 * 'ubi_attach_mtd_dev()'.
712 */
713 mutex_lock(&ubi_devices_mutex);
714 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
715 mutex_unlock(&ubi_devices_mutex);
716 if (err < 0)
717 put_mtd_device(mtd);
718 else
719 /* @err contains UBI device number */
720 err = put_user(err, (__user int32_t *)argp);
721
722 break;
723 }
724
725 /* Detach an MTD device command */
726 case UBI_IOCDET:
727 {
728 int ubi_num;
729
730 dbg_msg("dettach MTD device");
731 err = get_user(ubi_num, (__user int32_t *)argp);
732 if (err) {
733 err = -EFAULT;
734 break;
735 }
736
737 mutex_lock(&ubi_devices_mutex);
738 err = ubi_detach_mtd_dev(ubi_num, 0);
739 mutex_unlock(&ubi_devices_mutex);
740 break;
741 }
742
743 default:
744 err = -ENOTTY;
745 break;
746 }
747
682 return err; 748 return err;
683} 749}
684 750
751/* UBI control character device operations */
752struct file_operations ubi_ctrl_cdev_operations = {
753 .ioctl = ctrl_cdev_ioctl,
754 .owner = THIS_MODULE,
755};
756
685/* UBI character device operations */ 757/* UBI character device operations */
686struct file_operations ubi_cdev_operations = { 758struct file_operations ubi_cdev_operations = {
687 .owner = THIS_MODULE, 759 .owner = THIS_MODULE,
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618b..51c40b17f1ec 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
39 39
40#ifdef CONFIG_MTD_UBI_DEBUG_MSG 40#ifdef CONFIG_MTD_UBI_DEBUG_MSG
41/* Generic debugging message */ 41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \ 42#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) 43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
44 current->pid, __FUNCTION__, ##__VA_ARGS__)
44 45
45#define ubi_dbg_dump_stack() dump_stack() 46#define ubi_dbg_dump_stack() dump_stack()
46 47
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
76 77
77#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 78#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
78/* Messages from the eraseblock association unit */ 79/* Messages from the eraseblock association unit */
79#define dbg_eba(fmt, ...) \ 80#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
80 printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
81 ##__VA_ARGS__)
82#else 81#else
83#define dbg_eba(fmt, ...) ({}) 82#define dbg_eba(fmt, ...) ({})
84#endif 83#endif
85 84
86#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 85#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
87/* Messages from the wear-leveling unit */ 86/* Messages from the wear-leveling unit */
88#define dbg_wl(fmt, ...) \ 87#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
89 printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
90 ##__VA_ARGS__)
91#else 88#else
92#define dbg_wl(fmt, ...) ({}) 89#define dbg_wl(fmt, ...) ({})
93#endif 90#endif
94 91
95#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 92#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
96/* Messages from the input/output unit */ 93/* Messages from the input/output unit */
97#define dbg_io(fmt, ...) \ 94#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
98 printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
99 ##__VA_ARGS__)
100#else 95#else
101#define dbg_io(fmt, ...) ({}) 96#define dbg_io(fmt, ...) ({})
102#endif 97#endif
103 98
104#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
105/* Initialization and build messages */ 100/* Initialization and build messages */
106#define dbg_bld(fmt, ...) \ 101#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
107 printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
108 ##__VA_ARGS__)
109#else 102#else
110#define dbg_bld(fmt, ...) ({}) 103#define dbg_bld(fmt, ...) ({})
111#endif 104#endif
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..85297cde4ac5 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
31 * logical eraseblock it is locked for reading or writing. The per-logical 31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree 32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
34 * lock tree elements are &struct ltree_entry objects. They are indexed by 34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
35 * (@vol_id, @lnum) pairs. 35 * (@vol_id, @lnum) pairs.
36 * 36 *
37 * EBA also maintains the global sequence counter which is incremented each 37 * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
50#define EBA_RESERVED_PEBS 1 50#define EBA_RESERVED_PEBS 1
51 51
52/** 52/**
53 * struct ltree_entry - an entry in the lock tree.
54 * @rb: links RB-tree nodes
55 * @vol_id: volume ID of the locked logical eraseblock
56 * @lnum: locked logical eraseblock number
57 * @users: how many tasks are using this logical eraseblock or wait for it
58 * @mutex: read/write mutex to implement read/write access serialization to
59 * the (@vol_id, @lnum) logical eraseblock
60 *
61 * When a logical eraseblock is being locked - corresponding &struct ltree_entry
62 * object is inserted to the lock tree (@ubi->ltree).
63 */
64struct ltree_entry {
65 struct rb_node rb;
66 int vol_id;
67 int lnum;
68 int users;
69 struct rw_semaphore mutex;
70};
71
72/* Slab cache for lock-tree entries */
73static struct kmem_cache *ltree_slab;
74
75/**
76 * next_sqnum - get next sequence number. 53 * next_sqnum - get next sequence number.
77 * @ubi: UBI device description object 54 * @ubi: UBI device description object
78 * 55 *
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
112 * @vol_id: volume ID 89 * @vol_id: volume ID
113 * @lnum: logical eraseblock number 90 * @lnum: logical eraseblock number
114 * 91 *
115 * This function returns a pointer to the corresponding &struct ltree_entry 92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
116 * object if the logical eraseblock is locked and %NULL if it is not. 93 * object if the logical eraseblock is locked and %NULL if it is not.
117 * @ubi->ltree_lock has to be locked. 94 * @ubi->ltree_lock has to be locked.
118 */ 95 */
119static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
120 int lnum) 97 int lnum)
121{ 98{
122 struct rb_node *p; 99 struct rb_node *p;
123 100
124 p = ubi->ltree.rb_node; 101 p = ubi->ltree.rb_node;
125 while (p) { 102 while (p) {
126 struct ltree_entry *le; 103 struct ubi_ltree_entry *le;
127 104
128 le = rb_entry(p, struct ltree_entry, rb); 105 le = rb_entry(p, struct ubi_ltree_entry, rb);
129 106
130 if (vol_id < le->vol_id) 107 if (vol_id < le->vol_id)
131 p = p->rb_left; 108 p = p->rb_left;
@@ -155,12 +132,12 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
155 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 132 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
156 * failed. 133 * failed.
157 */ 134 */
158static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, 135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
159 int lnum) 136 int vol_id, int lnum)
160{ 137{
161 struct ltree_entry *le, *le1, *le_free; 138 struct ubi_ltree_entry *le, *le1, *le_free;
162 139
163 le = kmem_cache_alloc(ltree_slab, GFP_NOFS); 140 le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
164 if (!le) 141 if (!le)
165 return ERR_PTR(-ENOMEM); 142 return ERR_PTR(-ENOMEM);
166 143
@@ -189,7 +166,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
189 p = &ubi->ltree.rb_node; 166 p = &ubi->ltree.rb_node;
190 while (*p) { 167 while (*p) {
191 parent = *p; 168 parent = *p;
192 le1 = rb_entry(parent, struct ltree_entry, rb); 169 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
193 170
194 if (vol_id < le1->vol_id) 171 if (vol_id < le1->vol_id)
195 p = &(*p)->rb_left; 172 p = &(*p)->rb_left;
@@ -211,7 +188,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
211 spin_unlock(&ubi->ltree_lock); 188 spin_unlock(&ubi->ltree_lock);
212 189
213 if (le_free) 190 if (le_free)
214 kmem_cache_free(ltree_slab, le_free); 191 kmem_cache_free(ubi_ltree_slab, le_free);
215 192
216 return le; 193 return le;
217} 194}
@@ -227,7 +204,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
227 */ 204 */
228static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 205static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
229{ 206{
230 struct ltree_entry *le; 207 struct ubi_ltree_entry *le;
231 208
232 le = ltree_add_entry(ubi, vol_id, lnum); 209 le = ltree_add_entry(ubi, vol_id, lnum);
233 if (IS_ERR(le)) 210 if (IS_ERR(le))
@@ -245,7 +222,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
245static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 222static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
246{ 223{
247 int free = 0; 224 int free = 0;
248 struct ltree_entry *le; 225 struct ubi_ltree_entry *le;
249 226
250 spin_lock(&ubi->ltree_lock); 227 spin_lock(&ubi->ltree_lock);
251 le = ltree_lookup(ubi, vol_id, lnum); 228 le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +236,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
259 236
260 up_read(&le->mutex); 237 up_read(&le->mutex);
261 if (free) 238 if (free)
262 kmem_cache_free(ltree_slab, le); 239 kmem_cache_free(ubi_ltree_slab, le);
263} 240}
264 241
265/** 242/**
@@ -273,7 +250,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
273 */ 250 */
274static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 251static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
275{ 252{
276 struct ltree_entry *le; 253 struct ubi_ltree_entry *le;
277 254
278 le = ltree_add_entry(ubi, vol_id, lnum); 255 le = ltree_add_entry(ubi, vol_id, lnum);
279 if (IS_ERR(le)) 256 if (IS_ERR(le))
@@ -283,6 +260,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
283} 260}
284 261
285/** 262/**
263 * leb_write_lock - lock logical eraseblock for writing.
264 * @ubi: UBI device description object
265 * @vol_id: volume ID
266 * @lnum: logical eraseblock number
267 *
268 * This function locks a logical eraseblock for writing if there is no
269 * contention and does nothing if there is contention. Returns %0 in case of
270 * success, %1 in case of contention, and and a negative error code in case of
271 * failure.
272 */
273static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
274{
275 int free;
276 struct ubi_ltree_entry *le;
277
278 le = ltree_add_entry(ubi, vol_id, lnum);
279 if (IS_ERR(le))
280 return PTR_ERR(le);
281 if (down_write_trylock(&le->mutex))
282 return 0;
283
284 /* Contention, cancel */
285 spin_lock(&ubi->ltree_lock);
286 le->users -= 1;
287 ubi_assert(le->users >= 0);
288 if (le->users == 0) {
289 rb_erase(&le->rb, &ubi->ltree);
290 free = 1;
291 } else
292 free = 0;
293 spin_unlock(&ubi->ltree_lock);
294 if (free)
295 kmem_cache_free(ubi_ltree_slab, le);
296
297 return 1;
298}
299
300/**
286 * leb_write_unlock - unlock logical eraseblock. 301 * leb_write_unlock - unlock logical eraseblock.
287 * @ubi: UBI device description object 302 * @ubi: UBI device description object
288 * @vol_id: volume ID 303 * @vol_id: volume ID
@@ -291,7 +306,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
291static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 306static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
292{ 307{
293 int free; 308 int free;
294 struct ltree_entry *le; 309 struct ubi_ltree_entry *le;
295 310
296 spin_lock(&ubi->ltree_lock); 311 spin_lock(&ubi->ltree_lock);
297 le = ltree_lookup(ubi, vol_id, lnum); 312 le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +321,26 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
306 321
307 up_write(&le->mutex); 322 up_write(&le->mutex);
308 if (free) 323 if (free)
309 kmem_cache_free(ltree_slab, le); 324 kmem_cache_free(ubi_ltree_slab, le);
310} 325}
311 326
312/** 327/**
313 * ubi_eba_unmap_leb - un-map logical eraseblock. 328 * ubi_eba_unmap_leb - un-map logical eraseblock.
314 * @ubi: UBI device description object 329 * @ubi: UBI device description object
315 * @vol_id: volume ID 330 * @vol: volume description object
316 * @lnum: logical eraseblock number 331 * @lnum: logical eraseblock number
317 * 332 *
318 * This function un-maps logical eraseblock @lnum and schedules corresponding 333 * This function un-maps logical eraseblock @lnum and schedules corresponding
319 * physical eraseblock for erasure. Returns zero in case of success and a 334 * physical eraseblock for erasure. Returns zero in case of success and a
320 * negative error code in case of failure. 335 * negative error code in case of failure.
321 */ 336 */
322int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) 337int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
338 int lnum)
323{ 339{
324 int idx = vol_id2idx(ubi, vol_id), err, pnum; 340 int err, pnum, vol_id = vol->vol_id;
325 struct ubi_volume *vol = ubi->volumes[idx]; 341
342 ubi_assert(ubi->ref_count > 0);
343 ubi_assert(vol->ref_count > 0);
326 344
327 if (ubi->ro_mode) 345 if (ubi->ro_mode)
328 return -EROFS; 346 return -EROFS;
@@ -349,7 +367,7 @@ out_unlock:
349/** 367/**
350 * ubi_eba_read_leb - read data. 368 * ubi_eba_read_leb - read data.
351 * @ubi: UBI device description object 369 * @ubi: UBI device description object
352 * @vol_id: volume ID 370 * @vol: volume description object
353 * @lnum: logical eraseblock number 371 * @lnum: logical eraseblock number
354 * @buf: buffer to store the read data 372 * @buf: buffer to store the read data
355 * @offset: offset from where to read 373 * @offset: offset from where to read
@@ -365,14 +383,16 @@ out_unlock:
365 * returned for any volume type if an ECC error was detected by the MTD device 383 * returned for any volume type if an ECC error was detected by the MTD device
366 * driver. Other negative error cored may be returned in case of other errors. 384 * driver. Other negative error cored may be returned in case of other errors.
367 */ 385 */
368int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 386int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
369 int offset, int len, int check) 387 void *buf, int offset, int len, int check)
370{ 388{
371 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 389 int err, pnum, scrub = 0, vol_id = vol->vol_id;
372 struct ubi_vid_hdr *vid_hdr; 390 struct ubi_vid_hdr *vid_hdr;
373 struct ubi_volume *vol = ubi->volumes[idx];
374 uint32_t uninitialized_var(crc); 391 uint32_t uninitialized_var(crc);
375 392
393 ubi_assert(ubi->ref_count > 0);
394 ubi_assert(vol->ref_count > 0);
395
376 err = leb_read_lock(ubi, vol_id, lnum); 396 err = leb_read_lock(ubi, vol_id, lnum);
377 if (err) 397 if (err)
378 return err; 398 return err;
@@ -578,7 +598,7 @@ write_error:
578/** 598/**
579 * ubi_eba_write_leb - write data to dynamic volume. 599 * ubi_eba_write_leb - write data to dynamic volume.
580 * @ubi: UBI device description object 600 * @ubi: UBI device description object
581 * @vol_id: volume ID 601 * @vol: volume description object
582 * @lnum: logical eraseblock number 602 * @lnum: logical eraseblock number
583 * @buf: the data to write 603 * @buf: the data to write
584 * @offset: offset within the logical eraseblock where to write 604 * @offset: offset within the logical eraseblock where to write
@@ -586,17 +606,19 @@ write_error:
586 * @dtype: data type 606 * @dtype: data type
587 * 607 *
588 * This function writes data to logical eraseblock @lnum of a dynamic volume 608 * This function writes data to logical eraseblock @lnum of a dynamic volume
589 * @vol_id. Returns zero in case of success and a negative error code in case 609 * @vol. Returns zero in case of success and a negative error code in case
590 * of failure. In case of error, it is possible that something was still 610 * of failure. In case of error, it is possible that something was still
591 * written to the flash media, but may be some garbage. 611 * written to the flash media, but may be some garbage.
592 */ 612 */
593int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 613int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
594 const void *buf, int offset, int len, int dtype) 614 const void *buf, int offset, int len, int dtype)
595{ 615{
596 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; 616 int err, pnum, tries = 0, vol_id = vol->vol_id;
597 struct ubi_volume *vol = ubi->volumes[idx];
598 struct ubi_vid_hdr *vid_hdr; 617 struct ubi_vid_hdr *vid_hdr;
599 618
619 ubi_assert(ubi->ref_count > 0);
620 ubi_assert(vol->ref_count > 0);
621
600 if (ubi->ro_mode) 622 if (ubi->ro_mode)
601 return -EROFS; 623 return -EROFS;
602 624
@@ -613,7 +635,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
613 if (err) { 635 if (err) {
614 ubi_warn("failed to write data to PEB %d", pnum); 636 ubi_warn("failed to write data to PEB %d", pnum);
615 if (err == -EIO && ubi->bad_allowed) 637 if (err == -EIO && ubi->bad_allowed)
616 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); 638 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
639 offset, len);
617 if (err) 640 if (err)
618 ubi_ro_mode(ubi); 641 ubi_ro_mode(ubi);
619 } 642 }
@@ -656,11 +679,14 @@ retry:
656 goto write_error; 679 goto write_error;
657 } 680 }
658 681
659 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 682 if (len) {
660 if (err) { 683 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
661 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " 684 if (err) {
662 "PEB %d", len, offset, vol_id, lnum, pnum); 685 ubi_warn("failed to write %d bytes at offset %d of "
663 goto write_error; 686 "LEB %d:%d, PEB %d", len, offset, vol_id,
687 lnum, pnum);
688 goto write_error;
689 }
664 } 690 }
665 691
666 vol->eba_tbl[lnum] = pnum; 692 vol->eba_tbl[lnum] = pnum;
@@ -698,7 +724,7 @@ write_error:
698/** 724/**
699 * ubi_eba_write_leb_st - write data to static volume. 725 * ubi_eba_write_leb_st - write data to static volume.
700 * @ubi: UBI device description object 726 * @ubi: UBI device description object
701 * @vol_id: volume ID 727 * @vol: volume description object
702 * @lnum: logical eraseblock number 728 * @lnum: logical eraseblock number
703 * @buf: data to write 729 * @buf: data to write
704 * @len: how many bytes to write 730 * @len: how many bytes to write
@@ -706,7 +732,7 @@ write_error:
706 * @used_ebs: how many logical eraseblocks will this volume contain 732 * @used_ebs: how many logical eraseblocks will this volume contain
707 * 733 *
708 * This function writes data to logical eraseblock @lnum of static volume 734 * This function writes data to logical eraseblock @lnum of static volume
709 * @vol_id. The @used_ebs argument should contain total number of logical 735 * @vol. The @used_ebs argument should contain total number of logical
710 * eraseblock in this static volume. 736 * eraseblock in this static volume.
711 * 737 *
712 * When writing to the last logical eraseblock, the @len argument doesn't have 738 * When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,15 +744,17 @@ write_error:
718 * volumes. This function returns zero in case of success and a negative error 744 * volumes. This function returns zero in case of success and a negative error
719 * code in case of failure. 745 * code in case of failure.
720 */ 746 */
721int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 747int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
722 const void *buf, int len, int dtype, int used_ebs) 748 int lnum, const void *buf, int len, int dtype,
749 int used_ebs)
723{ 750{
724 int err, pnum, tries = 0, data_size = len; 751 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
725 int idx = vol_id2idx(ubi, vol_id);
726 struct ubi_volume *vol = ubi->volumes[idx];
727 struct ubi_vid_hdr *vid_hdr; 752 struct ubi_vid_hdr *vid_hdr;
728 uint32_t crc; 753 uint32_t crc;
729 754
755 ubi_assert(ubi->ref_count > 0);
756 ubi_assert(vol->ref_count > 0);
757
730 if (ubi->ro_mode) 758 if (ubi->ro_mode)
731 return -EROFS; 759 return -EROFS;
732 760
@@ -819,7 +847,7 @@ write_error:
819/* 847/*
820 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 848 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
821 * @ubi: UBI device description object 849 * @ubi: UBI device description object
822 * @vol_id: volume ID 850 * @vol: volume description object
823 * @lnum: logical eraseblock number 851 * @lnum: logical eraseblock number
824 * @buf: data to write 852 * @buf: data to write
825 * @len: how many bytes to write 853 * @len: how many bytes to write
@@ -834,14 +862,16 @@ write_error:
834 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 862 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
835 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 863 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
836 */ 864 */
837int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 865int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
838 const void *buf, int len, int dtype) 866 int lnum, const void *buf, int len, int dtype)
839{ 867{
840 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); 868 int err, pnum, tries = 0, vol_id = vol->vol_id;
841 struct ubi_volume *vol = ubi->volumes[idx];
842 struct ubi_vid_hdr *vid_hdr; 869 struct ubi_vid_hdr *vid_hdr;
843 uint32_t crc; 870 uint32_t crc;
844 871
872 ubi_assert(ubi->ref_count > 0);
873 ubi_assert(vol->ref_count > 0);
874
845 if (ubi->ro_mode) 875 if (ubi->ro_mode)
846 return -EROFS; 876 return -EROFS;
847 877
@@ -928,20 +958,6 @@ write_error:
928} 958}
929 959
930/** 960/**
931 * ltree_entry_ctor - lock tree entries slab cache constructor.
932 * @obj: the lock-tree entry to construct
933 * @cache: the lock tree entry slab cache
934 * @flags: constructor flags
935 */
936static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
937{
938 struct ltree_entry *le = obj;
939
940 le->users = 0;
941 init_rwsem(&le->mutex);
942}
943
944/**
945 * ubi_eba_copy_leb - copy logical eraseblock. 961 * ubi_eba_copy_leb - copy logical eraseblock.
946 * @ubi: UBI device description object 962 * @ubi: UBI device description object
947 * @from: physical eraseblock number from where to copy 963 * @from: physical eraseblock number from where to copy
@@ -950,14 +966,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
950 * 966 *
951 * This function copies logical eraseblock from physical eraseblock @from to 967 * This function copies logical eraseblock from physical eraseblock @from to
952 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 968 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
953 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation 969 * function. Returns:
954 * was canceled because bit-flips were detected at the target PEB, and a 970 * o %0 in case of success;
955 * negative error code in case of failure. 971 * o %1 if the operation was canceled and should be tried later (e.g.,
972 * because a bit-flip was detected at the target PEB);
973 * o %2 if the volume is being deleted and this LEB should not be moved.
956 */ 974 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 975int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 976 struct ubi_vid_hdr *vid_hdr)
959{ 977{
960 int err, vol_id, lnum, data_size, aldata_size, pnum, idx; 978 int err, vol_id, lnum, data_size, aldata_size, idx;
961 struct ubi_volume *vol; 979 struct ubi_volume *vol;
962 uint32_t crc; 980 uint32_t crc;
963 981
@@ -973,51 +991,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
973 data_size = aldata_size = 991 data_size = aldata_size =
974 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 992 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
975 993
976 /*
977 * We do not want anybody to write to this logical eraseblock while we
978 * are moving it, so we lock it.
979 */
980 err = leb_write_lock(ubi, vol_id, lnum);
981 if (err)
982 return err;
983
984 mutex_lock(&ubi->buf_mutex);
985
986 /*
987 * But the logical eraseblock might have been put by this time.
988 * Cancel if it is true.
989 */
990 idx = vol_id2idx(ubi, vol_id); 994 idx = vol_id2idx(ubi, vol_id);
991 995 spin_lock(&ubi->volumes_lock);
992 /* 996 /*
993 * We may race with volume deletion/re-size, so we have to hold 997 * Note, we may race with volume deletion, which means that the volume
994 * @ubi->volumes_lock. 998 * this logical eraseblock belongs to might be being deleted. Since the
999 * volume deletion unmaps all the volume's logical eraseblocks, it will
1000 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
995 */ 1001 */
996 spin_lock(&ubi->volumes_lock);
997 vol = ubi->volumes[idx]; 1002 vol = ubi->volumes[idx];
998 if (!vol) { 1003 if (!vol) {
999 dbg_eba("volume %d was removed meanwhile", vol_id); 1004 /* No need to do further work, cancel */
1005 dbg_eba("volume %d is being removed, cancel", vol_id);
1000 spin_unlock(&ubi->volumes_lock); 1006 spin_unlock(&ubi->volumes_lock);
1001 goto out_unlock; 1007 return 2;
1008 }
1009 spin_unlock(&ubi->volumes_lock);
1010
1011 /*
1012 * We do not want anybody to write to this logical eraseblock while we
1013 * are moving it, so lock it.
1014 *
1015 * Note, we are using non-waiting locking here, because we cannot sleep
1016 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1017 * unmapping the LEB which is mapped to the PEB we are going to move
1018 * (@from). This task locks the LEB and goes sleep in the
1019 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1020 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1021 * LEB is already locked, we just do not move it and return %1.
1022 */
1023 err = leb_write_trylock(ubi, vol_id, lnum);
1024 if (err) {
1025 dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
1026 return err;
1002 } 1027 }
1003 1028
1004 pnum = vol->eba_tbl[lnum]; 1029 /*
1005 if (pnum != from) { 1030 * The LEB might have been put meanwhile, and the task which put it is
1031 * probably waiting on @ubi->move_mutex. No need to continue the work,
1032 * cancel it.
1033 */
1034 if (vol->eba_tbl[lnum] != from) {
1006 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1035 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1007 "PEB %d, cancel", vol_id, lnum, from, pnum); 1036 "PEB %d, cancel", vol_id, lnum, from,
1008 spin_unlock(&ubi->volumes_lock); 1037 vol->eba_tbl[lnum]);
1009 goto out_unlock; 1038 err = 1;
1039 goto out_unlock_leb;
1010 } 1040 }
1011 spin_unlock(&ubi->volumes_lock);
1012
1013 /* OK, now the LEB is locked and we can safely start moving it */
1014 1041
1042 /*
 1043 * OK, now the LEB is locked and we can safely start moving it. Since
 1044 * this function utilizes the @ubi->peb_buf1 buffer which is shared
1045 * with some other functions, so lock the buffer by taking the
1046 * @ubi->buf_mutex.
1047 */
1048 mutex_lock(&ubi->buf_mutex);
1015 dbg_eba("read %d bytes of data", aldata_size); 1049 dbg_eba("read %d bytes of data", aldata_size);
1016 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1050 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1017 if (err && err != UBI_IO_BITFLIPS) { 1051 if (err && err != UBI_IO_BITFLIPS) {
1018 ubi_warn("error %d while reading data from PEB %d", 1052 ubi_warn("error %d while reading data from PEB %d",
1019 err, from); 1053 err, from);
1020 goto out_unlock; 1054 goto out_unlock_buf;
1021 } 1055 }
1022 1056
1023 /* 1057 /*
@@ -1053,7 +1087,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1053 1087
1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1088 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1055 if (err) 1089 if (err)
1056 goto out_unlock; 1090 goto out_unlock_buf;
1057 1091
1058 cond_resched(); 1092 cond_resched();
1059 1093
@@ -1062,13 +1096,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1062 if (err) { 1096 if (err) {
1063 if (err != UBI_IO_BITFLIPS) 1097 if (err != UBI_IO_BITFLIPS)
1064 ubi_warn("cannot read VID header back from PEB %d", to); 1098 ubi_warn("cannot read VID header back from PEB %d", to);
1065 goto out_unlock; 1099 else
1100 err = 1;
1101 goto out_unlock_buf;
1066 } 1102 }
1067 1103
1068 if (data_size > 0) { 1104 if (data_size > 0) {
1069 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1105 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1070 if (err) 1106 if (err)
1071 goto out_unlock; 1107 goto out_unlock_buf;
1072 1108
1073 cond_resched(); 1109 cond_resched();
1074 1110
@@ -1082,7 +1118,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1082 if (err != UBI_IO_BITFLIPS) 1118 if (err != UBI_IO_BITFLIPS)
1083 ubi_warn("cannot read data back from PEB %d", 1119 ubi_warn("cannot read data back from PEB %d",
1084 to); 1120 to);
1085 goto out_unlock; 1121 else
1122 err = 1;
1123 goto out_unlock_buf;
1086 } 1124 }
1087 1125
1088 cond_resched(); 1126 cond_resched();
@@ -1090,15 +1128,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1090 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1128 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1091 ubi_warn("read data back from PEB %d - it is different", 1129 ubi_warn("read data back from PEB %d - it is different",
1092 to); 1130 to);
1093 goto out_unlock; 1131 goto out_unlock_buf;
1094 } 1132 }
1095 } 1133 }
1096 1134
1097 ubi_assert(vol->eba_tbl[lnum] == from); 1135 ubi_assert(vol->eba_tbl[lnum] == from);
1098 vol->eba_tbl[lnum] = to; 1136 vol->eba_tbl[lnum] = to;
1099 1137
1100out_unlock: 1138out_unlock_buf:
1101 mutex_unlock(&ubi->buf_mutex); 1139 mutex_unlock(&ubi->buf_mutex);
1140out_unlock_leb:
1102 leb_write_unlock(ubi, vol_id, lnum); 1141 leb_write_unlock(ubi, vol_id, lnum);
1103 return err; 1142 return err;
1104} 1143}
@@ -1125,14 +1164,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1125 mutex_init(&ubi->alc_mutex); 1164 mutex_init(&ubi->alc_mutex);
1126 ubi->ltree = RB_ROOT; 1165 ubi->ltree = RB_ROOT;
1127 1166
1128 if (ubi_devices_cnt == 0) {
1129 ltree_slab = kmem_cache_create("ubi_ltree_slab",
1130 sizeof(struct ltree_entry), 0,
1131 0, &ltree_entry_ctor);
1132 if (!ltree_slab)
1133 return -ENOMEM;
1134 }
1135
1136 ubi->global_sqnum = si->max_sqnum + 1; 1167 ubi->global_sqnum = si->max_sqnum + 1;
1137 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1168 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1138 1169
@@ -1168,6 +1199,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1168 } 1199 }
1169 } 1200 }
1170 1201
1202 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1203 ubi_err("no enough physical eraseblocks (%d, need %d)",
1204 ubi->avail_pebs, EBA_RESERVED_PEBS);
1205 err = -ENOSPC;
1206 goto out_free;
1207 }
1208 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1209 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1210
1171 if (ubi->bad_allowed) { 1211 if (ubi->bad_allowed) {
1172 ubi_calculate_reserved(ubi); 1212 ubi_calculate_reserved(ubi);
1173 1213
@@ -1184,15 +1224,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1184 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1224 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1185 } 1225 }
1186 1226
1187 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1188 ubi_err("no enough physical eraseblocks (%d, need %d)",
1189 ubi->avail_pebs, EBA_RESERVED_PEBS);
1190 err = -ENOSPC;
1191 goto out_free;
1192 }
1193 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1194 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1195
1196 dbg_eba("EBA unit is initialized"); 1227 dbg_eba("EBA unit is initialized");
1197 return 0; 1228 return 0;
1198 1229
@@ -1202,8 +1233,6 @@ out_free:
1202 continue; 1233 continue;
1203 kfree(ubi->volumes[i]->eba_tbl); 1234 kfree(ubi->volumes[i]->eba_tbl);
1204 } 1235 }
1205 if (ubi_devices_cnt == 0)
1206 kmem_cache_destroy(ltree_slab);
1207 return err; 1236 return err;
1208} 1237}
1209 1238
@@ -1222,6 +1251,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
1222 continue; 1251 continue;
1223 kfree(ubi->volumes[i]->eba_tbl); 1252 kfree(ubi->volumes[i]->eba_tbl);
1224 } 1253 }
1225 if (ubi_devices_cnt == 1)
1226 kmem_cache_destroy(ltree_slab);
1227} 1254}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e14..d397219238d3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
129 if (to_read > total_read) 129 if (to_read > total_read)
130 to_read = total_read; 130 to_read = total_read;
131 131
132 err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, 132 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
133 to_read, 0);
134 if (err) 133 if (err)
135 break; 134 break;
136 135
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
187 if (to_write > total_written) 186 if (to_write > total_written)
188 to_write = total_written; 187 to_write = total_written;
189 188
190 err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, 189 err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
191 to_write, UBI_UNKNOWN); 190 UBI_UNKNOWN);
192 if (err) 191 if (err)
193 break; 192 break;
194 193
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
237 return -EROFS; 236 return -EROFS;
238 237
239 for (i = 0; i < count; i++) { 238 for (i = 0; i < count; i++) {
240 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); 239 err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
241 if (err) 240 if (err)
242 goto out_err; 241 goto out_err;
243 } 242 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b5..db3efdef2433 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
174 "read %zd bytes", err, len, pnum, offset, read); 174 "read %zd bytes", err, len, pnum, offset, read);
175 ubi_dbg_dump_stack(); 175 ubi_dbg_dump_stack();
176
177 /*
178 * The driver should never return -EBADMSG if it failed to read
179 * all the requested data. But some buggy drivers might do
180 * this, so we change it to -EIO.
181 */
182 if (read != len && err == -EBADMSG) {
183 ubi_assert(0);
184 err = -EIO;
185 }
176 } else { 186 } else {
177 ubi_assert(len == read); 187 ubi_assert(len == read);
178 188
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f41549..146957c3380d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
30 * @ubi_num: UBI device number 30 * @ubi_num: UBI device number
31 * @di: the information is stored here 31 * @di: the information is stored here
32 * 32 *
33 * This function returns %0 in case of success and a %-ENODEV if there is no 33 * This function returns %0 in case of success, %-EINVAL if the UBI device
34 * such UBI device. 34 * number is invalid, and %-ENODEV if there is no such UBI device.
35 */ 35 */
36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
37{ 37{
38 const struct ubi_device *ubi; 38 struct ubi_device *ubi;
39
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
41 return -EINVAL;
39 42
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || 43 ubi = ubi_get_device(ubi_num);
41 !ubi_devices[ubi_num]) 44 if (!ubi)
42 return -ENODEV; 45 return -ENODEV;
43 46
44 ubi = ubi_devices[ubi_num];
45 di->ubi_num = ubi->ubi_num; 47 di->ubi_num = ubi->ubi_num;
46 di->leb_size = ubi->leb_size; 48 di->leb_size = ubi->leb_size;
47 di->min_io_size = ubi->min_io_size; 49 di->min_io_size = ubi->min_io_size;
48 di->ro_mode = ubi->ro_mode; 50 di->ro_mode = ubi->ro_mode;
49 di->cdev = MKDEV(ubi->major, 0); 51 di->cdev = ubi->cdev.dev;
52
53 ubi_put_device(ubi);
50 return 0; 54 return 0;
51} 55}
52EXPORT_SYMBOL_GPL(ubi_get_device_info); 56EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
73 vi->usable_leb_size = vol->usable_leb_size; 77 vi->usable_leb_size = vol->usable_leb_size;
74 vi->name_len = vol->name_len; 78 vi->name_len = vol->name_len;
75 vi->name = vol->name; 79 vi->name = vol->name;
76 vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); 80 vi->cdev = vol->cdev.dev;
77} 81}
78EXPORT_SYMBOL_GPL(ubi_get_volume_info); 82EXPORT_SYMBOL_GPL(ubi_get_volume_info);
79 83
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
104 108
105 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 109 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
106 110
107 err = -ENODEV; 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
108 if (ubi_num < 0) 112 return ERR_PTR(-EINVAL);
109 return ERR_PTR(err);
110
111 ubi = ubi_devices[ubi_num];
112
113 if (!try_module_get(THIS_MODULE))
114 return ERR_PTR(err);
115
116 if (ubi_num >= UBI_MAX_DEVICES || !ubi)
117 goto out_put;
118 113
119 err = -EINVAL;
120 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
121 goto out_put;
122 if (mode != UBI_READONLY && mode != UBI_READWRITE && 114 if (mode != UBI_READONLY && mode != UBI_READWRITE &&
123 mode != UBI_EXCLUSIVE) 115 mode != UBI_EXCLUSIVE)
124 goto out_put; 116 return ERR_PTR(-EINVAL);
117
118 /*
119 * First of all, we have to get the UBI device to prevent its removal.
120 */
121 ubi = ubi_get_device(ubi_num);
122 if (!ubi)
123 return ERR_PTR(-ENODEV);
124
125 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
126 err = -EINVAL;
127 goto out_put_ubi;
128 }
125 129
126 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 130 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
127 if (!desc) { 131 if (!desc) {
128 err = -ENOMEM; 132 err = -ENOMEM;
129 goto out_put; 133 goto out_put_ubi;
130 } 134 }
131 135
136 err = -ENODEV;
137 if (!try_module_get(THIS_MODULE))
138 goto out_free;
139
132 spin_lock(&ubi->volumes_lock); 140 spin_lock(&ubi->volumes_lock);
133 vol = ubi->volumes[vol_id]; 141 vol = ubi->volumes[vol_id];
134 if (!vol) { 142 if (!vol)
135 err = -ENODEV;
136 goto out_unlock; 143 goto out_unlock;
137 }
138 144
139 err = -EBUSY; 145 err = -EBUSY;
140 switch (mode) { 146 switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
156 vol->exclusive = 1; 162 vol->exclusive = 1;
157 break; 163 break;
158 } 164 }
165 get_device(&vol->dev);
166 vol->ref_count += 1;
159 spin_unlock(&ubi->volumes_lock); 167 spin_unlock(&ubi->volumes_lock);
160 168
161 desc->vol = vol; 169 desc->vol = vol;
162 desc->mode = mode; 170 desc->mode = mode;
163 171
164 /* 172 mutex_lock(&ubi->ckvol_mutex);
165 * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
166 * although it is not the purpose it was introduced for.
167 */
168 mutex_lock(&ubi->vtbl_mutex);
169 if (!vol->checked) { 173 if (!vol->checked) {
170 /* This is the first open - check the volume */ 174 /* This is the first open - check the volume */
171 err = ubi_check_volume(ubi, vol_id); 175 err = ubi_check_volume(ubi, vol_id);
172 if (err < 0) { 176 if (err < 0) {
173 mutex_unlock(&ubi->vtbl_mutex); 177 mutex_unlock(&ubi->ckvol_mutex);
174 ubi_close_volume(desc); 178 ubi_close_volume(desc);
175 return ERR_PTR(err); 179 return ERR_PTR(err);
176 } 180 }
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
181 } 185 }
182 vol->checked = 1; 186 vol->checked = 1;
183 } 187 }
184 mutex_unlock(&ubi->vtbl_mutex); 188 mutex_unlock(&ubi->ckvol_mutex);
189
185 return desc; 190 return desc;
186 191
187out_unlock: 192out_unlock:
188 spin_unlock(&ubi->volumes_lock); 193 spin_unlock(&ubi->volumes_lock);
189 kfree(desc);
190out_put:
191 module_put(THIS_MODULE); 194 module_put(THIS_MODULE);
195out_free:
196 kfree(desc);
197out_put_ubi:
198 ubi_put_device(ubi);
192 return ERR_PTR(err); 199 return ERR_PTR(err);
193} 200}
194EXPORT_SYMBOL_GPL(ubi_open_volume); 201EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
205 int mode) 212 int mode)
206{ 213{
207 int i, vol_id = -1, len; 214 int i, vol_id = -1, len;
208 struct ubi_volume_desc *ret;
209 struct ubi_device *ubi; 215 struct ubi_device *ubi;
216 struct ubi_volume_desc *ret;
210 217
211 dbg_msg("open volume %s, mode %d", name, mode); 218 dbg_msg("open volume %s, mode %d", name, mode);
212 219
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
217 if (len > UBI_VOL_NAME_MAX) 224 if (len > UBI_VOL_NAME_MAX)
218 return ERR_PTR(-EINVAL); 225 return ERR_PTR(-EINVAL);
219 226
220 ret = ERR_PTR(-ENODEV); 227 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
221 if (!try_module_get(THIS_MODULE)) 228 return ERR_PTR(-EINVAL);
222 return ret;
223
224 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
225 goto out_put;
226 229
227 ubi = ubi_devices[ubi_num]; 230 ubi = ubi_get_device(ubi_num);
231 if (!ubi)
232 return ERR_PTR(-ENODEV);
228 233
229 spin_lock(&ubi->volumes_lock); 234 spin_lock(&ubi->volumes_lock);
230 /* Walk all volumes of this UBI device */ 235 /* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
238 } 243 }
239 spin_unlock(&ubi->volumes_lock); 244 spin_unlock(&ubi->volumes_lock);
240 245
241 if (vol_id < 0) 246 if (vol_id >= 0)
242 goto out_put; 247 ret = ubi_open_volume(ubi_num, vol_id, mode);
243 248 else
244 ret = ubi_open_volume(ubi_num, vol_id, mode); 249 ret = ERR_PTR(-ENODEV);
245 250
246out_put: 251 /*
247 module_put(THIS_MODULE); 252 * We should put the UBI device even in case of success, because
253 * 'ubi_open_volume()' took a reference as well.
254 */
255 ubi_put_device(ubi);
248 return ret; 256 return ret;
249} 257}
250EXPORT_SYMBOL_GPL(ubi_open_volume_nm); 258EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
256void ubi_close_volume(struct ubi_volume_desc *desc) 264void ubi_close_volume(struct ubi_volume_desc *desc)
257{ 265{
258 struct ubi_volume *vol = desc->vol; 266 struct ubi_volume *vol = desc->vol;
267 struct ubi_device *ubi = vol->ubi;
259 268
260 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 269 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
261 270
262 spin_lock(&vol->ubi->volumes_lock); 271 spin_lock(&ubi->volumes_lock);
263 switch (desc->mode) { 272 switch (desc->mode) {
264 case UBI_READONLY: 273 case UBI_READONLY:
265 vol->readers -= 1; 274 vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
270 case UBI_EXCLUSIVE: 279 case UBI_EXCLUSIVE:
271 vol->exclusive = 0; 280 vol->exclusive = 0;
272 } 281 }
273 spin_unlock(&vol->ubi->volumes_lock); 282 vol->ref_count -= 1;
283 spin_unlock(&ubi->volumes_lock);
274 284
275 kfree(desc); 285 kfree(desc);
286 put_device(&vol->dev);
287 ubi_put_device(ubi);
276 module_put(THIS_MODULE); 288 module_put(THIS_MODULE);
277} 289}
278EXPORT_SYMBOL_GPL(ubi_close_volume); 290EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
332 if (len == 0) 344 if (len == 0)
333 return 0; 345 return 0;
334 346
335 err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); 347 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
336 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 348 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
337 ubi_warn("mark volume %d as corrupted", vol_id); 349 ubi_warn("mark volume %d as corrupted", vol_id);
338 vol->corrupted = 1; 350 vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
399 if (len == 0) 411 if (len == 0)
400 return 0; 412 return 0;
401 413
402 return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); 414 return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
403} 415}
404EXPORT_SYMBOL_GPL(ubi_leb_write); 416EXPORT_SYMBOL_GPL(ubi_leb_write);
405 417
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
448 if (len == 0) 460 if (len == 0)
449 return 0; 461 return 0;
450 462
451 return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); 463 return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
452} 464}
453EXPORT_SYMBOL_GPL(ubi_leb_change); 465EXPORT_SYMBOL_GPL(ubi_leb_change);
454 466
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
481 if (vol->upd_marker) 493 if (vol->upd_marker)
482 return -EBADF; 494 return -EBADF;
483 495
484 err = ubi_eba_unmap_leb(ubi, vol_id, lnum); 496 err = ubi_eba_unmap_leb(ubi, vol, lnum);
485 if (err) 497 if (err)
486 return err; 498 return err;
487 499
@@ -542,11 +554,56 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
542 if (vol->upd_marker) 554 if (vol->upd_marker)
543 return -EBADF; 555 return -EBADF;
544 556
545 return ubi_eba_unmap_leb(ubi, vol_id, lnum); 557 return ubi_eba_unmap_leb(ubi, vol, lnum);
546} 558}
547EXPORT_SYMBOL_GPL(ubi_leb_unmap); 559EXPORT_SYMBOL_GPL(ubi_leb_unmap);
548 560
549/** 561/**
562 * ubi_leb_map - map logical erasblock to a physical eraseblock.
563 * @desc: volume descriptor
564 * @lnum: logical eraseblock number
565 * @dtype: expected data type
566 *
567 * This function maps an un-mapped logical eraseblock @lnum to a physical
568 * eraseblock. This means, that after a successfull invocation of this
569 * function the logical eraseblock @lnum will be empty (contain only %0xFF
570 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
571 * happens.
572 *
573 * This function returns zero in case of success, %-EBADF if the volume is
574 * damaged because of an interrupted update, %-EBADMSG if the logical
575 * eraseblock is already mapped, and other negative error codes in case of
576 * other failures.
577 */
578int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
579{
580 struct ubi_volume *vol = desc->vol;
581 struct ubi_device *ubi = vol->ubi;
582 int vol_id = vol->vol_id;
583
584 dbg_msg("unmap LEB %d:%d", vol_id, lnum);
585
586 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
587 return -EROFS;
588
589 if (lnum < 0 || lnum >= vol->reserved_pebs)
590 return -EINVAL;
591
592 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
593 dtype != UBI_UNKNOWN)
594 return -EINVAL;
595
596 if (vol->upd_marker)
597 return -EBADF;
598
599 if (vol->eba_tbl[lnum] >= 0)
600 return -EBADMSG;
601
602 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
603}
604EXPORT_SYMBOL_GPL(ubi_leb_map);
605
606/**
550 * ubi_is_mapped - check if logical eraseblock is mapped. 607 * ubi_is_mapped - check if logical eraseblock is mapped.
551 * @desc: volume descriptor 608 * @desc: volume descriptor
552 * @lnum: logical eraseblock number 609 * @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2cf..93e052812012 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
79 else 79 else
80 size = vol->usable_leb_size; 80 size = vol->usable_leb_size;
81 81
82 err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
83 if (err) { 83 if (err) {
84 if (err == -EBADMSG) 84 if (err == -EBADMSG)
85 err = 1; 85 err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..c57e8eff9866 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -769,7 +769,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
769 */ 769 */
770static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 770static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
771{ 771{
772 long long ec; 772 long long uninitialized_var(ec);
773 int err, bitflips = 0, vol_id, ec_corr = 0; 773 int err, bitflips = 0, vol_id, ec_corr = 0;
774 774
775 dbg_bld("scan PEB %d", pnum); 775 dbg_bld("scan PEB %d", pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a633030..ef22f922f580 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
94 UBI_IO_BITFLIPS 94 UBI_IO_BITFLIPS
95}; 95};
96 96
97extern int ubi_devices_cnt; 97/**
98extern struct ubi_device *ubi_devices[]; 98 * struct ubi_wl_entry - wear-leveling entry.
99 * @rb: link in the corresponding RB-tree
100 * @ec: erase counter
101 * @pnum: physical eraseblock number
102 *
103 * This data structure is used in the WL unit. Each physical eraseblock has a
104 * corresponding &struct wl_entry object which may be kept in different
105 * RB-trees. See WL unit for details.
106 */
107struct ubi_wl_entry {
108 struct rb_node rb;
109 int ec;
110 int pnum;
111};
112
113/**
114 * struct ubi_ltree_entry - an entry in the lock tree.
115 * @rb: links RB-tree nodes
116 * @vol_id: volume ID of the locked logical eraseblock
117 * @lnum: locked logical eraseblock number
118 * @users: how many tasks are using this logical eraseblock or wait for it
119 * @mutex: read/write mutex to implement read/write access serialization to
120 * the (@vol_id, @lnum) logical eraseblock
121 *
122 * This data structure is used in the EBA unit to implement per-LEB locking.
123 * When a logical eraseblock is being locked - corresponding
124 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
125 * See EBA unit for details.
126 */
127struct ubi_ltree_entry {
128 struct rb_node rb;
129 int vol_id;
130 int lnum;
131 int users;
132 struct rw_semaphore mutex;
133};
99 134
100struct ubi_volume_desc; 135struct ubi_volume_desc;
101 136
@@ -105,10 +140,10 @@ struct ubi_volume_desc;
105 * @cdev: character device object to create character device 140 * @cdev: character device object to create character device
106 * @ubi: reference to the UBI device description object 141 * @ubi: reference to the UBI device description object
107 * @vol_id: volume ID 142 * @vol_id: volume ID
143 * @ref_count: volume reference count
108 * @readers: number of users holding this volume in read-only mode 144 * @readers: number of users holding this volume in read-only mode
109 * @writers: number of users holding this volume in read-write mode 145 * @writers: number of users holding this volume in read-write mode
110 * @exclusive: whether somebody holds this volume in exclusive mode 146 * @exclusive: whether somebody holds this volume in exclusive mode
111 * @removed: if the volume was removed
112 * @checked: if this static volume was checked 147 * @checked: if this static volume was checked
113 * 148 *
114 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 149 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
@@ -121,7 +156,7 @@ struct ubi_volume_desc;
121 * @corrupted: non-zero if the volume is corrupted (static volumes only) 156 * @corrupted: non-zero if the volume is corrupted (static volumes only)
122 * @alignment: volume alignment 157 * @alignment: volume alignment
123 * @data_pad: how many bytes are not used at the end of physical eraseblocks to 158 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
124 * satisfy the requested alignment 159 * satisfy the requested alignment
125 * @name_len: volume name length 160 * @name_len: volume name length
126 * @name: volume name 161 * @name: volume name
127 * 162 *
@@ -150,10 +185,10 @@ struct ubi_volume {
150 struct cdev cdev; 185 struct cdev cdev;
151 struct ubi_device *ubi; 186 struct ubi_device *ubi;
152 int vol_id; 187 int vol_id;
188 int ref_count;
153 int readers; 189 int readers;
154 int writers; 190 int writers;
155 int exclusive; 191 int exclusive;
156 int removed;
157 int checked; 192 int checked;
158 193
159 int reserved_pebs; 194 int reserved_pebs;
@@ -200,17 +235,17 @@ struct ubi_wl_entry;
200 235
201/** 236/**
202 * struct ubi_device - UBI device description structure 237 * struct ubi_device - UBI device description structure
203 * @dev: class device object to use the the Linux device model 238 * @dev: UBI device object to use the the Linux device model
204 * @cdev: character device object to create character device 239 * @cdev: character device object to create character device
205 * @ubi_num: UBI device number 240 * @ubi_num: UBI device number
206 * @ubi_name: UBI device name 241 * @ubi_name: UBI device name
207 * @major: character device major number
208 * @vol_count: number of volumes in this UBI device 242 * @vol_count: number of volumes in this UBI device
209 * @volumes: volumes of this UBI device 243 * @volumes: volumes of this UBI device
210 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, 244 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
211 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, 245 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
212 * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and 246 * @vol->readers, @vol->writers, @vol->exclusive,
213 * @vol->eba_tbl. 247 * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
248 * @ref_count: count of references on the UBI device
214 * 249 *
215 * @rsvd_pebs: count of reserved physical eraseblocks 250 * @rsvd_pebs: count of reserved physical eraseblocks
216 * @avail_pebs: count of available physical eraseblocks 251 * @avail_pebs: count of available physical eraseblocks
@@ -221,7 +256,8 @@ struct ubi_wl_entry;
221 * @vtbl_slots: how many slots are available in the volume table 256 * @vtbl_slots: how many slots are available in the volume table
222 * @vtbl_size: size of the volume table in bytes 257 * @vtbl_size: size of the volume table in bytes
223 * @vtbl: in-RAM volume table copy 258 * @vtbl: in-RAM volume table copy
224 * @vtbl_mutex: protects on-flash volume table 259 * @volumes_mutex: protects on-flash volume table and serializes volume
260 * changes, like creation, deletion, update, resize
225 * 261 *
226 * @max_ec: current highest erase counter value 262 * @max_ec: current highest erase counter value
227 * @mean_ec: current mean erase counter value 263 * @mean_ec: current mean erase counter value
@@ -238,15 +274,15 @@ struct ubi_wl_entry;
238 * @prot.pnum: protection tree indexed by physical eraseblock numbers 274 * @prot.pnum: protection tree indexed by physical eraseblock numbers
239 * @prot.aec: protection tree indexed by absolute erase counter value 275 * @prot.aec: protection tree indexed by absolute erase counter value
240 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 276 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
241 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 277 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
242 * fields 278 * fields
279 * @move_mutex: serializes eraseblock moves
243 * @wl_scheduled: non-zero if the wear-leveling was scheduled 280 * @wl_scheduled: non-zero if the wear-leveling was scheduled
244 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 281 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
245 * physical eraseblock 282 * physical eraseblock
246 * @abs_ec: absolute erase counter 283 * @abs_ec: absolute erase counter
247 * @move_from: physical eraseblock from where the data is being moved 284 * @move_from: physical eraseblock from where the data is being moved
248 * @move_to: physical eraseblock where the data is being moved to 285 * @move_to: physical eraseblock where the data is being moved to
249 * @move_from_put: if the "from" PEB was put
250 * @move_to_put: if the "to" PEB was put 286 * @move_to_put: if the "to" PEB was put
251 * @works: list of pending works 287 * @works: list of pending works
252 * @works_count: count of pending works 288 * @works_count: count of pending works
@@ -273,13 +309,13 @@ struct ubi_wl_entry;
273 * @hdrs_min_io_size 309 * @hdrs_min_io_size
274 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 310 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
275 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 311 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
276 * not 312 * not
277 * @mtd: MTD device descriptor 313 * @mtd: MTD device descriptor
278 * 314 *
279 * @peb_buf1: a buffer of PEB size used for different purposes 315 * @peb_buf1: a buffer of PEB size used for different purposes
280 * @peb_buf2: another buffer of PEB size used for different purposes 316 * @peb_buf2: another buffer of PEB size used for different purposes
281 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 317 * @buf_mutex: proptects @peb_buf1 and @peb_buf2
282 * @dbg_peb_buf: buffer of PEB size used for debugging 318 * @dbg_peb_buf: buffer of PEB size used for debugging
283 * @dbg_buf_mutex: proptects @dbg_peb_buf 319 * @dbg_buf_mutex: proptects @dbg_peb_buf
284 */ 320 */
285struct ubi_device { 321struct ubi_device {
@@ -287,10 +323,10 @@ struct ubi_device {
287 struct device dev; 323 struct device dev;
288 int ubi_num; 324 int ubi_num;
289 char ubi_name[sizeof(UBI_NAME_STR)+5]; 325 char ubi_name[sizeof(UBI_NAME_STR)+5];
290 int major;
291 int vol_count; 326 int vol_count;
292 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 327 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
293 spinlock_t volumes_lock; 328 spinlock_t volumes_lock;
329 int ref_count;
294 330
295 int rsvd_pebs; 331 int rsvd_pebs;
296 int avail_pebs; 332 int avail_pebs;
@@ -300,7 +336,7 @@ struct ubi_device {
300 int vtbl_slots; 336 int vtbl_slots;
301 int vtbl_size; 337 int vtbl_size;
302 struct ubi_vtbl_record *vtbl; 338 struct ubi_vtbl_record *vtbl;
303 struct mutex vtbl_mutex; 339 struct mutex volumes_mutex;
304 340
305 int max_ec; 341 int max_ec;
306 int mean_ec; 342 int mean_ec;
@@ -320,12 +356,13 @@ struct ubi_device {
320 struct rb_root aec; 356 struct rb_root aec;
321 } prot; 357 } prot;
322 spinlock_t wl_lock; 358 spinlock_t wl_lock;
359 struct mutex move_mutex;
360 struct rw_semaphore work_sem;
323 int wl_scheduled; 361 int wl_scheduled;
324 struct ubi_wl_entry **lookuptbl; 362 struct ubi_wl_entry **lookuptbl;
325 unsigned long long abs_ec; 363 unsigned long long abs_ec;
326 struct ubi_wl_entry *move_from; 364 struct ubi_wl_entry *move_from;
327 struct ubi_wl_entry *move_to; 365 struct ubi_wl_entry *move_to;
328 int move_from_put;
329 int move_to_put; 366 int move_to_put;
330 struct list_head works; 367 struct list_head works;
331 int works_count; 368 int works_count;
@@ -355,15 +392,20 @@ struct ubi_device {
355 void *peb_buf1; 392 void *peb_buf1;
356 void *peb_buf2; 393 void *peb_buf2;
357 struct mutex buf_mutex; 394 struct mutex buf_mutex;
395 struct mutex ckvol_mutex;
358#ifdef CONFIG_MTD_UBI_DEBUG 396#ifdef CONFIG_MTD_UBI_DEBUG
359 void *dbg_peb_buf; 397 void *dbg_peb_buf;
360 struct mutex dbg_buf_mutex; 398 struct mutex dbg_buf_mutex;
361#endif 399#endif
362}; 400};
363 401
402extern struct kmem_cache *ubi_ltree_slab;
403extern struct kmem_cache *ubi_wl_entry_slab;
404extern struct file_operations ubi_ctrl_cdev_operations;
364extern struct file_operations ubi_cdev_operations; 405extern struct file_operations ubi_cdev_operations;
365extern struct file_operations ubi_vol_cdev_operations; 406extern struct file_operations ubi_vol_cdev_operations;
366extern struct class *ubi_class; 407extern struct class *ubi_class;
408extern struct mutex ubi_devices_mutex;
367 409
368/* vtbl.c */ 410/* vtbl.c */
369int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 411int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,8 +416,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
374int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 416int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
375int ubi_remove_volume(struct ubi_volume_desc *desc); 417int ubi_remove_volume(struct ubi_volume_desc *desc);
376int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 418int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
377int ubi_add_volume(struct ubi_device *ubi, int vol_id); 419int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
378void ubi_free_volume(struct ubi_device *ubi, int vol_id); 420void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
379 421
380/* upd.c */ 422/* upd.c */
381int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); 423int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
@@ -399,16 +441,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
399#endif 441#endif
400 442
401/* eba.c */ 443/* eba.c */
402int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); 444int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
403int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 445 int lnum);
404 int offset, int len, int check); 446int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
405int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 447 void *buf, int offset, int len, int check);
448int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
406 const void *buf, int offset, int len, int dtype); 449 const void *buf, int offset, int len, int dtype);
407int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 450int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
408 const void *buf, int len, int dtype, 451 int lnum, const void *buf, int len, int dtype,
409 int used_ebs); 452 int used_ebs);
410int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 453int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
411 const void *buf, int len, int dtype); 454 int lnum, const void *buf, int len, int dtype);
412int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 455int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
413 struct ubi_vid_hdr *vid_hdr); 456 struct ubi_vid_hdr *vid_hdr);
414int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 457int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +464,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
421int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 464int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
422int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 465int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
423void ubi_wl_close(struct ubi_device *ubi); 466void ubi_wl_close(struct ubi_device *ubi);
467int ubi_thread(void *u);
424 468
425/* io.c */ 469/* io.c */
426int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, 470int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +483,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
439int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 483int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
440 struct ubi_vid_hdr *vid_hdr); 484 struct ubi_vid_hdr *vid_hdr);
441 485
486/* build.c */
487int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
488int ubi_detach_mtd_dev(int ubi_num, int anyway);
489struct ubi_device *ubi_get_device(int ubi_num);
490void ubi_put_device(struct ubi_device *ubi);
491struct ubi_device *ubi_get_by_major(int major);
492int ubi_major2num(int major);
493
442/* 494/*
443 * ubi_rb_for_each_entry - walk an RB-tree. 495 * ubi_rb_for_each_entry - walk an RB-tree.
444 * @rb: a pointer to type 'struct rb_node' to to use as a loop counter 496 * @rb: a pointer to type 'struct rb_node' to to use as a loop counter
@@ -523,8 +575,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
523 */ 575 */
524static inline void ubi_ro_mode(struct ubi_device *ubi) 576static inline void ubi_ro_mode(struct ubi_device *ubi)
525{ 577{
526 ubi->ro_mode = 1; 578 if (!ubi->ro_mode) {
527 ubi_warn("switch to read-only mode"); 579 ubi->ro_mode = 1;
580 ubi_warn("switch to read-only mode");
581 }
528} 582}
529 583
530/** 584/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a8328..e32b04d2e048 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -67,7 +67,9 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
67 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 67 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
68 vtbl_rec.upd_marker = 1; 68 vtbl_rec.upd_marker = 1;
69 69
70 mutex_lock(&ubi->volumes_mutex);
70 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 71 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
72 mutex_unlock(&ubi->volumes_mutex);
71 vol->upd_marker = 1; 73 vol->upd_marker = 1;
72 return err; 74 return err;
73} 75}
@@ -106,7 +108,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
106 vol->last_eb_bytes = vol->usable_leb_size; 108 vol->last_eb_bytes = vol->usable_leb_size;
107 } 109 }
108 110
111 mutex_lock(&ubi->volumes_mutex);
109 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 112 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
113 mutex_unlock(&ubi->volumes_mutex);
110 vol->upd_marker = 0; 114 vol->upd_marker = 0;
111 return err; 115 return err;
112} 116}
@@ -136,7 +140,7 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
136 140
137 /* Before updating - wipe out the volume */ 141 /* Before updating - wipe out the volume */
138 for (i = 0; i < vol->reserved_pebs; i++) { 142 for (i = 0; i < vol->reserved_pebs; i++) {
139 err = ubi_eba_unmap_leb(ubi, vol_id, i); 143 err = ubi_eba_unmap_leb(ubi, vol, i);
140 if (err) 144 if (err)
141 return err; 145 return err;
142 } 146 }
@@ -209,8 +213,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
209 if (len != l) 213 if (len != l)
210 dbg_msg("skip last %d bytes (0xFF)", len - l); 214 dbg_msg("skip last %d bytes (0xFF)", len - l);
211 215
212 err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, 216 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, l, UBI_UNKNOWN);
213 UBI_UNKNOWN);
214 } else { 217 } else {
215 /* 218 /*
216 * When writing static volume, and this is the last logical 219 * When writing static volume, and this is the last logical
@@ -222,7 +225,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
222 * contain zeros, not random trash. 225 * contain zeros, not random trash.
223 */ 226 */
224 memset(buf + len, 0, vol->usable_leb_size - len); 227 memset(buf + len, 0, vol->usable_leb_size - len);
225 err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, 228 err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
226 UBI_UNKNOWN, used_ebs); 229 UBI_UNKNOWN, used_ebs);
227 } 230 }
228 231
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2b..177227e1f80d 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
63 * B. process 2 removes volume Y; 63 * B. process 2 removes volume Y;
64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
65 * 65 *
66 * What we want to do in a situation like that is to return error when the file 66 * In this situation, this function will return %-ENODEV because it will find
67 * is read. This is done by means of the 'removed' flag and the 'vol_lock' of 67 * out that the volume was removed from the @ubi->volumes array.
68 * the UBI volume description object.
69 */ 68 */
70static ssize_t vol_attribute_show(struct device *dev, 69static ssize_t vol_attribute_show(struct device *dev,
71 struct device_attribute *attr, char *buf) 70 struct device_attribute *attr, char *buf)
72{ 71{
73 int ret; 72 int ret;
74 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 73 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
74 struct ubi_device *ubi;
75 75
76 spin_lock(&vol->ubi->volumes_lock); 76 ubi = ubi_get_device(vol->ubi->ubi_num);
77 if (vol->removed) { 77 if (!ubi)
78 spin_unlock(&vol->ubi->volumes_lock); 78 return -ENODEV;
79
80 spin_lock(&ubi->volumes_lock);
81 if (!ubi->volumes[vol->vol_id]) {
82 spin_unlock(&ubi->volumes_lock);
83 ubi_put_device(ubi);
79 return -ENODEV; 84 return -ENODEV;
80 } 85 }
86 /* Take a reference to prevent volume removal */
87 vol->ref_count += 1;
88 spin_unlock(&ubi->volumes_lock);
89
81 if (attr == &attr_vol_reserved_ebs) 90 if (attr == &attr_vol_reserved_ebs)
82 ret = sprintf(buf, "%d\n", vol->reserved_pebs); 91 ret = sprintf(buf, "%d\n", vol->reserved_pebs);
83 else if (attr == &attr_vol_type) { 92 else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
94 ret = sprintf(buf, "%d\n", vol->corrupted); 103 ret = sprintf(buf, "%d\n", vol->corrupted);
95 else if (attr == &attr_vol_alignment) 104 else if (attr == &attr_vol_alignment)
96 ret = sprintf(buf, "%d\n", vol->alignment); 105 ret = sprintf(buf, "%d\n", vol->alignment);
97 else if (attr == &attr_vol_usable_eb_size) { 106 else if (attr == &attr_vol_usable_eb_size)
98 ret = sprintf(buf, "%d\n", vol->usable_leb_size); 107 ret = sprintf(buf, "%d\n", vol->usable_leb_size);
99 } else if (attr == &attr_vol_data_bytes) 108 else if (attr == &attr_vol_data_bytes)
100 ret = sprintf(buf, "%lld\n", vol->used_bytes); 109 ret = sprintf(buf, "%lld\n", vol->used_bytes);
101 else if (attr == &attr_vol_upd_marker) 110 else if (attr == &attr_vol_upd_marker)
102 ret = sprintf(buf, "%d\n", vol->upd_marker); 111 ret = sprintf(buf, "%d\n", vol->upd_marker);
103 else 112 else
104 BUG(); 113 /* This must be a bug */
105 spin_unlock(&vol->ubi->volumes_lock); 114 ret = -EINVAL;
115
116 /* We've done the operation, drop volume and UBI device references */
117 spin_lock(&ubi->volumes_lock);
118 vol->ref_count -= 1;
119 ubi_assert(vol->ref_count >= 0);
120 spin_unlock(&ubi->volumes_lock);
121 ubi_put_device(ubi);
106 return ret; 122 return ret;
107} 123}
108 124
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
110static void vol_release(struct device *dev) 126static void vol_release(struct device *dev)
111{ 127{
112 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
113 ubi_assert(vol->removed); 129
114 kfree(vol); 130 kfree(vol);
115} 131}
116 132
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
152 if (err) 168 if (err)
153 return err; 169 return err;
154 err = device_create_file(&vol->dev, &attr_vol_upd_marker); 170 err = device_create_file(&vol->dev, &attr_vol_upd_marker);
155 if (err) 171 return err;
156 return err;
157 return 0;
158} 172}
159 173
160/** 174/**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
180 * @req: volume creation request 194 * @req: volume creation request
181 * 195 *
182 * This function creates volume described by @req. If @req->vol_id id 196 * This function creates volume described by @req. If @req->vol_id id
183 * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume 197 * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume
184 * and saves it in @req->vol_id. Returns zero in case of success and a negative 198 * and saves it in @req->vol_id. Returns zero in case of success and a negative
185 * error code in case of failure. 199 * error code in case of failure. Note, the caller has to have the
200 * @ubi->volumes_mutex locked.
186 */ 201 */
187int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 202int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
188{ 203{
189 int i, err, vol_id = req->vol_id; 204 int i, err, vol_id = req->vol_id, dont_free = 0;
190 struct ubi_volume *vol; 205 struct ubi_volume *vol;
191 struct ubi_vtbl_record vtbl_rec; 206 struct ubi_vtbl_record vtbl_rec;
192 uint64_t bytes; 207 uint64_t bytes;
208 dev_t dev;
193 209
194 if (ubi->ro_mode) 210 if (ubi->ro_mode)
195 return -EROFS; 211 return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
199 return -ENOMEM; 215 return -ENOMEM;
200 216
201 spin_lock(&ubi->volumes_lock); 217 spin_lock(&ubi->volumes_lock);
202
203 if (vol_id == UBI_VOL_NUM_AUTO) { 218 if (vol_id == UBI_VOL_NUM_AUTO) {
204 /* Find unused volume ID */ 219 /* Find unused volume ID */
205 dbg_msg("search for vacant volume ID"); 220 dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
252 } 267 }
253 ubi->avail_pebs -= vol->reserved_pebs; 268 ubi->avail_pebs -= vol->reserved_pebs;
254 ubi->rsvd_pebs += vol->reserved_pebs; 269 ubi->rsvd_pebs += vol->reserved_pebs;
270 spin_unlock(&ubi->volumes_lock);
255 271
256 vol->vol_id = vol_id; 272 vol->vol_id = vol_id;
257 vol->alignment = req->alignment; 273 vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
259 vol->vol_type = req->vol_type; 275 vol->vol_type = req->vol_type;
260 vol->name_len = req->name_len; 276 vol->name_len = req->name_len;
261 memcpy(vol->name, req->name, vol->name_len + 1); 277 memcpy(vol->name, req->name, vol->name_len + 1);
262 vol->exclusive = 1;
263 vol->ubi = ubi; 278 vol->ubi = ubi;
264 ubi->volumes[vol_id] = vol;
265 spin_unlock(&ubi->volumes_lock);
266 279
267 /* 280 /*
268 * Finish all pending erases because there may be some LEBs belonging 281 * Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
299 /* Register character device for the volume */ 312 /* Register character device for the volume */
300 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 313 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
301 vol->cdev.owner = THIS_MODULE; 314 vol->cdev.owner = THIS_MODULE;
302 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); 315 dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
316 err = cdev_add(&vol->cdev, dev, 1);
303 if (err) { 317 if (err) {
304 ubi_err("cannot add character device for volume %d", vol_id); 318 ubi_err("cannot add character device");
305 goto out_mapping; 319 goto out_mapping;
306 } 320 }
307 321
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
311 325
312 vol->dev.release = vol_release; 326 vol->dev.release = vol_release;
313 vol->dev.parent = &ubi->dev; 327 vol->dev.parent = &ubi->dev;
314 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 328 vol->dev.devt = dev;
315 vol->dev.class = ubi_class; 329 vol->dev.class = ubi_class;
330
316 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 331 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
317 err = device_register(&vol->dev); 332 err = device_register(&vol->dev);
318 if (err) 333 if (err) {
334 ubi_err("cannot register device");
319 goto out_gluebi; 335 goto out_gluebi;
336 }
320 337
321 err = volume_sysfs_init(ubi, vol); 338 err = volume_sysfs_init(ubi, vol);
322 if (err) 339 if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
339 goto out_sysfs; 356 goto out_sysfs;
340 357
341 spin_lock(&ubi->volumes_lock); 358 spin_lock(&ubi->volumes_lock);
359 ubi->volumes[vol_id] = vol;
342 ubi->vol_count += 1; 360 ubi->vol_count += 1;
343 vol->exclusive = 0;
344 spin_unlock(&ubi->volumes_lock); 361 spin_unlock(&ubi->volumes_lock);
345 362
346 paranoid_check_volumes(ubi); 363 paranoid_check_volumes(ubi);
347 return 0; 364 return 0;
348 365
366out_sysfs:
367 /*
368 * We have registered our device, we should not free the volume*
369 * description object in this function in case of an error - it is
370 * freed by the release function.
371 *
372 * Get device reference to prevent the release function from being
373 * called just after sysfs has been closed.
374 */
375 dont_free = 1;
376 get_device(&vol->dev);
377 volume_sysfs_close(vol);
349out_gluebi: 378out_gluebi:
350 err = ubi_destroy_gluebi(vol); 379 ubi_destroy_gluebi(vol);
351out_cdev: 380out_cdev:
352 cdev_del(&vol->cdev); 381 cdev_del(&vol->cdev);
353out_mapping: 382out_mapping:
@@ -356,26 +385,13 @@ out_acc:
356 spin_lock(&ubi->volumes_lock); 385 spin_lock(&ubi->volumes_lock);
357 ubi->rsvd_pebs -= vol->reserved_pebs; 386 ubi->rsvd_pebs -= vol->reserved_pebs;
358 ubi->avail_pebs += vol->reserved_pebs; 387 ubi->avail_pebs += vol->reserved_pebs;
359 ubi->volumes[vol_id] = NULL;
360out_unlock: 388out_unlock:
361 spin_unlock(&ubi->volumes_lock); 389 spin_unlock(&ubi->volumes_lock);
362 kfree(vol); 390 if (dont_free)
363 return err; 391 put_device(&vol->dev);
364 392 else
365 /* 393 kfree(vol);
366 * We are registered, so @vol is destroyed in the release function and 394 ubi_err("cannot create volume %d, error %d", vol_id, err);
367 * we have to de-initialize differently.
368 */
369out_sysfs:
370 err = ubi_destroy_gluebi(vol);
371 cdev_del(&vol->cdev);
372 kfree(vol->eba_tbl);
373 spin_lock(&ubi->volumes_lock);
374 ubi->rsvd_pebs -= vol->reserved_pebs;
375 ubi->avail_pebs += vol->reserved_pebs;
376 ubi->volumes[vol_id] = NULL;
377 spin_unlock(&ubi->volumes_lock);
378 volume_sysfs_close(vol);
379 return err; 395 return err;
380} 396}
381 397
@@ -385,7 +401,8 @@ out_sysfs:
385 * 401 *
386 * This function removes volume described by @desc. The volume has to be opened 402 * This function removes volume described by @desc. The volume has to be opened
387 * in "exclusive" mode. Returns zero in case of success and a negative error 403 * in "exclusive" mode. Returns zero in case of success and a negative error
388 * code in case of failure. 404 * code in case of failure. The caller has to have the @ubi->volumes_mutex
405 * locked.
389 */ 406 */
390int ubi_remove_volume(struct ubi_volume_desc *desc) 407int ubi_remove_volume(struct ubi_volume_desc *desc)
391{ 408{
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
400 if (ubi->ro_mode) 417 if (ubi->ro_mode)
401 return -EROFS; 418 return -EROFS;
402 419
420 spin_lock(&ubi->volumes_lock);
421 if (vol->ref_count > 1) {
422 /*
423 * The volume is busy, probably someone is reading one of its
424 * sysfs files.
425 */
426 err = -EBUSY;
427 goto out_unlock;
428 }
429 ubi->volumes[vol_id] = NULL;
430 spin_unlock(&ubi->volumes_lock);
431
403 err = ubi_destroy_gluebi(vol); 432 err = ubi_destroy_gluebi(vol);
404 if (err) 433 if (err)
405 return err; 434 goto out_err;
406 435
407 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 436 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
408 if (err) 437 if (err)
409 return err; 438 goto out_err;
410 439
411 for (i = 0; i < vol->reserved_pebs; i++) { 440 for (i = 0; i < vol->reserved_pebs; i++) {
412 err = ubi_eba_unmap_leb(ubi, vol_id, i); 441 err = ubi_eba_unmap_leb(ubi, vol, i);
413 if (err) 442 if (err)
414 return err; 443 goto out_err;
415 } 444 }
416 445
417 spin_lock(&ubi->volumes_lock);
418 vol->removed = 1;
419 ubi->volumes[vol_id] = NULL;
420 spin_unlock(&ubi->volumes_lock);
421
422 kfree(vol->eba_tbl); 446 kfree(vol->eba_tbl);
423 vol->eba_tbl = NULL; 447 vol->eba_tbl = NULL;
424 cdev_del(&vol->cdev); 448 cdev_del(&vol->cdev);
425 volume_sysfs_close(vol); 449 volume_sysfs_close(vol);
426 kfree(desc);
427 450
428 spin_lock(&ubi->volumes_lock); 451 spin_lock(&ubi->volumes_lock);
429 ubi->rsvd_pebs -= reserved_pebs; 452 ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
441 spin_unlock(&ubi->volumes_lock); 464 spin_unlock(&ubi->volumes_lock);
442 465
443 paranoid_check_volumes(ubi); 466 paranoid_check_volumes(ubi);
444 module_put(THIS_MODULE);
445 return 0; 467 return 0;
468
469out_err:
470 ubi_err("cannot remove volume %d, error %d", vol_id, err);
471 spin_lock(&ubi->volumes_lock);
472 ubi->volumes[vol_id] = vol;
473out_unlock:
474 spin_unlock(&ubi->volumes_lock);
475 return err;
446} 476}
447 477
448/** 478/**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
450 * @desc: volume descriptor 480 * @desc: volume descriptor
451 * @reserved_pebs: new size in physical eraseblocks 481 * @reserved_pebs: new size in physical eraseblocks
452 * 482 *
453 * This function returns zero in case of success, and a negative error code in 483 * This function re-sizes the volume and returns zero in case of success, and a
454 * case of failure. 484 * negative error code in case of failure. The caller has to have the
485 * @ubi->volumes_mutex locked.
455 */ 486 */
456int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 487int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
457{ 488{
@@ -487,6 +518,15 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 for (i = 0; i < reserved_pebs; i++) 518 for (i = 0; i < reserved_pebs; i++)
488 new_mapping[i] = UBI_LEB_UNMAPPED; 519 new_mapping[i] = UBI_LEB_UNMAPPED;
489 520
521 spin_lock(&ubi->volumes_lock);
522 if (vol->ref_count > 1) {
523 spin_unlock(&ubi->volumes_lock);
524 err = -EBUSY;
525 goto out_free;
526 }
527 spin_unlock(&ubi->volumes_lock);
528
529
490 /* Reserve physical eraseblocks */ 530 /* Reserve physical eraseblocks */
491 pebs = reserved_pebs - vol->reserved_pebs; 531 pebs = reserved_pebs - vol->reserved_pebs;
492 if (pebs > 0) { 532 if (pebs > 0) {
@@ -516,7 +556,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
516 556
517 if (pebs < 0) { 557 if (pebs < 0) {
518 for (i = 0; i < -pebs; i++) { 558 for (i = 0; i < -pebs; i++) {
519 err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); 559 err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
520 if (err) 560 if (err)
521 goto out_acc; 561 goto out_acc;
522 } 562 }
@@ -565,27 +605,28 @@ out_free:
565/** 605/**
566 * ubi_add_volume - add volume. 606 * ubi_add_volume - add volume.
567 * @ubi: UBI device description object 607 * @ubi: UBI device description object
568 * @vol_id: volume ID 608 * @vol: volume description object
569 * 609 *
570 * This function adds an existin volume and initializes all its data 610 * This function adds an existing volume and initializes all its data
571 * structures. Returnes zero in case of success and a negative error code in 611 * structures. Returns zero in case of success and a negative error code in
572 * case of failure. 612 * case of failure.
573 */ 613 */
574int ubi_add_volume(struct ubi_device *ubi, int vol_id) 614int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
575{ 615{
576 int err; 616 int err, vol_id = vol->vol_id;
577 struct ubi_volume *vol = ubi->volumes[vol_id]; 617 dev_t dev;
578 618
579 dbg_msg("add volume %d", vol_id); 619 dbg_msg("add volume %d", vol_id);
580 ubi_dbg_dump_vol_info(vol); 620 ubi_dbg_dump_vol_info(vol);
581 ubi_assert(vol);
582 621
583 /* Register character device for the volume */ 622 /* Register character device for the volume */
584 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 623 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
585 vol->cdev.owner = THIS_MODULE; 624 vol->cdev.owner = THIS_MODULE;
586 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); 625 dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
626 err = cdev_add(&vol->cdev, dev, 1);
587 if (err) { 627 if (err) {
588 ubi_err("cannot add character device for volume %d", vol_id); 628 ubi_err("cannot add character device for volume %d, error %d",
629 vol_id, err);
589 return err; 630 return err;
590 } 631 }
591 632
@@ -595,7 +636,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
595 636
596 vol->dev.release = vol_release; 637 vol->dev.release = vol_release;
597 vol->dev.parent = &ubi->dev; 638 vol->dev.parent = &ubi->dev;
598 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 639 vol->dev.devt = dev;
599 vol->dev.class = ubi_class; 640 vol->dev.class = ubi_class;
600 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 641 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
601 err = device_register(&vol->dev); 642 err = device_register(&vol->dev);
@@ -623,22 +664,19 @@ out_cdev:
623/** 664/**
624 * ubi_free_volume - free volume. 665 * ubi_free_volume - free volume.
625 * @ubi: UBI device description object 666 * @ubi: UBI device description object
626 * @vol_id: volume ID 667 * @vol: volume description object
627 * 668 *
628 * This function frees all resources for volume @vol_id but does not remove it. 669 * This function frees all resources for volume @vol but does not remove it.
629 * Used only when the UBI device is detached. 670 * Used only when the UBI device is detached.
630 */ 671 */
631void ubi_free_volume(struct ubi_device *ubi, int vol_id) 672void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
632{ 673{
633 int err; 674 int err;
634 struct ubi_volume *vol = ubi->volumes[vol_id];
635 675
636 dbg_msg("free volume %d", vol_id); 676 dbg_msg("free volume %d", vol->vol_id);
637 ubi_assert(vol);
638 677
639 vol->removed = 1; 678 ubi->volumes[vol->vol_id] = NULL;
640 err = ubi_destroy_gluebi(vol); 679 err = ubi_destroy_gluebi(vol);
641 ubi->volumes[vol_id] = NULL;
642 cdev_del(&vol->cdev); 680 cdev_del(&vol->cdev);
643 volume_sysfs_close(vol); 681 volume_sysfs_close(vol);
644} 682}
@@ -820,9 +858,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
820{ 858{
821 int i; 859 int i;
822 860
823 mutex_lock(&ubi->vtbl_mutex);
824 for (i = 0; i < ubi->vtbl_slots; i++) 861 for (i = 0; i < ubi->vtbl_slots; i++)
825 paranoid_check_volume(ubi, i); 862 paranoid_check_volume(ubi, i);
826 mutex_unlock(&ubi->vtbl_mutex);
827} 863}
828#endif 864#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7ec..7a1a8a1da610 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
86{ 86{
87 int i, err; 87 int i, err;
88 uint32_t crc; 88 uint32_t crc;
89 struct ubi_volume *layout_vol;
89 90
90 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); 91 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
92 layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOL_ID)];
91 93
92 if (!vtbl_rec) 94 if (!vtbl_rec)
93 vtbl_rec = &empty_vtbl_record; 95 vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
96 vtbl_rec->crc = cpu_to_be32(crc); 98 vtbl_rec->crc = cpu_to_be32(crc);
97 } 99 }
98 100
99 mutex_lock(&ubi->vtbl_mutex);
100 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); 101 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
101 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 102 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
102 err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); 103 err = ubi_eba_unmap_leb(ubi, layout_vol, i);
103 if (err) { 104 if (err)
104 mutex_unlock(&ubi->vtbl_mutex);
105 return err; 105 return err;
106 } 106
107 err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, 107 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
108 ubi->vtbl_size, UBI_LONGTERM); 108 ubi->vtbl_size, UBI_LONGTERM);
109 if (err) { 109 if (err)
110 mutex_unlock(&ubi->vtbl_mutex);
111 return err; 110 return err;
112 }
113 } 111 }
114 112
115 paranoid_vtbl_check(ubi); 113 paranoid_vtbl_check(ubi);
116 mutex_unlock(&ubi->vtbl_mutex);
117 return ubi_wl_flush(ubi); 114 return ubi_wl_flush(ubi);
118} 115}
119 116
120/** 117/**
121 * vol_til_check - check if volume table is not corrupted and contains sensible 118 * vtbl_check - check if volume table is not corrupted and contains sensible
122 * data. 119 * data.
123 *
124 * @ubi: UBI device description object 120 * @ubi: UBI device description object
125 * @vtbl: volume table 121 * @vtbl: volume table
126 * 122 *
@@ -568,6 +564,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
568 vol->last_eb_bytes = sv->last_data_size; 564 vol->last_eb_bytes = sv->last_data_size;
569 } 565 }
570 566
567 /* And add the layout volume */
571 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); 568 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
572 if (!vol) 569 if (!vol)
573 return -ENOMEM; 570 return -ENOMEM;
@@ -583,6 +580,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
583 vol->used_bytes = 580 vol->used_bytes =
584 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); 581 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
585 vol->vol_id = UBI_LAYOUT_VOL_ID; 582 vol->vol_id = UBI_LAYOUT_VOL_ID;
583 vol->ref_count = 1;
586 584
587 ubi_assert(!ubi->volumes[i]); 585 ubi_assert(!ubi->volumes[i]);
588 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; 586 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..0d44ad95ab84 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
117#define WL_MAX_FAILURES 32 117#define WL_MAX_FAILURES 32
118 118
119/** 119/**
120 * struct ubi_wl_entry - wear-leveling entry.
121 * @rb: link in the corresponding RB-tree
122 * @ec: erase counter
123 * @pnum: physical eraseblock number
124 *
125 * Each physical eraseblock has a corresponding &struct wl_entry object which
126 * may be kept in different RB-trees.
127 */
128struct ubi_wl_entry {
129 struct rb_node rb;
130 int ec;
131 int pnum;
132};
133
134/**
135 * struct ubi_wl_prot_entry - PEB protection entry. 120 * struct ubi_wl_prot_entry - PEB protection entry.
136 * @rb_pnum: link in the @wl->prot.pnum RB-tree 121 * @rb_pnum: link in the @wl->prot.pnum RB-tree
137 * @rb_aec: link in the @wl->prot.aec RB-tree 122 * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
216#define paranoid_check_in_wl_tree(e, root) 201#define paranoid_check_in_wl_tree(e, root)
217#endif 202#endif
218 203
219/* Slab cache for wear-leveling entries */
220static struct kmem_cache *wl_entries_slab;
221
222/** 204/**
223 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 205 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
224 * @e: the wear-leveling entry to add 206 * @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
267 int err; 249 int err;
268 struct ubi_work *wrk; 250 struct ubi_work *wrk;
269 251
270 spin_lock(&ubi->wl_lock); 252 cond_resched();
271 253
254 /*
255 * @ubi->work_sem is used to synchronize with the workers. Workers take
256 * it in read mode, so many of them may be doing works at a time. But
257 * the queue flush code has to be sure the whole queue of works is
258 * done, and it takes the mutex in write mode.
259 */
260 down_read(&ubi->work_sem);
261 spin_lock(&ubi->wl_lock);
272 if (list_empty(&ubi->works)) { 262 if (list_empty(&ubi->works)) {
273 spin_unlock(&ubi->wl_lock); 263 spin_unlock(&ubi->wl_lock);
264 up_read(&ubi->work_sem);
274 return 0; 265 return 0;
275 } 266 }
276 267
277 wrk = list_entry(ubi->works.next, struct ubi_work, list); 268 wrk = list_entry(ubi->works.next, struct ubi_work, list);
278 list_del(&wrk->list); 269 list_del(&wrk->list);
270 ubi->works_count -= 1;
271 ubi_assert(ubi->works_count >= 0);
279 spin_unlock(&ubi->wl_lock); 272 spin_unlock(&ubi->wl_lock);
280 273
281 /* 274 /*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
286 err = wrk->func(ubi, wrk, 0); 279 err = wrk->func(ubi, wrk, 0);
287 if (err) 280 if (err)
288 ubi_err("work failed with error code %d", err); 281 ubi_err("work failed with error code %d", err);
282 up_read(&ubi->work_sem);
289 283
290 spin_lock(&ubi->wl_lock);
291 ubi->works_count -= 1;
292 ubi_assert(ubi->works_count >= 0);
293 spin_unlock(&ubi->wl_lock);
294 return err; 284 return err;
295} 285}
296 286
@@ -549,8 +539,12 @@ retry:
549 * prot_tree_del - remove a physical eraseblock from the protection trees 539 * prot_tree_del - remove a physical eraseblock from the protection trees
550 * @ubi: UBI device description object 540 * @ubi: UBI device description object
551 * @pnum: the physical eraseblock to remove 541 * @pnum: the physical eraseblock to remove
542 *
543 * This function returns PEB @pnum from the protection trees and returns zero
544 * in case of success and %-ENODEV if the PEB was not found in the protection
545 * trees.
552 */ 546 */
553static void prot_tree_del(struct ubi_device *ubi, int pnum) 547static int prot_tree_del(struct ubi_device *ubi, int pnum)
554{ 548{
555 struct rb_node *p; 549 struct rb_node *p;
556 struct ubi_wl_prot_entry *pe = NULL; 550 struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
561 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); 555 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
562 556
563 if (pnum == pe->e->pnum) 557 if (pnum == pe->e->pnum)
564 break; 558 goto found;
565 559
566 if (pnum < pe->e->pnum) 560 if (pnum < pe->e->pnum)
567 p = p->rb_left; 561 p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
569 p = p->rb_right; 563 p = p->rb_right;
570 } 564 }
571 565
566 return -ENODEV;
567
568found:
572 ubi_assert(pe->e->pnum == pnum); 569 ubi_assert(pe->e->pnum == pnum);
573 rb_erase(&pe->rb_aec, &ubi->prot.aec); 570 rb_erase(&pe->rb_aec, &ubi->prot.aec);
574 rb_erase(&pe->rb_pnum, &ubi->prot.pnum); 571 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
575 kfree(pe); 572 kfree(pe);
573 return 0;
576} 574}
577 575
578/** 576/**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
744static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 742static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
745 int cancel) 743 int cancel)
746{ 744{
747 int err, put = 0; 745 int err, put = 0, scrubbing = 0, protect = 0;
746 struct ubi_wl_prot_entry *uninitialized_var(pe);
748 struct ubi_wl_entry *e1, *e2; 747 struct ubi_wl_entry *e1, *e2;
749 struct ubi_vid_hdr *vid_hdr; 748 struct ubi_vid_hdr *vid_hdr;
750 749
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
757 if (!vid_hdr) 756 if (!vid_hdr)
758 return -ENOMEM; 757 return -ENOMEM;
759 758
759 mutex_lock(&ubi->move_mutex);
760 spin_lock(&ubi->wl_lock); 760 spin_lock(&ubi->wl_lock);
761 ubi_assert(!ubi->move_from && !ubi->move_to);
762 ubi_assert(!ubi->move_to_put);
761 763
762 /* 764 if (!ubi->free.rb_node ||
763 * Only one WL worker at a time is supported at this implementation, so
764 * make sure a PEB is not being moved already.
765 */
766 if (ubi->move_to || !ubi->free.rb_node ||
767 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 765 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
768 /* 766 /*
769 * Only one WL worker at a time is supported at this 767 * No free physical eraseblocks? Well, they must be waiting in
770 * implementation, so if a LEB is already being moved, cancel. 768 * the queue to be erased. Cancel movement - it will be
771 * 769 * triggered again when a free physical eraseblock appears.
772 * No free physical eraseblocks? Well, we cancel wear-leveling
773 * then. It will be triggered again when a free physical
774 * eraseblock appears.
775 * 770 *
776 * No used physical eraseblocks? They must be temporarily 771 * No used physical eraseblocks? They must be temporarily
777 * protected from being moved. They will be moved to the 772 * protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
780 */ 775 */
781 dbg_wl("cancel WL, a list is empty: free %d, used %d", 776 dbg_wl("cancel WL, a list is empty: free %d, used %d",
782 !ubi->free.rb_node, !ubi->used.rb_node); 777 !ubi->free.rb_node, !ubi->used.rb_node);
783 ubi->wl_scheduled = 0; 778 goto out_cancel;
784 spin_unlock(&ubi->wl_lock);
785 ubi_free_vid_hdr(ubi, vid_hdr);
786 return 0;
787 } 779 }
788 780
789 if (!ubi->scrub.rb_node) { 781 if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
798 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 790 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
799 dbg_wl("no WL needed: min used EC %d, max free EC %d", 791 dbg_wl("no WL needed: min used EC %d, max free EC %d",
800 e1->ec, e2->ec); 792 e1->ec, e2->ec);
801 ubi->wl_scheduled = 0; 793 goto out_cancel;
802 spin_unlock(&ubi->wl_lock);
803 ubi_free_vid_hdr(ubi, vid_hdr);
804 return 0;
805 } 794 }
806 paranoid_check_in_wl_tree(e1, &ubi->used); 795 paranoid_check_in_wl_tree(e1, &ubi->used);
807 rb_erase(&e1->rb, &ubi->used); 796 rb_erase(&e1->rb, &ubi->used);
808 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 797 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
809 e1->pnum, e1->ec, e2->pnum, e2->ec); 798 e1->pnum, e1->ec, e2->pnum, e2->ec);
810 } else { 799 } else {
800 /* Perform scrubbing */
801 scrubbing = 1;
811 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 802 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
812 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 803 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
813 paranoid_check_in_wl_tree(e1, &ubi->scrub); 804 paranoid_check_in_wl_tree(e1, &ubi->scrub);
814 rb_erase(&e1->rb, &ubi->scrub); 805 rb_erase(&e1->rb, &ubi->scrub);
815 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 806 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
816 } 807 }
817 808
818 paranoid_check_in_wl_tree(e2, &ubi->free); 809 paranoid_check_in_wl_tree(e2, &ubi->free);
819 rb_erase(&e2->rb, &ubi->free); 810 rb_erase(&e2->rb, &ubi->free);
820 ubi_assert(!ubi->move_from && !ubi->move_to);
821 ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
822 ubi->move_from = e1; 811 ubi->move_from = e1;
823 ubi->move_to = e2; 812 ubi->move_to = e2;
824 spin_unlock(&ubi->wl_lock); 813 spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
828 * We so far do not know which logical eraseblock our physical 817 * We so far do not know which logical eraseblock our physical
829 * eraseblock (@e1) belongs to. We have to read the volume identifier 818 * eraseblock (@e1) belongs to. We have to read the volume identifier
830 * header first. 819 * header first.
820 *
821 * Note, we are protected from this PEB being unmapped and erased. The
822 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
823 * which is being moved was unmapped.
831 */ 824 */
832 825
833 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); 826 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
842 * likely have the VID header in place. 835 * likely have the VID header in place.
843 */ 836 */
844 dbg_wl("PEB %d has no VID header", e1->pnum); 837 dbg_wl("PEB %d has no VID header", e1->pnum);
845 err = 0; 838 goto out_not_moved;
846 } else {
847 ubi_err("error %d while reading VID header from PEB %d",
848 err, e1->pnum);
849 if (err > 0)
850 err = -EIO;
851 } 839 }
852 goto error; 840
841 ubi_err("error %d while reading VID header from PEB %d",
842 err, e1->pnum);
843 if (err > 0)
844 err = -EIO;
845 goto out_error;
853 } 846 }
854 847
855 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 848 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
856 if (err) { 849 if (err) {
857 if (err == UBI_IO_BITFLIPS) 850
858 err = 0; 851 if (err < 0)
859 goto error; 852 goto out_error;
853 if (err == 1)
854 goto out_not_moved;
855
856 /*
857 * For some reason the LEB was not moved - it might be because
858 * the volume is being deleted. We should prevent this PEB from
859 * being selected for wear-levelling movement for some "time",
860 * so put it to the protection tree.
861 */
862
863 dbg_wl("cancelled moving PEB %d", e1->pnum);
864 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
865 if (!pe) {
866 err = -ENOMEM;
867 goto out_error;
868 }
869
870 protect = 1;
860 } 871 }
861 872
862 ubi_free_vid_hdr(ubi, vid_hdr); 873 ubi_free_vid_hdr(ubi, vid_hdr);
863 spin_lock(&ubi->wl_lock); 874 spin_lock(&ubi->wl_lock);
875 if (protect)
876 prot_tree_add(ubi, e1, pe, protect);
864 if (!ubi->move_to_put) 877 if (!ubi->move_to_put)
865 wl_tree_add(e2, &ubi->used); 878 wl_tree_add(e2, &ubi->used);
866 else 879 else
867 put = 1; 880 put = 1;
868 ubi->move_from = ubi->move_to = NULL; 881 ubi->move_from = ubi->move_to = NULL;
869 ubi->move_from_put = ubi->move_to_put = 0; 882 ubi->move_to_put = ubi->wl_scheduled = 0;
870 ubi->wl_scheduled = 0;
871 spin_unlock(&ubi->wl_lock); 883 spin_unlock(&ubi->wl_lock);
872 884
873 if (put) { 885 if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
877 */ 889 */
878 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); 890 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
879 err = schedule_erase(ubi, e2, 0); 891 err = schedule_erase(ubi, e2, 0);
880 if (err) { 892 if (err)
881 kmem_cache_free(wl_entries_slab, e2); 893 goto out_error;
882 ubi_ro_mode(ubi);
883 }
884 } 894 }
885 895
886 err = schedule_erase(ubi, e1, 0); 896 if (!protect) {
887 if (err) { 897 err = schedule_erase(ubi, e1, 0);
888 kmem_cache_free(wl_entries_slab, e1); 898 if (err)
889 ubi_ro_mode(ubi); 899 goto out_error;
890 } 900 }
891 901
902
892 dbg_wl("done"); 903 dbg_wl("done");
893 return err; 904 mutex_unlock(&ubi->move_mutex);
905 return 0;
894 906
895 /* 907 /*
896 * Some error occurred. @e1 was not changed, so return it back. @e2 908 * For some reasons the LEB was not moved, might be an error, might be
897 * might be changed, schedule it for erasure. 909 * something else. @e1 was not changed, so return it back. @e2 might
910 * be changed, schedule it for erasure.
898 */ 911 */
899error: 912out_not_moved:
900 if (err)
901 dbg_wl("error %d occurred, cancel operation", err);
902 ubi_assert(err <= 0);
903
904 ubi_free_vid_hdr(ubi, vid_hdr); 913 ubi_free_vid_hdr(ubi, vid_hdr);
905 spin_lock(&ubi->wl_lock); 914 spin_lock(&ubi->wl_lock);
906 ubi->wl_scheduled = 0; 915 if (scrubbing)
907 if (ubi->move_from_put) 916 wl_tree_add(e1, &ubi->scrub);
908 put = 1;
909 else 917 else
910 wl_tree_add(e1, &ubi->used); 918 wl_tree_add(e1, &ubi->used);
911 ubi->move_from = ubi->move_to = NULL; 919 ubi->move_from = ubi->move_to = NULL;
912 ubi->move_from_put = ubi->move_to_put = 0; 920 ubi->move_to_put = ubi->wl_scheduled = 0;
913 spin_unlock(&ubi->wl_lock); 921 spin_unlock(&ubi->wl_lock);
914 922
915 if (put) {
916 /*
917 * Well, the target PEB was put meanwhile, schedule it for
918 * erasure.
919 */
920 dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
921 err = schedule_erase(ubi, e1, 0);
922 if (err) {
923 kmem_cache_free(wl_entries_slab, e1);
924 ubi_ro_mode(ubi);
925 }
926 }
927
928 err = schedule_erase(ubi, e2, 0); 923 err = schedule_erase(ubi, e2, 0);
929 if (err) { 924 if (err)
930 kmem_cache_free(wl_entries_slab, e2); 925 goto out_error;
931 ubi_ro_mode(ubi); 926
932 } 927 mutex_unlock(&ubi->move_mutex);
928 return 0;
929
930out_error:
931 ubi_err("error %d while moving PEB %d to PEB %d",
932 err, e1->pnum, e2->pnum);
933 933
934 yield(); 934 ubi_free_vid_hdr(ubi, vid_hdr);
935 spin_lock(&ubi->wl_lock);
936 ubi->move_from = ubi->move_to = NULL;
937 ubi->move_to_put = ubi->wl_scheduled = 0;
938 spin_unlock(&ubi->wl_lock);
939
940 kmem_cache_free(ubi_wl_entry_slab, e1);
941 kmem_cache_free(ubi_wl_entry_slab, e2);
942 ubi_ro_mode(ubi);
943
944 mutex_unlock(&ubi->move_mutex);
935 return err; 945 return err;
946
947out_cancel:
948 ubi->wl_scheduled = 0;
949 spin_unlock(&ubi->wl_lock);
950 mutex_unlock(&ubi->move_mutex);
951 ubi_free_vid_hdr(ubi, vid_hdr);
952 return 0;
936} 953}
937 954
938/** 955/**
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1020 if (cancel) { 1037 if (cancel) {
1021 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1038 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1022 kfree(wl_wrk); 1039 kfree(wl_wrk);
1023 kmem_cache_free(wl_entries_slab, e); 1040 kmem_cache_free(ubi_wl_entry_slab, e);
1024 return 0; 1041 return 0;
1025 } 1042 }
1026 1043
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1049 1066
1050 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1067 ubi_err("failed to erase PEB %d, error %d", pnum, err);
1051 kfree(wl_wrk); 1068 kfree(wl_wrk);
1052 kmem_cache_free(wl_entries_slab, e); 1069 kmem_cache_free(ubi_wl_entry_slab, e);
1053 1070
1054 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1071 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1055 err == -EBUSY) { 1072 err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
1119} 1136}
1120 1137
1121/** 1138/**
1122 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling 1139 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
1123 * unit.
1124 * @ubi: UBI device description object 1140 * @ubi: UBI device description object
1125 * @pnum: physical eraseblock to return 1141 * @pnum: physical eraseblock to return
1126 * @torture: if this physical eraseblock has to be tortured 1142 * @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
1128 * This function is called to return physical eraseblock @pnum to the pool of 1144 * This function is called to return physical eraseblock @pnum to the pool of
1129 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1145 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1130 * occurred to this @pnum and it has to be tested. This function returns zero 1146 * occurred to this @pnum and it has to be tested. This function returns zero
1131 * in case of success and a negative error code in case of failure. 1147 * in case of success, and a negative error code in case of failure.
1132 */ 1148 */
1133int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) 1149int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1134{ 1150{
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1139 ubi_assert(pnum >= 0); 1155 ubi_assert(pnum >= 0);
1140 ubi_assert(pnum < ubi->peb_count); 1156 ubi_assert(pnum < ubi->peb_count);
1141 1157
1158retry:
1142 spin_lock(&ubi->wl_lock); 1159 spin_lock(&ubi->wl_lock);
1143
1144 e = ubi->lookuptbl[pnum]; 1160 e = ubi->lookuptbl[pnum];
1145 if (e == ubi->move_from) { 1161 if (e == ubi->move_from) {
1146 /* 1162 /*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1148 * be moved. It will be scheduled for erasure in the 1164 * be moved. It will be scheduled for erasure in the
1149 * wear-leveling worker. 1165 * wear-leveling worker.
1150 */ 1166 */
1151 dbg_wl("PEB %d is being moved", pnum); 1167 dbg_wl("PEB %d is being moved, wait", pnum);
1152 ubi_assert(!ubi->move_from_put);
1153 ubi->move_from_put = 1;
1154 spin_unlock(&ubi->wl_lock); 1168 spin_unlock(&ubi->wl_lock);
1155 return 0; 1169
1170 /* Wait for the WL worker by taking the @ubi->move_mutex */
1171 mutex_lock(&ubi->move_mutex);
1172 mutex_unlock(&ubi->move_mutex);
1173 goto retry;
1156 } else if (e == ubi->move_to) { 1174 } else if (e == ubi->move_to) {
1157 /* 1175 /*
1158 * User is putting the physical eraseblock which was selected 1176 * User is putting the physical eraseblock which was selected
1159 * as the target the data is moved to. It may happen if the EBA 1177 * as the target the data is moved to. It may happen if the EBA
1160 * unit already re-mapped the LEB but the WL unit did has not 1178 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
1161 * put the PEB to the "used" tree. 1179 * the WL unit has not put the PEB to the "used" tree yet, but
1180 * it is about to do this. So we just set a flag which will
1181 * tell the WL worker that the PEB is not needed anymore and
1182 * should be scheduled for erasure.
1162 */ 1183 */
1163 dbg_wl("PEB %d is the target of data moving", pnum); 1184 dbg_wl("PEB %d is the target of data moving", pnum);
1164 ubi_assert(!ubi->move_to_put); 1185 ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1172 } else if (in_wl_tree(e, &ubi->scrub)) { 1193 } else if (in_wl_tree(e, &ubi->scrub)) {
1173 paranoid_check_in_wl_tree(e, &ubi->scrub); 1194 paranoid_check_in_wl_tree(e, &ubi->scrub);
1174 rb_erase(&e->rb, &ubi->scrub); 1195 rb_erase(&e->rb, &ubi->scrub);
1175 } else 1196 } else {
1176 prot_tree_del(ubi, e->pnum); 1197 err = prot_tree_del(ubi, e->pnum);
1198 if (err) {
1199 ubi_err("PEB %d not found", pnum);
1200 ubi_ro_mode(ubi);
1201 spin_unlock(&ubi->wl_lock);
1202 return err;
1203 }
1204 }
1177 } 1205 }
1178 spin_unlock(&ubi->wl_lock); 1206 spin_unlock(&ubi->wl_lock);
1179 1207
@@ -1227,8 +1255,17 @@ retry:
1227 if (in_wl_tree(e, &ubi->used)) { 1255 if (in_wl_tree(e, &ubi->used)) {
1228 paranoid_check_in_wl_tree(e, &ubi->used); 1256 paranoid_check_in_wl_tree(e, &ubi->used);
1229 rb_erase(&e->rb, &ubi->used); 1257 rb_erase(&e->rb, &ubi->used);
1230 } else 1258 } else {
1231 prot_tree_del(ubi, pnum); 1259 int err;
1260
1261 err = prot_tree_del(ubi, e->pnum);
1262 if (err) {
1263 ubi_err("PEB %d not found", pnum);
1264 ubi_ro_mode(ubi);
1265 spin_unlock(&ubi->wl_lock);
1266 return err;
1267 }
1268 }
1232 1269
1233 wl_tree_add(e, &ubi->scrub); 1270 wl_tree_add(e, &ubi->scrub);
1234 spin_unlock(&ubi->wl_lock); 1271 spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,33 @@ retry:
1249 */ 1286 */
1250int ubi_wl_flush(struct ubi_device *ubi) 1287int ubi_wl_flush(struct ubi_device *ubi)
1251{ 1288{
1252 int err, pending_count; 1289 int err;
1253
1254 pending_count = ubi->works_count;
1255
1256 dbg_wl("flush (%d pending works)", pending_count);
1257 1290
1258 /* 1291 /*
1259 * Erase while the pending works queue is not empty, but not more then 1292 * Erase while the pending works queue is not empty, but not more then
1260 * the number of currently pending works. 1293 * the number of currently pending works.
1261 */ 1294 */
1262 while (pending_count-- > 0) { 1295 dbg_wl("flush (%d pending works)", ubi->works_count);
1296 while (ubi->works_count) {
1297 err = do_work(ubi);
1298 if (err)
1299 return err;
1300 }
1301
1302 /*
1303 * Make sure all the works which have been done in parallel are
1304 * finished.
1305 */
1306 ubi_assert(ubi->ref_count > 0);
1307 down_write(&ubi->work_sem);
1308 up_write(&ubi->work_sem);
1309
1310 /*
1311 * And in case last was the WL worker and it cancelled the LEB
1312 * movement, flush again.
1313 */
1314 while (ubi->works_count) {
1315 dbg_wl("flush more (%d pending works)", ubi->works_count);
1263 err = do_work(ubi); 1316 err = do_work(ubi);
1264 if (err) 1317 if (err)
1265 return err; 1318 return err;
@@ -1294,7 +1347,7 @@ static void tree_destroy(struct rb_root *root)
1294 rb->rb_right = NULL; 1347 rb->rb_right = NULL;
1295 } 1348 }
1296 1349
1297 kmem_cache_free(wl_entries_slab, e); 1350 kmem_cache_free(ubi_wl_entry_slab, e);
1298 } 1351 }
1299 } 1352 }
1300} 1353}
@@ -1303,7 +1356,7 @@ static void tree_destroy(struct rb_root *root)
1303 * ubi_thread - UBI background thread. 1356 * ubi_thread - UBI background thread.
1304 * @u: the UBI device description object pointer 1357 * @u: the UBI device description object pointer
1305 */ 1358 */
1306static int ubi_thread(void *u) 1359int ubi_thread(void *u)
1307{ 1360{
1308 int failures = 0; 1361 int failures = 0;
1309 struct ubi_device *ubi = u; 1362 struct ubi_device *ubi = u;
@@ -1394,36 +1447,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1394 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1447 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1395 ubi->prot.pnum = ubi->prot.aec = RB_ROOT; 1448 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1396 spin_lock_init(&ubi->wl_lock); 1449 spin_lock_init(&ubi->wl_lock);
1450 mutex_init(&ubi->move_mutex);
1451 init_rwsem(&ubi->work_sem);
1397 ubi->max_ec = si->max_ec; 1452 ubi->max_ec = si->max_ec;
1398 INIT_LIST_HEAD(&ubi->works); 1453 INIT_LIST_HEAD(&ubi->works);
1399 1454
1400 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1455 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1401 1456
1402 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
1403 if (IS_ERR(ubi->bgt_thread)) {
1404 err = PTR_ERR(ubi->bgt_thread);
1405 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1406 err);
1407 return err;
1408 }
1409
1410 if (ubi_devices_cnt == 0) {
1411 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
1412 sizeof(struct ubi_wl_entry),
1413 0, 0, NULL);
1414 if (!wl_entries_slab)
1415 return -ENOMEM;
1416 }
1417
1418 err = -ENOMEM; 1457 err = -ENOMEM;
1419 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1458 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1420 if (!ubi->lookuptbl) 1459 if (!ubi->lookuptbl)
1421 goto out_free; 1460 return err;
1422 1461
1423 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1462 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1424 cond_resched(); 1463 cond_resched();
1425 1464
1426 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1465 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1427 if (!e) 1466 if (!e)
1428 goto out_free; 1467 goto out_free;
1429 1468
@@ -1431,7 +1470,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1431 e->ec = seb->ec; 1470 e->ec = seb->ec;
1432 ubi->lookuptbl[e->pnum] = e; 1471 ubi->lookuptbl[e->pnum] = e;
1433 if (schedule_erase(ubi, e, 0)) { 1472 if (schedule_erase(ubi, e, 0)) {
1434 kmem_cache_free(wl_entries_slab, e); 1473 kmem_cache_free(ubi_wl_entry_slab, e);
1435 goto out_free; 1474 goto out_free;
1436 } 1475 }
1437 } 1476 }
@@ -1439,7 +1478,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1439 list_for_each_entry(seb, &si->free, u.list) { 1478 list_for_each_entry(seb, &si->free, u.list) {
1440 cond_resched(); 1479 cond_resched();
1441 1480
1442 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1481 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1443 if (!e) 1482 if (!e)
1444 goto out_free; 1483 goto out_free;
1445 1484
@@ -1453,7 +1492,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1453 list_for_each_entry(seb, &si->corr, u.list) { 1492 list_for_each_entry(seb, &si->corr, u.list) {
1454 cond_resched(); 1493 cond_resched();
1455 1494
1456 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1495 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1457 if (!e) 1496 if (!e)
1458 goto out_free; 1497 goto out_free;
1459 1498
@@ -1461,7 +1500,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1461 e->ec = seb->ec; 1500 e->ec = seb->ec;
1462 ubi->lookuptbl[e->pnum] = e; 1501 ubi->lookuptbl[e->pnum] = e;
1463 if (schedule_erase(ubi, e, 0)) { 1502 if (schedule_erase(ubi, e, 0)) {
1464 kmem_cache_free(wl_entries_slab, e); 1503 kmem_cache_free(ubi_wl_entry_slab, e);
1465 goto out_free; 1504 goto out_free;
1466 } 1505 }
1467 } 1506 }
@@ -1470,7 +1509,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1470 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1509 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1471 cond_resched(); 1510 cond_resched();
1472 1511
1473 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1512 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1474 if (!e) 1513 if (!e)
1475 goto out_free; 1514 goto out_free;
1476 1515
@@ -1510,8 +1549,6 @@ out_free:
1510 tree_destroy(&ubi->free); 1549 tree_destroy(&ubi->free);
1511 tree_destroy(&ubi->scrub); 1550 tree_destroy(&ubi->scrub);
1512 kfree(ubi->lookuptbl); 1551 kfree(ubi->lookuptbl);
1513 if (ubi_devices_cnt == 0)
1514 kmem_cache_destroy(wl_entries_slab);
1515 return err; 1552 return err;
1516} 1553}
1517 1554
@@ -1541,7 +1578,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
1541 rb->rb_right = NULL; 1578 rb->rb_right = NULL;
1542 } 1579 }
1543 1580
1544 kmem_cache_free(wl_entries_slab, pe->e); 1581 kmem_cache_free(ubi_wl_entry_slab, pe->e);
1545 kfree(pe); 1582 kfree(pe);
1546 } 1583 }
1547 } 1584 }
@@ -1553,10 +1590,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
1553 */ 1590 */
1554void ubi_wl_close(struct ubi_device *ubi) 1591void ubi_wl_close(struct ubi_device *ubi)
1555{ 1592{
1556 dbg_wl("disable \"%s\"", ubi->bgt_name);
1557 if (ubi->bgt_thread)
1558 kthread_stop(ubi->bgt_thread);
1559
1560 dbg_wl("close the UBI wear-leveling unit"); 1593 dbg_wl("close the UBI wear-leveling unit");
1561 1594
1562 cancel_pending(ubi); 1595 cancel_pending(ubi);
@@ -1565,8 +1598,6 @@ void ubi_wl_close(struct ubi_device *ubi)
1565 tree_destroy(&ubi->free); 1598 tree_destroy(&ubi->free);
1566 tree_destroy(&ubi->scrub); 1599 tree_destroy(&ubi->scrub);
1567 kfree(ubi->lookuptbl); 1600 kfree(ubi->lookuptbl);
1568 if (ubi_devices_cnt == 1)
1569 kmem_cache_destroy(wl_entries_slab);
1570} 1601}
1571 1602
1572#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1603#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID